source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
rl_scheduler.py | import os
import json
import time
import pickle
import logging
import threading
import multiprocessing as mp
from collections import OrderedDict
import mxnet as mx
from .resource import DistributedResource
from ..utils import (save, load, mkdir, try_import_mxboard, tqdm)
from ..core import Task
from ..core.decorator import _autogluon_method
from ..searcher import RLSearcher
from .fifo import FIFOScheduler
from .reporter import DistStatusReporter
__all__ = ['RLScheduler']
logger = logging.getLogger(__name__)
class RLScheduler(FIFOScheduler):
    r"""Scheduler that uses Reinforcement Learning with a LSTM controller created based on the provided search spaces

    Parameters
    ----------
    train_fn : callable
        A task launch function for training. Note: please add the `@ag.args` decorator to the original function.
    args : object (optional)
        Default arguments for launching train_fn.
    resource : dict
        Computation resources. For example, `{'num_cpus':2, 'num_gpus':1}`
    searcher : object (optional)
        Autogluon searcher. For example, autogluon.searcher.RandomSearcher
    time_attr : str
        A training result attr to use for comparing time.
        Note that you can pass in something non-temporal such as
        `training_epoch` as a measure of progress, the only requirement
        is that the attribute should increase monotonically.
    reward_attr : str
        The training result objective value attribute. As with `time_attr`, this may refer to any objective value.
        Stopping procedures will use this attribute.
    controller_resource : dict
        Computation resources reserved on the master node for training the
        controller, e.g. `{'num_cpus': 0, 'num_gpus': 0}`.
    controller_batch_size : int
        Batch size for training the controller.
    dist_ip_addrs : list of str
        IP addresses of remote machines.

    Examples
    --------
    >>> import numpy as np
    >>> import autogluon as ag
    >>>
    >>> @ag.args(
    ...     lr=ag.space.Real(1e-3, 1e-2, log=True),
    ...     wd=ag.space.Real(1e-3, 1e-2))
    >>> def train_fn(args, reporter):
    ...     print('lr: {}, wd: {}'.format(args.lr, args.wd))
    ...     for e in range(10):
    ...         dummy_accuracy = 1 - np.power(1.8, -np.random.uniform(e, 2*e))
    ...         reporter(epoch=e, accuracy=dummy_accuracy, lr=args.lr, wd=args.wd)
    ...
    >>> scheduler = ag.scheduler.RLScheduler(train_fn,
    ...                                      resource={'num_cpus': 2, 'num_gpus': 0},
    ...                                      num_trials=20,
    ...                                      reward_attr='accuracy',
    ...                                      time_attr='epoch')
    >>> scheduler.run()
    >>> scheduler.join_jobs()
    >>> scheduler.get_training_curves(plot=True)
    """
    def __init__(self, train_fn, args=None, resource=None, checkpoint='./exp/checkpoint.ag',
                 resume=False, num_trials=None, time_attr='epoch', reward_attr='accuracy',
                 visualizer='none', controller_lr=1e-3, ema_baseline_decay=0.95,
                 controller_resource=None,
                 controller_batch_size=1,
                 dist_ip_addrs=None, sync=True, **kwargs):
        assert isinstance(train_fn, _autogluon_method), 'Please use @ag.args ' + \
            'to decorate your training script.'
        # avoid shared mutable default arguments (dict/list defaults are
        # evaluated once and shared across all instances)
        if controller_resource is None:
            controller_resource = {'num_cpus': 0, 'num_gpus': 0}
        if dist_ip_addrs is None:
            dist_ip_addrs = []
        self.ema_baseline_decay = ema_baseline_decay
        self.sync = sync
        # create RL searcher/controller
        searcher = RLSearcher(train_fn.kwspaces)
        # resume is handled below, after the controller is set up, so the
        # parent is always constructed with resume=False
        super(RLScheduler, self).__init__(
            train_fn, train_fn.args, resource, searcher,
            checkpoint=checkpoint, resume=False, num_trials=num_trials,
            time_attr=time_attr, reward_attr=reward_attr,
            visualizer=visualizer, dist_ip_addrs=dist_ip_addrs, **kwargs)
        # reserve controller computation resource on master node
        master_node = self.REMOTE_MANAGER.get_master_node()
        self.controller_resource = DistributedResource(**controller_resource)
        assert self.RESOURCE_MANAGER.reserve_resource(
            master_node, self.controller_resource), 'Not Enough Resource on Master Node' + \
            ' for Training Controller'
        self.controller_ctx = [mx.gpu(i) for i in self.controller_resource.gpu_ids] if \
            controller_resource['num_gpus'] > 0 else [mx.cpu()]
        # controller setup
        self.controller = searcher.controller
        self.controller.collect_params().reset_ctx(self.controller_ctx)
        self.controller_optimizer = mx.gluon.Trainer(
            self.controller.collect_params(), 'adam',
            optimizer_params={'learning_rate': controller_lr*controller_batch_size})
        self.controller_batch_size = controller_batch_size
        # EMA baseline of observed rewards, initialized lazily from the
        # first reward seen
        self.baseline = None
        self.lock = mp.Lock()
        # async buffers: shared counters for in-flight trials, RNG seeds and
        # failed backward passes (only needed in async mode)
        if not sync:
            self.mp_count = mp.Value('i', 0)
            self.mp_seed = mp.Value('i', 0)
            self.mp_fail = mp.Value('i', 0)
        if resume:
            if os.path.isfile(checkpoint):
                self.load_state_dict(load(checkpoint))
            else:
                msg = 'checkpoint path {} is not available for resume.'.format(checkpoint)
                logger.error(msg)

    def run(self, **kwargs):
        """Run multiple number of trials, either synchronously or
        asynchronously depending on the `sync` constructor flag.
        """
        self.num_trials = kwargs.get('num_trials', self.num_trials)
        logger.info('Starting Experiments')
        logger.info('Num of Finished Tasks is {}'.format(self.num_finished_tasks))
        logger.info('Num of Pending Tasks is {}'.format(self.num_trials - self.num_finished_tasks))
        if self.sync:
            self._run_sync()
        else:
            self._run_async()

    def _run_sync(self):
        """Synchronous controller training: sample a batch of configurations,
        evaluate them all, then take one REINFORCE step on the controller."""
        decay = self.ema_baseline_decay
        for i in tqdm(range(self.num_trials // self.controller_batch_size + 1)):
            with mx.autograd.record():
                # sample controller_batch_size number of configurations;
                # the last iteration handles the remainder of num_trials.
                # (bugfix: this was `self.num_trials % self.num_trials`,
                # which is always 0 and silently dropped remainder trials)
                batch_size = self.num_trials % self.controller_batch_size \
                    if i == self.num_trials // self.controller_batch_size \
                    else self.controller_batch_size
                if batch_size == 0:
                    continue
                configs, log_probs, entropies = self.controller.sample(
                    batch_size, with_details=True)
                # schedule the training tasks and gather the reward
                rewards = self.sync_schedule_tasks(configs)
                # subtract baseline to reduce gradient variance
                if self.baseline is None:
                    self.baseline = rewards[0]
                avg_rewards = mx.nd.array([reward - self.baseline for reward in rewards],
                                          ctx=self.controller.context)
                # EMA baseline update
                for reward in rewards:
                    self.baseline = decay * self.baseline + (1 - decay) * reward
                # negative policy gradient (REINFORCE)
                log_probs = log_probs.sum(axis=1)
                loss = -log_probs * avg_rewards
                loss = loss.sum()  # or loss.mean()
            # update controller parameters
            loss.backward()
            self.controller_optimizer.step(batch_size)
            logger.debug('controller loss: {}'.format(loss.asscalar()))

    def _run_async(self):
        """Asynchronous controller training: each trial thread samples one
        configuration, trains it, and applies its own REINFORCE update."""
        def _async_run_trial():
            # `+=` on mp.Value is not atomic across threads: guard the
            # shared counters with the controller lock
            with self.lock:
                self.mp_count.value += 1
                self.mp_seed.value += 1
                seed = self.mp_seed.value
            mx.random.seed(seed)
            with mx.autograd.record():
                # sample one configuration
                with self.lock:
                    config, log_prob, entropy = self.controller.sample(with_details=True)
                config = config[0]
                task = Task(self.train_fn, {'args': self.args, 'config': config},
                            DistributedResource(**self.resource))
                # start training task
                reporter = DistStatusReporter()
                task.args['reporter'] = reporter
                task_thread = self.add_job(task)
                # drain intermediate results until the task reports done
                last_result = None
                config = task.args['config']
                while task_thread.is_alive():
                    reported_result = reporter.fetch()
                    if 'done' in reported_result and reported_result['done'] is True:
                        reporter.move_on()
                        task_thread.join()
                        break
                    self._add_training_result(task.task_id, reported_result, task.args['config'])
                    reporter.move_on()
                    last_result = reported_result
                reward = last_result[self._reward_attr]
                self.searcher.update(config, reward, done=True)
                # lazily initialize the EMA baseline with the first reward
                with self.lock:
                    if self.baseline is None:
                        self.baseline = reward
                avg_reward = mx.nd.array([reward - self.baseline], ctx=self.controller.context)
                # negative policy gradient
                with self.lock:
                    loss = -log_prob * avg_reward.reshape(-1, 1)
                    loss = loss.sum()
            # update controller parameters, serialized across trial threads
            logger.debug('controller loss: {}'.format(loss))
            with self.lock:
                try:
                    loss.backward()
                    self.controller_optimizer.step(1)
                except Exception:
                    self.mp_fail.value += 1
                    logger.warning('Exception during backward {}.'.format(self.mp_fail.value))
                self.mp_count.value -= 1
            # EMA baseline update
            with self.lock:
                decay = self.ema_baseline_decay
                self.baseline = decay * self.baseline + (1 - decay) * reward

        reporter_threads = []
        for i in range(self.num_trials):
            # throttle: keep at most controller_batch_size trials in flight
            while self.mp_count.value >= self.controller_batch_size:
                time.sleep(0.2)
            reporter_thread = threading.Thread(target=_async_run_trial)
            reporter_thread.start()
            reporter_threads.append(reporter_thread)
        for p in reporter_threads:
            p.join()

    def sync_schedule_tasks(self, configs):
        """Launch one training task per config, wait for all of them, and
        return their final rewards in the same order as `configs`."""
        rewards = []
        results = {}
        def _run_reporter(task, task_job, reporter):
            # drain intermediate results from the task until it reports done
            last_result = None
            config = task.args['config']
            while not task_job.done():
                reported_result = reporter.fetch()
                if 'done' in reported_result and reported_result['done'] is True:
                    reporter.move_on()
                    break
                self._add_training_result(task.task_id, reported_result, task.args['config'])
                reporter.move_on()
                last_result = reported_result
            if last_result is not None:
                self.searcher.update(config, last_result[self._reward_attr], done=True)
            # configs are dicts; pickle them to get a hashable key
            # NOTE(review): if the task produced no result, last_result is
            # None here and this raises - consider a sentinel reward
            with self.lock:
                results[pickle.dumps(config)] = last_result[self._reward_attr]
        # launch the tasks
        tasks = []
        task_jobs = []
        reporter_threads = []
        for config in configs:
            logger.debug('scheduling config: {}'.format(config))
            # create task
            task = Task(self.train_fn, {'args': self.args, 'config': config},
                        DistributedResource(**self.resource))
            reporter = DistStatusReporter()
            task.args['reporter'] = reporter
            task_job = self.add_job(task)
            # run reporter in its own thread
            reporter_thread = threading.Thread(target=_run_reporter, args=(task, task_job, reporter))
            reporter_thread.start()
            tasks.append(task)
            task_jobs.append(task_job)
            reporter_threads.append(reporter_thread)
        for p1, p2 in zip(task_jobs, reporter_threads):
            p1.result()
            p2.join()
        # self.LOCK is the coarse scheduler lock inherited from the parent
        # scheduler (distinct from self.lock used for controller updates)
        with self.LOCK:
            for task in tasks:
                self.finished_tasks.append({'TASK_ID': task.task_id,
                                            'Config': task.args['config']})
            if self._checkpoint is not None:
                logger.debug('Saving Checkerpoint')
                self.save()
        for config in configs:
            rewards.append(results[pickle.dumps(config)])
        return rewards

    def add_job(self, task, **kwargs):
        """Add a training task to the scheduler and return its job handle.

        Args:
            task (:class:`autogluon.scheduler.Task`): a new training task
        """
        cls = RLScheduler
        cls.RESOURCE_MANAGER._request(task.resources)
        # launch the distributed job from the main process
        job = cls._start_distributed_job(task, cls.RESOURCE_MANAGER, self.env_sem)
        return job

    def join_tasks(self):
        """No-op: jobs are awaited in `sync_schedule_tasks` / `_run_async`."""
        pass

    def state_dict(self, destination=None):
        """Returns a dictionary containing a whole state of the Scheduler

        Examples
        --------
        >>> ag.save(scheduler.state_dict(), 'checkpoint.ag')
        """
        if destination is None:
            destination = OrderedDict()
            destination._metadata = OrderedDict()
        logger.debug('\nState_Dict self.finished_tasks: {}'.format(self.finished_tasks))
        destination['finished_tasks'] = pickle.dumps(self.finished_tasks)
        destination['baseline'] = pickle.dumps(self.baseline)
        destination['TASK_ID'] = Task.TASK_ID.value
        destination['searcher'] = self.searcher.state_dict()
        destination['training_history'] = json.dumps(self.training_history)
        if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
            destination['visualizer'] = json.dumps(self.mxboard._scalar_dict)
        return destination

    def load_state_dict(self, state_dict):
        """Load from the saved state dict.

        Examples
        --------
        >>> scheduler.load_state_dict(ag.load('checkpoint.ag'))
        """
        self.finished_tasks = pickle.loads(state_dict['finished_tasks'])
        # NOTE: the reward baseline is deliberately not restored; it is
        # re-initialized from the first reward after resume.
        #self.baseline = pickle.loads(state_dict['baseline'])
        Task.set_id(state_dict['TASK_ID'])
        self.searcher.load_state_dict(state_dict['searcher'])
        self.training_history = json.loads(state_dict['training_history'])
        if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
            self.mxboard._scalar_dict = json.loads(state_dict['visualizer'])
        logger.debug('Loading Searcher State {}'.format(self.searcher))
|
api_chess.py | from multiprocessing import connection, Pipe
import time
from threading import Thread
import numpy as np
from blackqueen.config import Config
class ChessModelAPI:
    """Serves batched model predictions to many clients over pipes.

    Clients obtain a pipe end via `get_pipe()`, send feature arrays, and
    receive `(policy, value)` tuples back from a single background worker.
    """
    # noinspection PyUnusedLocal
    def __init__(self, config: Config, agent_model):  # ChessModel
        self.agent_model = agent_model
        # server-side ends of every client pipe
        self.pipes = []

    def start(self):
        """Launch the prediction worker as a daemon thread."""
        worker = Thread(target=self.predict_batch_worker, name="prediction_worker")
        worker.daemon = True
        worker.start()

    def get_pipe(self):
        """Create a duplex pipe; keep one end, hand the other to the caller."""
        server_end, client_end = Pipe()
        self.pipes.append(server_end)
        return client_end

    def predict_batch_worker(self):
        """Drain pending requests from all pipes, predict once per batch,
        and send each result back on the pipe it arrived on."""
        while True:
            ready = connection.wait(self.pipes, timeout=0.001)
            if not ready:
                continue
            data = []
            result_pipes = []
            for pipe in ready:
                while pipe.poll():
                    data.append(pipe.recv())
                    result_pipes.append(pipe)
            batch = np.asarray(data, dtype=np.float32)
            policy_ary, value_ary = self.agent_model.model.predict_on_batch(batch)
            for pipe, policy, value in zip(result_pipes, policy_ary, value_ary):
                pipe.send((policy, float(value)))
|
alpaca.py | #
# Copyright 2018 Alpaca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import alpaca_trade_api as tradeapi
from alpaca_trade_api.rest import APIError
from alpaca_trade_api.entity import Order
from requests.exceptions import HTTPError
import numpy as np
import pandas as pd
from trading_calendars import (
get_calendar,
register_calendar_alias,
)
from trading_calendars.calendar_utils import (
global_calendar_dispatcher as default_calendar,
)
from datetime import timedelta
import uuid
from .base import BaseBackend
from pylivetrader.api import symbol as symbol_lookup
from pylivetrader.misc.api_context import set_context
import pylivetrader.protocol as zp
from pylivetrader.finance.order import (
Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS,
)
from pylivetrader.finance.execution import (
MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder,
)
from pylivetrader.misc.pd_utils import normalize_date
from pylivetrader.misc.parallel_utils import parallelize
from pylivetrader.errors import SymbolNotFound
from pylivetrader.assets import Equity
from logbook import Logger
from threading import Thread
import asyncio
log = Logger('Alpaca')  # module-wide logbook logger
NY = 'America/New_York'  # exchange-local timezone used for all timestamps
end_offset = pd.Timedelta('1000 days')  # far-future end_date for tradable assets
one_day_offset = pd.Timedelta('1 day')  # used to back-date start/end of assets
def skip_http_error(statuses):
    '''
    A decorator to wrap with try..except to swallow
    specific HTTP errors.

    @skip_http_error((404, 503))
    def fetch():
        ...

    Swallowed calls log a warning and return None.
    '''
    # local import so the module's dependency surface is unchanged
    from functools import wraps
    assert isinstance(statuses, tuple)

    def decorator(func):
        # wraps preserves the wrapped function's name/docstring so that
        # logging and introspection see the real function, not 'wrapper'
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except HTTPError as e:
                status_code = e.response.status_code
                if status_code in statuses:
                    # expected transient/missing-data status: warn and
                    # fall through (returns None)
                    log.warn(str(e))
                else:
                    raise
        return wrapper
    return decorator
class Backend(BaseBackend):
    """pylivetrader broker backend backed by the Alpaca trade API.

    Keeps a locally cached view of open orders (`_open_orders`) that is kept
    in sync by a websocket stream of trade updates, and queues orders that
    cannot be submitted yet in `_orders_pending_submission`.
    """
    def __init__(
        self,
        key_id=None,
        secret=None,
        base_url=None,
        api_version='v1'
    ):
        self._key_id = key_id
        self._secret = secret
        self._base_url = base_url
        self._api = tradeapi.REST(
            key_id, secret, base_url, api_version=api_version
        )
        # NYSE calendar used for session/minute arithmetic throughout
        self._cal = get_calendar('NYSE')
        # client_order_id -> ZPOrder cache, maintained by the stream handler
        self._open_orders = {}
        # client_order_id -> (asset, amount, style) tuples waiting on a fill
        self._orders_pending_submission = {}

    def initialize_data(self, context):
        """Start the trade-update stream and seed the open-order cache."""
        # Open a websocket stream to get updates in real time
        stream_process = Thread(
            target=self._get_stream, daemon=True, args=(context,)
        )
        stream_process.start()
        # Load all open orders
        existing_orders = self.all_orders(status='open', initialize=True)
        for k, v in existing_orders.items():
            if self._open_orders.get(k) is not None:
                # NOTE(review): '+=' on an order object presumably merges
                # orders - confirm ZPOrder defines __iadd__
                self._open_orders[k] += v
            else:
                self._open_orders[k] = v

    def _get_stream(self, context):
        """Blocking websocket loop that reacts to Alpaca trade updates."""
        set_context(context)
        # this runs in its own thread, which needs its own event loop
        asyncio.set_event_loop(asyncio.new_event_loop())
        conn = tradeapi.StreamConn(self._key_id, self._secret, self._base_url)
        channels = ['trade_updates']

        @conn.on(r'trade_updates')
        async def handle_trade_update(conn, channel, data):
            # Check for any pending orders
            waiting_order = self._orders_pending_submission.get(
                data.order['client_order_id']
            )
            if waiting_order is not None:
                if data.event == 'fill':
                    # Submit the waiting order
                    self.order(*waiting_order)
                    self._orders_pending_submission.pop(
                        data.order['client_order_id'], None
                    )
                elif data.event in ['canceled', 'rejected']:
                    # Remove the waiting order
                    self._orders_pending_submission.pop(
                        data.order['client_order_id'], None
                    )
            # keep the open-order cache in sync with stream events
            if data.event in ['canceled', 'rejected', 'fill']:
                self._open_orders.pop(data.order['client_order_id'], None)
            else:
                self._open_orders[data.order['client_order_id']] = (
                    self._order2zp(Order(data.order))
                )
        conn.run(channels)

    def _symbols2assets(self, symbols):
        '''
        Utility for debug/testing: map ticker symbols to Equity assets,
        silently dropping unknown symbols.
        '''
        assets = {a.symbol: a for a in self.get_equities()}
        return [assets[symbol] for symbol in symbols if symbol in assets]

    def get_equities(self):
        """Build Equity assets for every US equity known to Alpaca."""
        assets = []
        t = normalize_date(pd.Timestamp('now', tz=NY))
        raw_assets = self._api.list_assets(asset_class='us_equity')
        for raw_asset in raw_assets:
            asset = Equity(
                raw_asset.id, raw_asset.exchange,
                symbol=raw_asset.symbol,
                asset_name=raw_asset.symbol,
            )
            asset.start_date = t - one_day_offset
            if raw_asset.status == 'active' and raw_asset.tradable:
                asset.end_date = t + end_offset
            else:
                # if asset is not tradable, set end_date = day before
                asset.end_date = t - one_day_offset
            asset.auto_close_date = asset.end_date
            assets.append(asset)
            # register all unseen exchange name as
            # alias of NYSE (e.g. AMEX, ARCA, NYSEARCA.)
            if not default_calendar.has_calendar(raw_asset.exchange):
                register_calendar_alias(raw_asset.exchange,
                                        'NYSE', force=True)
        return assets

    @property
    def positions(self):
        """Current positions as a zipline-style Positions mapping."""
        z_positions = zp.Positions()
        positions = self._api.list_positions()
        position_map = {}
        symbols = []
        for pos in positions:
            symbol = pos.symbol
            try:
                z_position = zp.Position(symbol_lookup(symbol))
            except SymbolNotFound:
                # skip positions whose symbol is not in the asset universe
                continue
            z_position.amount = int(pos.qty)
            z_position.cost_basis = float(pos.cost_basis) / float(pos.qty)
            # last-sale info is filled in below from the latest trades
            z_position.last_sale_price = None
            z_position.last_sale_date = None
            z_positions[symbol_lookup(symbol)] = z_position
            symbols.append(symbol)
            position_map[symbol] = z_position
        trades = self._symbol_trades(symbols)
        for symbol, trade in trades.items():
            z_position = position_map[symbol]
            if trade is None:
                z_position.last_sale_price = np.nan
                z_position.last_sale_date = pd.NaT
            else:
                z_position.last_sale_price = float(trade.price)
                z_position.last_sale_date = trade.timestamp
        return z_positions

    @property
    def portfolio(self):
        """Portfolio snapshot (cash, positions, total value)."""
        account = self._api.get_account()
        z_portfolio = zp.Portfolio()
        z_portfolio.cash = float(account.cash)
        z_portfolio.positions = self.positions
        z_portfolio.positions_value = float(
            account.portfolio_value) - float(account.cash)
        z_portfolio.portfolio_value = float(account.portfolio_value)
        return z_portfolio

    @property
    def account(self):
        """Account snapshot (buying power and total position value)."""
        account = self._api.get_account()
        z_account = zp.Account()
        z_account.buying_power = float(account.buying_power)
        z_account.total_position_value = float(
            account.portfolio_value) - float(account.cash)
        return z_account

    def _order2zp(self, order):
        """Convert an Alpaca Order entity to a zipline-style ZPOrder."""
        zp_order = ZPOrder(
            id=order.client_order_id,
            asset=symbol_lookup(order.symbol),
            # signed amount: negative for sells
            amount=int(order.qty) if order.side == 'buy' else -int(order.qty),
            stop=float(order.stop_price) if order.stop_price else None,
            limit=float(order.limit_price) if order.limit_price else None,
            dt=order.submitted_at,
            commission=0,
        )
        # later checks override earlier ones; filled wins over canceled/failed
        zp_order._status = ZP_ORDER_STATUS.OPEN
        if order.canceled_at:
            zp_order._status = ZP_ORDER_STATUS.CANCELLED
        if order.failed_at:
            zp_order._status = ZP_ORDER_STATUS.REJECTED
        if order.filled_at:
            zp_order._status = ZP_ORDER_STATUS.FILLED
            zp_order.filled = int(order.filled_qty)
        return zp_order

    def _new_order_id(self):
        """Generate a unique client order id."""
        return uuid.uuid4().hex

    def batch_order(self, args):
        """Submit several orders; each element of `args` is an order() arg tuple."""
        return [self.order(*order) for order in args]

    def order(self, asset, amount, style, quantopian_compatible=True):
        """Submit an order for `amount` shares of `asset` with the given style.

        In quantopian-compatible mode an order that would flip a position
        from long to short (or vice versa) is split: only the closing part
        is submitted now and the remainder is queued until the fill event.
        Returns the resulting ZPOrder, or None if the API rejected it.
        """
        symbol = asset.symbol
        zp_order_id = self._new_order_id()
        if quantopian_compatible:
            current_position = self.positions[asset]
            if (
                abs(amount) > abs(current_position.amount) and
                amount * current_position.amount < 0
            ):
                # The order would take us from a long position to a short
                # position or vice versa and needs to be broken up
                self._orders_pending_submission[zp_order_id] = (
                    asset,
                    amount + current_position.amount,
                    style
                )
                amount = -1 * current_position.amount
        qty = amount if amount > 0 else -amount
        side = 'buy' if amount > 0 else 'sell'
        order_type = 'market'
        if isinstance(style, MarketOrder):
            order_type = 'market'
        elif isinstance(style, LimitOrder):
            order_type = 'limit'
        elif isinstance(style, StopOrder):
            order_type = 'stop'
        elif isinstance(style, StopLimitOrder):
            order_type = 'stop_limit'
        limit_price = style.get_limit_price(side == 'buy') or None
        stop_price = style.get_stop_price(side == 'buy') or None
        log.debug(
            ('submitting {} order for {} - '
             'qty:{}, side:{}, limit_price:{}, stop_price:{}').format(
                order_type,
                symbol,
                qty,
                side,
                limit_price,
                stop_price
            )
        )
        try:
            order = self._api.submit_order(
                symbol=symbol,
                qty=qty,
                side=side,
                type=order_type,
                time_in_force='day',
                limit_price=limit_price,
                stop_price=stop_price,
                client_order_id=zp_order_id,
            )
            zp_order = self._order2zp(order)
            self._open_orders[zp_order_id] = zp_order
            return zp_order
        except APIError as e:
            # rejected by the API (e.g. insufficient buying power)
            log.warning('order for symbol {} is rejected {}'.format(
                symbol,
                e
            ))
            return None

    @property
    def orders(self):
        """All orders (any status) keyed by client order id."""
        return {
            o.client_order_id: self._order2zp(o)
            for o in self._api.list_orders('all')
        }

    def get_order(self, zp_order_id):
        """Look up one order, falling back to the API for closed orders."""
        order = None
        try:
            order = self._open_orders[zp_order_id]
        except Exception:
            # Order was not found in our open order list, may be closed
            order = self._order2zp(
                self._api.get_order_by_client_order_id(zp_order_id))
        return order

    def all_orders(
            self,
            before=None,
            status='all',
            days_back=None,
            initialize=False):
        """Fetch orders in batches of 500, optionally bounded by `days_back`.

        Returns a dict keyed by client order id. The cached open-order dict
        is returned directly for the common 'open' query.
        """
        # Check if the open order list is being asked for
        if (not initialize and status == 'open'
                and before is None and days_back is None):
            return self._open_orders
        # Get all orders submitted days_back days before `before` or now.
        now = pd.Timestamp.utcnow()
        start = now.isoformat() if before is None else before.isoformat()
        # A session label refers to the market date that an order submitted
        # at a given minute would be executed on. We'll need to keep track of
        # this if the function is bounded by days_back.
        start_session_label = self._cal.minute_to_session_label(now)
        reached_end_date = False
        all_orders = {}
        batch_size = 500
        orders = self._api.list_orders(status, batch_size, until=start)
        while len(orders) > 0 and not reached_end_date:
            batch_orders = {}
            for order in orders:
                if days_back is not None:
                    # Verify that the order is not too old.
                    # `session_distance()` ignores holidays and weekends.
                    days_since_order = self._cal.session_distance(
                        self._cal.minute_to_session_label(order.submitted_at),
                        start_session_label
                    )
                    if days_since_order > days_back:
                        reached_end_date = True
                        break
                batch_orders[order.client_order_id] = self._order2zp(order)
            all_orders.update(batch_orders)
            if not reached_end_date:
                # Get the timestamp of the earliest order in the batch.
                until = pd.Timestamp(orders[-1].submitted_at).isoformat()
                orders = self._api.list_orders(status, batch_size, until=until)
        return all_orders

    def cancel_order(self, zp_order_id):
        """Cancel an order by client order id; failures are logged, not raised."""
        try:
            order = self._api.get_order_by_client_order_id(zp_order_id)
            self._api.cancel_order(order.id)
        except Exception as e:
            # NOTE(review): print is inconsistent with the log.* calls used
            # everywhere else in this class
            print('Error: Could not cancel order {}'.format(zp_order_id))
            log.error(e)
            return

    def get_last_traded_dt(self, asset):
        """Timestamp of the most recent trade for `asset` (via Polygon)."""
        trade = self._api.polygon.last_trade(asset.symbol)
        return trade.timestamp

    def get_spot_value(
            self,
            assets,
            field,
            dt,
            date_frequency,
            quantopian_compatible=True):
        """Current value of `field` for one asset or a list of assets.

        'price'/'last_traded' come from the last trade; OHLCV fields come
        from the latest minute bar.
        """
        assert(field in (
            'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'))
        assets_is_scalar = not isinstance(assets, (list, set, tuple))
        if assets_is_scalar:
            symbols = [assets.symbol]
        else:
            symbols = [asset.symbol for asset in assets]
        if ((quantopian_compatible and field == 'last_traded') or
                (not quantopian_compatible and field in ('price', 'last_traded'))):
            results = self._get_spot_trade(symbols, field)
        else:
            results = self._get_spot_bars(symbols, field)
        return results[0] if assets_is_scalar else results

    def _get_spot_trade(self, symbols, field):
        """Spot price/timestamp per symbol from the latest trade."""
        assert(field in ('price', 'last_traded'))
        symbol_trades = self._symbol_trades(symbols)

        def get_for_symbol(symbol_trades, symbol):
            trade = symbol_trades.get(symbol)
            if field == 'price':
                if trade is None:
                    return np.nan
                return trade.price
            else:
                if trade is None:
                    return pd.NaT
                return trade.timestamp
        return [get_for_symbol(symbol_trades, symbol) for symbol in symbols]

    def _get_spot_bars(self, symbols, field):
        """Spot OHLCV value per symbol from the latest minute bar."""
        symbol_bars = self._symbol_bars(symbols, 'minute', limit=1)

        def get_for_symbol(symbol_bars, symbol, field):
            bars = symbol_bars.get(symbol)
            if bars is None or len(bars) == 0:
                return np.nan
            return bars[field].values[-1]
        # 'price' maps to the close of the latest bar
        ohlcv_field = 'close' if field == 'price' else field
        results = [
            get_for_symbol(symbol_bars, symbol, ohlcv_field)
            for symbol in symbols
        ]
        return results

    def get_bars(self, assets, data_frequency, bar_count=500):
        '''
        Interface method.

        Return: pd.Dataframe() with columns MultiIndex [asset -> OHLCV]
        '''
        assets_is_scalar = not isinstance(assets, (list, set, tuple))
        is_daily = 'd' in data_frequency  # 'daily' or '1d'
        if assets_is_scalar:
            symbols = [assets.symbol]
        else:
            symbols = [asset.symbol for asset in assets]
        symbol_bars = self._symbol_bars(
            symbols, 'day' if is_daily else 'minute', limit=bar_count)
        if is_daily:
            # aggregate today's minute bars into a provisional daily bar,
            # since the daily feed lags the current session
            intra_bars = {}
            symbol_bars_minute = self._symbol_bars(
                symbols, 'minute', limit=1000)
            for symbol, df in symbol_bars_minute.items():
                agged = df.resample('1D').agg(dict(
                    open='first',
                    high='max',
                    low='min',
                    close='last',
                    volume='sum',
                )).dropna()
                intra_bars[symbol] = agged
        dfs = []
        for asset in assets if not assets_is_scalar else [assets]:
            symbol = asset.symbol
            df = symbol_bars.get(symbol)
            if df is None:
                # no data: emit an empty OHLCV frame for this asset
                dfs.append(pd.DataFrame(
                    [], columns=[
                        'open', 'high', 'low', 'close', 'volume']
                ))
                continue
            if is_daily:
                agged = intra_bars.get(symbol)
                if agged is not None and len(
                        agged.index) > 0 and agged.index[-1] not in df.index:
                    if not (agged.index[-1] > df.index[-1]):
                        log.warn(
                            ('agged.index[-1] = {}, df.index[-1] = {} '
                             'for {}').format(
                                agged.index[-1], df.index[-1], symbol))
                    df = df.append(agged.iloc[-1])
            df.columns = pd.MultiIndex.from_product([[asset, ], df.columns])
            dfs.append(df)
        return pd.concat(dfs, axis=1)

    def _symbol_bars(
            self,
            symbols,
            size,
            _from=None,
            to=None,
            limit=None):
        '''
        Query historic_agg either minute or day in parallel
        for multiple symbols, and return in dict.

        symbols: list[str]
        size: str ('day', 'minute')
        _from: str or pd.Timestamp
        to: str or pd.Timestamp
        limit: str or int

        return: dict[str -> pd.DataFrame]
        '''
        assert size in ('day', 'minute')
        # default to "now" in exchange time when no explicit range is given
        if not (_from or to):
            to = pd.to_datetime('now', utc=True).tz_convert('America/New_York')
        if not (_from and to) and limit:
            # temp workaround for less bars after masking by
            # market hours
            query_limit = limit
            if query_limit is not None:
                query_limit *= 2
            # derive the missing end of the range from the known one
            if _from:
                if size == 'day':
                    to = _from + timedelta(days=query_limit+1)
                else:
                    to = _from + timedelta(minutes=query_limit+1)
            else:
                if size == 'day':
                    _from = to - timedelta(days=query_limit+1)
                else:
                    _from = to - timedelta(minutes=query_limit+1)

        @skip_http_error((404, 504))
        def fetch(symbol):
            df = self._api.polygon.historic_agg_v2(
                symbol, 1, size,
                int(_from.timestamp()) * 1000,
                int(to.timestamp()) * 1000
            ).df
            # rename Polygon's v2 agg fields to match their full titles
            df = df.rename(index=str, columns={
                't': 'timestamp',
                'o': 'open',
                'h': 'high',
                'l': 'low',
                'c': 'close',
                'v': 'volume'
            })
            # convert timestamps to datetimes
            # astype is necessary to deal with empty result
            df.index = pd.to_datetime(
                df.index.astype('int64') * 1000000,
                utc=True,
            ).tz_convert('America/New_York')
            df.index.name = 'timestamp'
            # zipline -> right label
            # API result -> left label (beginning of bucket)
            if size == 'minute':
                df.index += pd.Timedelta('1min')
            if not df.empty:
                # mask out bars outside market hours
                mask = self._cal.minutes_in_range(
                    df.index[0], df.index[-1],
                ).tz_convert(NY)
                df = df.reindex(mask)
                if limit is not None:
                    df = df.iloc[-limit:]
            return df
        return parallelize(fetch)(symbols)

    def _symbol_trades(self, symbols):
        '''
        Query last_trade in parallel for multiple symbols and
        return in dict.

        symbols: list[str]

        return: dict[str -> polygon.Trade]
        '''
        @skip_http_error((404, 504))
        def fetch(symbol):
            return self._api.polygon.last_trade(symbol)
        return parallelize(fetch)(symbols)
|
env.py | """
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import gym_super_mario_bros
from gym.spaces import Box
from gym import Wrapper
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT, RIGHT_ONLY
import cv2
import numpy as np
import subprocess as sp
import torch.multiprocessing as mp
class Monitor:
    """Records raw RGB frames to a video file through an ffmpeg subprocess.

    If ffmpeg is not installed, recording is silently disabled instead of
    failing later (the original left `self.pipe` unset on FileNotFoundError,
    which raised AttributeError on the first `record()` call).
    """
    def __init__(self, width, height, saved_path):
        # rawvideo frames on stdin at 60 fps, encoded as mpeg4 to saved_path
        self.command = ["ffmpeg", "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", "{}X{}".format(width, height),
                        "-pix_fmt", "rgb24", "-r", "60", "-i", "-", "-an", "-vcodec", "mpeg4", saved_path]
        try:
            self.pipe = sp.Popen(self.command, stdin=sp.PIPE, stderr=sp.PIPE)
        except FileNotFoundError:
            # ffmpeg binary not found: disable recording
            self.pipe = None

    def record(self, image_array):
        """Write one frame (HxWx3 uint8 array) to the encoder, if running."""
        if self.pipe is None:
            return
        # tobytes() replaces the deprecated ndarray.tostring() alias
        self.pipe.stdin.write(image_array.tobytes())
def process_frame(frame):
    """Convert an RGB frame to a normalized 1x84x84 grayscale array.

    Returns an all-zero array when no frame is available.
    """
    if frame is None:
        return np.zeros((1, 84, 84))
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    scaled = cv2.resize(gray, (84, 84))
    return scaled[None, :, :] / 255.
class CustomReward(Wrapper):
    # Reward-shaping wrapper: adds in-game score deltas, terminal win/lose
    # bonuses, and hand-tuned wrong-path traps for stages 7-4 and 4-4.
    def __init__(self, env=None, world=None, stage=None, monitor=None):
        super(CustomReward, self).__init__(env)
        # observations are 1x84x84 grayscale frames (see process_frame)
        self.observation_space = Box(low=0, high=255, shape=(1, 84, 84))
        self.curr_score = 0
        self.current_x = 40
        self.world = world
        self.stage = stage
        if monitor:
            self.monitor = monitor
        else:
            self.monitor = None
    def step(self, action):
        state, reward, done, info = self.env.step(action)
        # record the raw RGB frame before downsampling
        if self.monitor:
            self.monitor.record(state)
        state = process_frame(state)
        # shaped reward: score gained since the previous step, scaled down
        reward += (info["score"] - self.curr_score) / 40.
        self.curr_score = info["score"]
        if done:
            # big terminal bonus for reaching the flag, penalty otherwise
            if info["flag_get"]:
                reward += 50
            else:
                reward -= 50
        if self.world == 7 and self.stage == 4:
            # NOTE(review): hard-coded (x_pos, y_pos) windows appear to detect
            # wrong-path/maze loops in stage 7-4 (plus a large backward jump);
            # derived from the level map - verify before changing
            if (506 <= info["x_pos"] <= 832 and info["y_pos"] > 127) or (
                    832 < info["x_pos"] <= 1064 and info["y_pos"] < 80) or (
                    1113 < info["x_pos"] <= 1464 and info["y_pos"] < 191) or (
                    1579 < info["x_pos"] <= 1943 and info["y_pos"] < 191) or (
                    1946 < info["x_pos"] <= 1964 and info["y_pos"] >= 191) or (
                    1984 < info["x_pos"] <= 2060 and (info["y_pos"] >= 191 or info["y_pos"] < 127)) or (
                    2114 < info["x_pos"] < 2440 and info["y_pos"] < 191) or info["x_pos"] < self.current_x - 500:
                reward -= 50
                done = True
        if self.world == 4 and self.stage == 4:
            # same idea for the stage 4-4 maze
            if (info["x_pos"] <= 1500 and info["y_pos"] < 127) or (
                    1588 <= info["x_pos"] < 2380 and info["y_pos"] >= 127):
                reward = -50
                done = True
        self.current_x = info["x_pos"]
        # final reward is scaled down by 10
        return state, reward / 10., done, info
    def reset(self):
        # reset the shaping state along with the environment
        self.curr_score = 0
        self.current_x = 40
        return process_frame(self.env.reset())
class CustomSkipFrame(Wrapper):
    # Action-repeat wrapper: repeats each action `skip` times and keeps a
    # rolling stack of the last `skip` frames as the observation.
    def __init__(self, env, skip=4):
        super(CustomSkipFrame, self).__init__(env)
        self.observation_space = Box(low=0, high=255, shape=(skip, 84, 84))
        self.skip = skip
        # rolling frame stack, newest frame last
        self.states = np.zeros((skip, 84, 84), dtype=np.float32)
    def step(self, action):
        # Repeat `action` for `skip` frames, summing rewards; the new stack
        # slot is the pixelwise max over the later half of the frames
        # (max-pooling reduces NES sprite flicker).
        total_reward = 0
        last_states = []
        for i in range(self.skip):
            state, reward, done, info = self.env.step(action)
            total_reward += reward
            if i >= self.skip / 2:
                last_states.append(state)
            if done:
                # episode ended mid-repeat: reset and return the fresh stack
                self.reset()
                return self.states[None, :, :, :].astype(np.float32), total_reward, done, info
        max_state = np.max(np.concatenate(last_states, 0), 0)
        # shift the stack left and append the new max-pooled frame
        self.states[:-1] = self.states[1:]
        self.states[-1] = max_state
        return self.states[None, :, :, :].astype(np.float32), total_reward, done, info
    def reset(self):
        # fill the whole stack with copies of the initial frame
        state = self.env.reset()
        self.states = np.concatenate([state for _ in range(self.skip)], 0)
        return self.states[None, :, :, :].astype(np.float32)
def create_train_env(world, stage, actions, output_path=None):
    """Build a fully wrapped Super Mario Bros environment for training.

    Applies, in order: the joypad action restriction, reward shaping
    (CustomReward), and frame skipping/stacking (CustomSkipFrame).
    When `output_path` is given, raw frames are also recorded to video.
    """
    env_id = "SuperMarioBros-{}-{}-v0".format(world, stage)
    env = gym_super_mario_bros.make(env_id)
    monitor = Monitor(256, 240, output_path) if output_path else None
    env = JoypadSpace(env, actions)
    env = CustomReward(env, world, stage, monitor)
    return CustomSkipFrame(env)
class MultipleEnvironments:
    # Runs num_envs environments in separate worker processes, each driven
    # over its own multiprocessing pipe (agent end / env end pair).
    def __init__(self, world, stage, action_type, num_envs, output_path=None):
        # one (agent, env) connection pair per environment
        self.agent_conns, self.env_conns = zip(*[mp.Pipe() for _ in range(num_envs)])
        # map the action_type string to a nes-py action set
        if action_type == "right":
            actions = RIGHT_ONLY
        elif action_type == "simple":
            actions = SIMPLE_MOVEMENT
        else:
            actions = COMPLEX_MOVEMENT
        self.envs = [create_train_env(world, stage, actions, output_path=output_path) for _ in range(num_envs)]
        self.num_states = self.envs[0].observation_space.shape[0]
        self.num_actions = len(actions)
        for index in range(num_envs):
            # each worker inherits self (and its env) when the process starts
            # NOTE(review): relies on fork-style start method; confirm on
            # platforms where spawn is the default
            process = mp.Process(target=self.run, args=(index,))
            process.start()
            # the parent keeps only the agent end of each pipe
            self.env_conns[index].close()
    def run(self, index):
        # Worker loop: serve "step"/"reset" requests for environment `index`.
        self.agent_conns[index].close()
        while True:
            request, action = self.env_conns[index].recv()
            if request == "step":
                # action arrives as a tensor; .item() unboxes the scalar
                self.env_conns[index].send(self.envs[index].step(action.item()))
            elif request == "reset":
                self.env_conns[index].send(self.envs[index].reset())
            else:
                raise NotImplementedError
# ---- pyminer.py ----
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after a failed/empty RPC round.
ERR_SLEEP = 15
# Initial nonce scan range per getwork pass (Python 2 long literal);
# re-tuned each round from the measured hash rate.
MAX_NONCE = 1000000L

# key=value pairs parsed from the config file given on the command line.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP basic auth
    (Python 2: uses httplib and str-based base64)."""
    # Request id counter; incremented per call. NOTE(review): `self.OBJID += 1`
    # creates an instance attribute shadowing this class attribute on first use.
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # httplib signature: (host, port, strict=False, timeout=30 seconds)
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        # Issue one JSON-RPC call and return the 'result' field, or None on
        # transport/decoding failure.
        # NOTE(review): on an RPC-level error this returns the 'error' object
        # itself rather than None, so callers cannot distinguish it from a
        # successful result — confirm this is intended.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        # Convenience wrapper for the 'getblockcount' RPC.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # 'getwork': with no data fetches new work; with data submits a solution.
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate an integer to its unsigned low 32 bits."""
    return x & 0xFFFFFFFF
def bytereverse(x):
    """Reverse the byte order of a 32-bit word, e.g. 0x12345678 -> 0x78563412."""
    swapped = ((x << 24) |
               ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) |
               (x >> 24))
    # mask back down to 32 bits (inlined uint32)
    return swapped & 0xFFFFFFFF
def bufreverse(in_buf):
    # Byte-reverse each 32-bit word of `in_buf` in place order; length is
    # assumed to be a multiple of 4 (Python 2: operates on 8-bit str buffers).
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in `in_buf` (bytes within each
    word keep their order)."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Single getwork miner (Python 2): fetch work over RPC, scan nonces for a
    double-sha256 hash below target, and submit any solution upstream."""

    def __init__(self, id):
        self.id = id
        # nonce scan range for one pass; re-tuned each iterate() round
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        # Scan up to self.max_nonce nonces over the given block header.
        # Returns (hashes_done, nonce_bin) where nonce_bin is None when no
        # proof-of-work was found.
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        # Splice the winning nonce back into the original getwork data and
        # submit it upstream.
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # nonce occupies header bytes 76..80 == hex characters 152..160
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        # One mining round: fetch work, scan nonces, retune the scan size so a
        # pass takes ~settings['scantime'] seconds, and submit if solved.
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # scale next pass to the measured hash rate
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        # Mine forever against the configured RPC endpoint.
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        # NOTE(review): BitcoinRPC() never returns None — this check looks dead.
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Entry point for one mining process: run a Miner's loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the config file: '#' comment lines are skipped, everything else is
    # expected to be 'key = value'.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # defaults for optional settings
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 40376
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # normalize types (all config values are parsed as strings)
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # spawn one miner process per configured "thread"
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
# ---- artifacts.py ----
import json
import yaml
import mimetypes
import os
import pickle
from six.moves.urllib.parse import quote
from copy import deepcopy
from datetime import datetime
from multiprocessing import RLock, Event
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp, mkstemp
from threading import Thread
from time import time
from zipfile import ZipFile, ZIP_DEFLATED
import six
from PIL import Image
from pathlib2 import Path
from six.moves.urllib.parse import urlparse
from typing import Dict, Union, Optional, Any, Sequence
from ..backend_api import Session
from ..backend_api.services import tasks
from ..backend_interface.metrics.events import UploadEvent
from ..debugging.log import LoggerRoot
from ..storage.helper import remote_driver_schemes
from ..storage.util import sha256sum, format_size, get_common_path
from ..utilities.proxy_object import LazyEvalWrapper
# Optional dependencies: pandas / numpy / stdlib pathlib may be absent;
# the feature branches below check these names for None before use.
try:
    import pandas as pd
    DataFrame = pd.DataFrame
except ImportError:
    pd = None
    DataFrame = None
try:
    import numpy as np
except ImportError:
    np = None
try:
    from pathlib import Path as pathlib_Path
except ImportError:
    pathlib_Path = None
class Artifact(object):
    """
    Read-Only Artifact object

    Wraps a backend `tasks.Artifact` API object and lazily materializes its
    content via :meth:`get` / :meth:`get_local_copy`.
    """
    # sentinel marking "content not deserialized yet" (None is a valid value)
    _not_set = object()

    @property
    def url(self):
        # type: () -> str
        """
        :return: The URL of uploaded artifact.
        """
        return self._url

    @property
    def name(self):
        # type: () -> str
        """
        :return: The name of artifact.
        """
        return self._name

    @property
    def size(self):
        # type: () -> int
        """
        :return: The size in bytes of artifact.
        """
        return self._size

    @property
    def type(self):
        # type: () -> str
        """
        :return: The type (str) of artifact.
        """
        return self._type

    @property
    def mode(self):
        # type: () -> Union["input", "output"]  # noqa: F821
        """
        :return: The mode (str) of artifact: "input" or "output".
        """
        return self._mode

    @property
    def hash(self):
        # type: () -> str
        """
        :return: SHA2 hash (str) of artifact content.
        """
        return self._hash

    @property
    def timestamp(self):
        # type: () -> datetime
        """
        :return: Timestamp (datetime) of uploaded artifact.
        """
        return self._timestamp

    @property
    def metadata(self):
        # type: () -> Optional[Dict[str, str]]
        """
        :return: Key/Value dictionary attached to artifact.
        """
        return self._metadata

    @property
    def preview(self):
        # type: () -> str
        """
        :return: A string (str) representation of the artifact.
        """
        return self._preview

    def __init__(self, artifact_api_object):
        """
        construct read-only object from api artifact object

        :param tasks.Artifact artifact_api_object:
        """
        self._name = artifact_api_object.key
        self._size = artifact_api_object.content_size
        self._type = artifact_api_object.type
        self._mode = artifact_api_object.mode
        self._url = artifact_api_object.uri
        self._hash = artifact_api_object.hash
        self._timestamp = datetime.fromtimestamp(artifact_api_object.timestamp or 0)
        self._metadata = dict(artifact_api_object.display_data) if artifact_api_object.display_data else {}
        self._preview = artifact_api_object.type_data.preview if artifact_api_object.type_data else None
        self._content_type = artifact_api_object.type_data.content_type if artifact_api_object.type_data else None
        self._object = self._not_set

    def get(self, force_download=False):
        # type: (bool) -> Any
        """
        Return an object constructed from the artifact file

        Currently supported types: Numpy.array, pandas.DataFrame, PIL.Image, dict.
        Supported content types are:
            - dict - ``.json``, ``.yaml``
            - pandas.DataFrame - ``.csv.gz``, ``.parquet``, ``.feather``, ``.pickle``
            - numpy.ndarray - ``.npz``, ``.csv.gz``
            - PIL.Image - whatever content types PIL supports
        All other types will return a pathlib2.Path object pointing to a local copy of the artifacts file (or directory).
        In case the content of a supported type could not be parsed, a pathlib2.Path object
        pointing to a local copy of the artifacts file (or directory) will be returned

        :param bool force_download: download file from remote even if exists in local cache
        :return: One of the following objects Numpy.array, pandas.DataFrame, PIL.Image, dict (json), or pathlib2.Path.
        """
        # return cached result from a previous call
        if self._object is not self._not_set:
            return self._object
        local_file = self.get_local_copy(raise_on_error=True, force_download=force_download)
        # noinspection PyBroadException
        try:
            if self.type == "numpy" and np:
                if self._content_type == "text/csv":
                    self._object = np.genfromtxt(local_file, delimiter=",")
                else:
                    self._object = np.load(local_file)[self.name]
            # BUGFIX: was `self.type == Artifacts._pd_artifact_type or self.type == "pandas" and pd`;
            # due to operator precedence the branch also ran when pandas is not
            # installed (pd is None), then crashed on pd.* and logged a
            # spurious warning before falling back to a local path.
            elif pd and self.type in (Artifacts._pd_artifact_type, "pandas"):
                if self._content_type == "application/parquet":
                    self._object = pd.read_parquet(local_file)
                elif self._content_type == "application/feather":
                    self._object = pd.read_feather(local_file)
                elif self._content_type == "application/pickle":
                    self._object = pd.read_pickle(local_file)
                elif self.type == Artifacts._pd_artifact_type:
                    # data-audit tables store the index as a regular column
                    self._object = pd.read_csv(local_file)
                else:
                    self._object = pd.read_csv(local_file, index_col=[0])
            elif self.type == "image":
                self._object = Image.open(local_file)
            elif self.type == "JSON" or self.type == "dict":
                with open(local_file, "rt") as f:
                    if self.type == "JSON" or self._content_type == "application/json":
                        self._object = json.load(f)
                    else:
                        self._object = yaml.safe_load(f)
            elif self.type == "string":
                with open(local_file, "rt") as f:
                    self._object = f.read()
            elif self.type == "pickle":
                # NOTE: pickle.load executes arbitrary code — only load
                # artifacts originating from trusted tasks.
                with open(local_file, "rb") as f:
                    self._object = pickle.load(f)
        except Exception as e:
            LoggerRoot.get_base_logger().warning(
                "Exception '{}' encountered when getting artifact with type {} and content type {}".format(
                    e, self.type, self._content_type
                )
            )
        # unknown type or parsing failed: fall back to a local path
        if self._object is self._not_set:
            local_file = Path(local_file)
            self._object = local_file
        return self._object

    def get_local_copy(self, extract_archive=True, raise_on_error=False, force_download=False):
        # type: (bool, bool, bool) -> str
        """
        :param bool extract_archive: If True and artifact is of type 'archive' (compressed folder)
            The returned path will be a temporary folder containing the archive content
        :param bool raise_on_error: If True and the artifact could not be downloaded,
            raise ValueError, otherwise return None on failure and output log warning.
        :param bool force_download: download file from remote even if exists in local cache
        :raise: Raises error if local copy not found.
        :return: A local path to a downloaded copy of the artifact.
        """
        from clearml.storage import StorageManager
        local_copy = StorageManager.get_local_copy(
            remote_url=self.url,
            extract_archive=extract_archive and self.type == 'archive',
            name=self.name,
            force_download=force_download
        )
        if raise_on_error and local_copy is None:
            raise ValueError(
                "Could not retrieve a local copy of artifact {}, failed downloading {}".format(self.name, self.url))
        return local_copy

    def __repr__(self):
        return str({'name': self.name, 'size': self.size, 'type': self.type, 'mode': self.mode, 'url': self.url,
                    'hash': self.hash, 'timestamp': self.timestamp,
                    'metadata': self.metadata, 'preview': self.preview, })
class Artifacts(object):
max_preview_size_bytes = 65536
_flush_frequency_sec = 300.
# notice these two should match
_save_format = '.csv.gz'
_compression = 'gzip'
# hashing constants
_hash_block_size = 65536
_pd_artifact_type = 'data-audit-table'
class _ProxyDictWrite(dict):
    """ Dictionary wrapper that updates an arguments instance on any item set in the dictionary """

    def __init__(self, artifacts_manager, *args, **kwargs):
        super(Artifacts._ProxyDictWrite, self).__init__(*args, **kwargs)
        # back-reference to the owning Artifacts manager (may be None)
        self._artifacts_manager = artifacts_manager
        # list of artifacts we should not upload (by name & weak-reference)
        self.artifact_metadata = {}
        # list of hash columns to calculate uniqueness for the artifacts
        self.artifact_hash_columns = {}

    def __setitem__(self, key, value):
        # check that value is of type pandas
        if pd and isinstance(value, pd.DataFrame):
            super(Artifacts._ProxyDictWrite, self).__setitem__(key, value)
            # signal the owning manager so the daemon picks up the change
            if self._artifacts_manager:
                self._artifacts_manager.flush()
        else:
            raise ValueError('Artifacts currently support pandas.DataFrame objects only')

    def unregister_artifact(self, name):
        # Drop the artifact and its metadata from the watch list.
        self.artifact_metadata.pop(name, None)
        self.pop(name, None)

    def add_metadata(self, name, metadata):
        # Store a deep copy so later caller-side mutation has no effect.
        self.artifact_metadata[name] = deepcopy(metadata)

    def get_metadata(self, name):
        return self.artifact_metadata.get(name)

    def add_hash_columns(self, artifact_name, hash_columns):
        # Columns used for the uniqueness/hash comparison of this artifact.
        self.artifact_hash_columns[artifact_name] = hash_columns

    def get_hash_columns(self, artifact_name):
        return self.artifact_hash_columns.get(artifact_name)
@property
def registered_artifacts(self):
    # type: () -> Dict[str, Artifact]
    # Live view of the DataFrames registered via register_artifact().
    return self._artifacts_container
@property
def summary(self):
    # type: () -> str
    # Textual statistics summary produced by the background daemon pass.
    return self._summary
def __init__(self, task):
self._task = task
# notice the double link, this is important since the Artifact
# dictionary needs to signal the Artifacts base on changes
self._artifacts_container = self._ProxyDictWrite(self)
self._last_artifacts_upload = {}
self._unregister_request = set()
self._thread = None
self._flush_event = Event()
self._exit_flag = False
self._summary = ''
self._temp_folder = []
self._task_artifact_list = []
self._task_edit_lock = RLock()
self._storage_prefix = None
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, DataFrame, Optional[dict], Union[bool, Sequence[str]]) -> ()
"""
:param str name: name of the artifacts. Notice! it will override previous artifacts if name already exists.
:param pandas.DataFrame artifact: artifact object, supported artifacts object types: pandas.DataFrame
:param dict metadata: dictionary of key value to store with the artifact (visible in the UI)
:param list uniqueness_columns: list of columns for artifact uniqueness comparison criteria. The default value
is True, which equals to all the columns (same as artifact.columns).
"""
# currently we support pandas.DataFrame (which we will upload as csv.gz)
if name in self._artifacts_container:
LoggerRoot.get_base_logger().info('Register artifact, overwriting existing artifact \"{}\"'.format(name))
self._artifacts_container.add_hash_columns(
name, list(artifact.columns if uniqueness_columns is True else uniqueness_columns)
)
self._artifacts_container[name] = artifact
if metadata:
self._artifacts_container.add_metadata(name, metadata)
def unregister_artifact(self, name):
    # type: (str) -> ()
    # Remove artifact from the watch list; the actual removal is performed by
    # the daemon pass triggered via flush().
    self._unregister_request.add(name)
    self.flush()
def upload_artifact(self, name, artifact_object=None, metadata=None, preview=None,
                    delete_after_upload=False, auto_pickle=True, wait_on_upload=False, extension_name=None):
    # type: (str, Optional[object], Optional[dict], Optional[str], bool, bool, bool, Optional[str]) -> bool
    """
    Serialize `artifact_object` according to its type, upload it, and register
    it on the task.

    :param name: artifact name; must not collide with a register_artifact() name
    :param artifact_object: object to upload (numpy.ndarray, pandas.DataFrame,
        PIL.Image, dict, path/list of paths, remote URL string, text, or any
        picklable object when `auto_pickle` is True)
    :param metadata: optional key/value metadata shown in the UI
    :param preview: optional preview text shown in the UI
    :param delete_after_upload: delete the local file after a successful upload
    :param auto_pickle: fall back to pickle for unsupported object types
    :param wait_on_upload: block until the upload completes
    :param extension_name: preferred file extension for the serialized artifact
    :return: True on success, False on failure
    """
    if not Session.check_min_api_version('2.3'):
        LoggerRoot.get_base_logger().warning('Artifacts not supported by your ClearML-server version, '
                                             'please upgrade to the latest server version')
        return False

    if name in self._artifacts_container:
        raise ValueError("Artifact by the name of {} is already registered, use register_artifact".format(name))

    # cast preview to string
    if preview is not None and not (isinstance(preview, bool) and preview is False):
        preview = str(preview)

    # evaluate lazy proxy object
    if isinstance(artifact_object, LazyEvalWrapper):
        # noinspection PyProtectedMember
        artifact_object = LazyEvalWrapper._load_object(artifact_object)

    pathlib_types = (Path, pathlib_Path,) if pathlib_Path is not None else (Path,)
    local_filename = None

    # try to convert string Path object (it might reference a file/folder)
    # dont not try to serialize long texts.
    if isinstance(artifact_object, six.string_types) and artifact_object and len(artifact_object) < 2048:
        # noinspection PyBroadException
        try:
            artifact_path = Path(artifact_object)
            if artifact_path.exists():
                artifact_object = artifact_path
            elif '*' in artifact_object or '?' in artifact_object:
                # hackish, detect wildcard in tr files
                folder = Path('').joinpath(*artifact_path.parts[:-1])
                if folder.is_dir() and folder.parts:
                    wildcard = artifact_path.parts[-1]
                    if list(Path(folder).rglob(wildcard)):
                        artifact_object = artifact_path
        except Exception:
            pass

    store_as_pickle = False
    artifact_type_data = tasks.ArtifactTypeData()
    artifact_type_data.preview = ''
    override_filename_in_uri = None
    override_filename_ext_in_uri = None
    uri = None

    def get_extension(extension_name_, valid_extensions, default_extension, artifact_type_):
        # Validate the caller-requested extension against what the serializer
        # supports, warning and falling back to the default when invalid.
        if not extension_name_:
            return default_extension
        if extension_name_ in valid_extensions:
            return extension_name_
        LoggerRoot.get_base_logger().warning(
            "{} artifact can not be uploaded with extension {}. Valid extensions are: {}. Defaulting to {}.".format(
                artifact_type_, extension_name_, ", ".join(valid_extensions), default_extension
            )
        )
        return default_extension

    # --- numpy arrays: .npz (default) or .csv.gz ---
    if np and isinstance(artifact_object, np.ndarray):
        artifact_type = 'numpy'
        artifact_type_data.preview = preview or str(artifact_object.__repr__())
        override_filename_ext_in_uri = get_extension(
            extension_name, [".npz", ".csv.gz"], ".npz", artifact_type
        )
        override_filename_in_uri = name + override_filename_ext_in_uri
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
        os.close(fd)
        if override_filename_ext_in_uri == ".npz":
            artifact_type_data.content_type = "application/numpy"
            np.savez_compressed(local_filename, **{name: artifact_object})
        elif override_filename_ext_in_uri == ".csv.gz":
            artifact_type_data.content_type = "text/csv"
            np.savetxt(local_filename, artifact_object, delimiter=",")
        delete_after_upload = True

    # --- pandas DataFrames: csv.gz (default), parquet, feather or pickle ---
    elif pd and isinstance(artifact_object, pd.DataFrame):
        artifact_type = "pandas"
        artifact_type_data.preview = preview or str(artifact_object.__repr__())
        override_filename_ext_in_uri = get_extension(
            extension_name, [".csv.gz", ".parquet", ".feather", ".pickle"], ".csv.gz", artifact_type
        )
        override_filename_in_uri = name
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
        os.close(fd)
        if override_filename_ext_in_uri == ".csv.gz":
            artifact_type_data.content_type = "text/csv"
            artifact_object.to_csv(local_filename, compression=self._compression)
        elif override_filename_ext_in_uri == ".parquet":
            try:
                artifact_type_data.content_type = "application/parquet"
                artifact_object.to_parquet(local_filename)
            except Exception as e:
                # parquet engine missing / unsupported dtypes: fall back to csv
                LoggerRoot.get_base_logger().warning(
                    "Exception '{}' encountered when uploading artifact as .parquet. Defaulting to .csv.gz".format(
                        e
                    )
                )
                artifact_type_data.content_type = "text/csv"
                artifact_object.to_csv(local_filename, compression=self._compression)
        elif override_filename_ext_in_uri == ".feather":
            try:
                artifact_type_data.content_type = "application/feather"
                artifact_object.to_feather(local_filename)
            except Exception as e:
                LoggerRoot.get_base_logger().warning(
                    "Exception '{}' encountered when uploading artifact as .feather. Defaulting to .csv.gz".format(
                        e
                    )
                )
                artifact_type_data.content_type = "text/csv"
                artifact_object.to_csv(local_filename, compression=self._compression)
        elif override_filename_ext_in_uri == ".pickle":
            artifact_type_data.content_type = "application/pickle"
            artifact_object.to_pickle(local_filename)
        delete_after_upload = True

    # --- PIL images: saved with whatever extension PIL supports ---
    elif isinstance(artifact_object, Image.Image):
        artifact_type = "image"
        artifact_type_data.content_type = "image/png"
        desc = str(artifact_object.__repr__())
        # strip the trailing " at 0x..." from the PIL repr for the preview
        artifact_type_data.preview = preview or desc[1:desc.find(' at ')]
        # noinspection PyBroadException
        try:
            if not Image.EXTENSION:
                Image.init()
                if not Image.EXTENSION:
                    raise Exception()
            override_filename_ext_in_uri = get_extension(
                extension_name, Image.EXTENSION.keys(), ".png", artifact_type
            )
        except Exception:
            override_filename_ext_in_uri = ".png"
            if extension_name and extension_name != ".png":
                LoggerRoot.get_base_logger().warning(
                    "image artifact can not be uploaded with extension {}. Defaulting to .png.".format(
                        extension_name
                    )
                )
        override_filename_in_uri = name + override_filename_ext_in_uri
        artifact_type_data.content_type = "image/unknown-type"
        guessed_type = mimetypes.guess_type(override_filename_in_uri)[0]
        if guessed_type:
            artifact_type_data.content_type = guessed_type
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
        os.close(fd)
        artifact_object.save(local_filename)
        delete_after_upload = True

    # --- dicts: serialized as JSON (default) or YAML ---
    elif isinstance(artifact_object, dict):
        artifact_type = "dict"
        override_filename_ext_in_uri = get_extension(extension_name, [".json", ".yaml"], ".json", artifact_type)
        if override_filename_ext_in_uri == ".json":
            artifact_type_data.content_type = "application/json"
            # noinspection PyBroadException
            try:
                serialized_text = json.dumps(artifact_object, sort_keys=True, indent=4)
            except Exception:
                if not auto_pickle:
                    raise
                LoggerRoot.get_base_logger().warning(
                    "JSON serialization of artifact \'{}\' failed, reverting to pickle".format(name))
                store_as_pickle = True
                serialized_text = None
        else:
            artifact_type_data.content_type = "application/yaml"
            # noinspection PyBroadException
            try:
                serialized_text = yaml.dump(artifact_object, sort_keys=True, indent=4)
            except Exception:
                if not auto_pickle:
                    raise
                LoggerRoot.get_base_logger().warning(
                    "YAML serialization of artifact \'{}\' failed, reverting to pickle".format(name))
                store_as_pickle = True
                serialized_text = None
        if serialized_text is not None:
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + ".", suffix=override_filename_ext_in_uri)
            os.write(fd, bytes(serialized_text.encode()))
            os.close(fd)
            preview = preview or serialized_text
            if len(preview) < self.max_preview_size_bytes:
                artifact_type_data.preview = preview
            else:
                artifact_type_data.preview = (
                    "# full serialized dict too large to store, storing first {}kb\n{}".format(
                        self.max_preview_size_bytes // 1024, preview[: self.max_preview_size_bytes]
                    )
                )
            delete_after_upload = True

    # --- a single local path: upload the file as-is, or zip a folder/wildcard ---
    elif isinstance(artifact_object, pathlib_types):
        # check if single file
        artifact_object = Path(artifact_object)

        artifact_object.expanduser().absolute()
        # noinspection PyBroadException
        try:
            create_zip_file = not artifact_object.is_file()
        except Exception:  # Hack for windows pathlib2 bug, is_file isn't valid.
            create_zip_file = True
        else:  # We assume that this is not Windows os
            if artifact_object.is_dir():
                # change to wildcard
                artifact_object /= '*'

        if create_zip_file:
            folder = Path('').joinpath(*artifact_object.parts[:-1])
            if not folder.is_dir() or not folder.parts:
                raise ValueError("Artifact file/folder '{}' could not be found".format(
                    artifact_object.as_posix()))

            wildcard = artifact_object.parts[-1]
            files = list(Path(folder).rglob(wildcard))
            override_filename_ext_in_uri = '.zip'
            override_filename_in_uri = folder.parts[-1] + override_filename_ext_in_uri
            fd, zip_file = mkstemp(
                prefix=quote(folder.parts[-1], safe="") + '.', suffix=override_filename_ext_in_uri
            )
            try:
                artifact_type_data.content_type = 'application/zip'
                archive_preview = 'Archive content {}:\n'.format(artifact_object.as_posix())

                with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
                    for filename in sorted(files):
                        if filename.is_file():
                            relative_file_name = filename.relative_to(folder).as_posix()
                            archive_preview += '{} - {}\n'.format(
                                relative_file_name, format_size(filename.stat().st_size))
                            zf.write(filename.as_posix(), arcname=relative_file_name)
            except Exception as e:
                # failed uploading folder:
                # BUGFIX: the format arguments were swapped (`folder, e`),
                # printing the folder in the exception slot and vice versa.
                LoggerRoot.get_base_logger().warning('Exception {}\nFailed zipping artifact folder {}'.format(
                    e, folder))
                return False
            finally:
                os.close(fd)
            artifact_type_data.preview = preview or archive_preview
            artifact_object = zip_file
            artifact_type = 'archive'
            artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
            local_filename = artifact_object
            delete_after_upload = True
        else:
            if not artifact_object.is_file():
                raise ValueError("Artifact file '{}' could not be found".format(artifact_object.as_posix()))

            override_filename_in_uri = artifact_object.parts[-1]
            artifact_type_data.preview = preview or '{} - {}\n'.format(
                artifact_object, format_size(artifact_object.stat().st_size))
            artifact_object = artifact_object.as_posix()
            artifact_type = 'custom'
            artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
            local_filename = artifact_object

    # --- a list/tuple of local paths: zip them together ---
    elif isinstance(artifact_object, (list, tuple)) and all(isinstance(p, pathlib_types) for p in artifact_object):
        # find common path if exists
        list_files = [Path(p) for p in artifact_object]
        override_filename_ext_in_uri = '.zip'
        override_filename_in_uri = quote(name, safe="") + override_filename_ext_in_uri
        common_path = get_common_path(list_files)
        fd, zip_file = mkstemp(
            prefix='artifact_folder.', suffix=override_filename_ext_in_uri
        )
        try:
            artifact_type_data.content_type = 'application/zip'
            archive_preview = 'Archive content:\n'

            with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
                for filename in sorted(list_files):
                    if filename.is_file():
                        relative_file_name = filename.relative_to(Path(common_path)).as_posix() \
                            if common_path else filename.as_posix()
                        archive_preview += '{} - {}\n'.format(
                            relative_file_name, format_size(filename.stat().st_size))
                        zf.write(filename.as_posix(), arcname=relative_file_name)
                    else:
                        LoggerRoot.get_base_logger().warning(
                            "Failed zipping artifact file '{}', file not found!".format(filename.as_posix()))
        except Exception as e:
            # failed uploading folder:
            # BUGFIX: the format arguments were swapped (`artifact_object, e`),
            # printing the file list in the exception slot and vice versa.
            LoggerRoot.get_base_logger().warning('Exception {}\nFailed zipping artifact files {}'.format(
                e, artifact_object))
            return False
        finally:
            os.close(fd)
        artifact_type_data.preview = preview or archive_preview
        artifact_object = zip_file
        artifact_type = 'archive'
        artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
        local_filename = artifact_object
        delete_after_upload = True

    # --- a remote URL string: register only, nothing to upload ---
    elif (
            isinstance(artifact_object, six.string_types) and len(artifact_object) < 4096
            and urlparse(artifact_object).scheme in remote_driver_schemes
    ):
        # we should not upload this, just register
        local_filename = None
        uri = artifact_object
        artifact_type = 'custom'
        artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
        if preview:
            artifact_type_data.preview = preview

    # --- any other non-empty string: store it as a text file ---
    elif isinstance(artifact_object, six.string_types) and artifact_object:
        # if we got here, we should store it as text file.
        artifact_type = 'string'
        artifact_type_data.content_type = 'text/plain'
        if preview:
            artifact_type_data.preview = preview
        elif len(artifact_object) < self.max_preview_size_bytes:
            artifact_type_data.preview = artifact_object
        else:
            artifact_type_data.preview = '# full text too large to store, storing first {}kb\n{}'.format(
                self.max_preview_size_bytes//1024, artifact_object[:self.max_preview_size_bytes]
            )
        delete_after_upload = True
        override_filename_ext_in_uri = ".txt"
        override_filename_in_uri = name + override_filename_ext_in_uri
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + ".", suffix=override_filename_ext_in_uri)
        os.close(fd)
        # noinspection PyBroadException
        try:
            with open(local_filename, "wt") as f:
                f.write(artifact_object)
        except Exception:
            # cleanup and raise exception
            os.unlink(local_filename)
            raise

    elif artifact_object is None or (isinstance(artifact_object, str) and artifact_object == ""):
        artifact_type = ''
        store_as_pickle = True
    elif auto_pickle:
        # revert to pickling the object
        store_as_pickle = True
    else:
        raise ValueError("Artifact type {} not supported".format(type(artifact_object)))

    # revert to serializing the object with pickle
    if store_as_pickle:
        # if we are here it means we do not know what to do with the object, so we serialize it with pickle.
        artifact_type = 'pickle'
        artifact_type_data.content_type = 'application/pickle'
        # noinspection PyBroadException
        try:
            artifact_type_data.preview = preview or str(artifact_object.__repr__())[:self.max_preview_size_bytes]
        except Exception:
            artifact_type_data.preview = preview or ''
        delete_after_upload = True
        override_filename_ext_in_uri = '.pkl'
        override_filename_in_uri = name + override_filename_ext_in_uri
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
        os.close(fd)
        # noinspection PyBroadException
        try:
            with open(local_filename, 'wb') as f:
                pickle.dump(artifact_object, f)
        except Exception:
            # cleanup and raise exception
            os.unlink(local_filename)
            raise

    # verify preview not out of scope:
    if artifact_type_data.preview and len(artifact_type_data.preview) > (self.max_preview_size_bytes+1024):
        artifact_type_data.preview = '# full preview too large to store, storing first {}kb\n{}'.format(
            self.max_preview_size_bytes // 1024, artifact_type_data.preview[:self.max_preview_size_bytes]
        )

    # remove from existing list, if exists
    for artifact in self._task_artifact_list:
        if artifact.key == name:
            if artifact.type == self._pd_artifact_type:
                raise ValueError("Artifact of name {} already registered, "
                                 "use register_artifact instead".format(name))

            self._task_artifact_list.remove(artifact)
            break

    if not local_filename:
        file_size = None
        file_hash = None
    else:
        # check that the file to upload exists
        local_filename = Path(local_filename).absolute()
        if not local_filename.exists() or not local_filename.is_file():
            LoggerRoot.get_base_logger().warning('Artifact upload failed, cannot find file {}'.format(
                local_filename.as_posix()))
            return False

        file_hash, _ = sha256sum(local_filename.as_posix(), block_size=Artifacts._hash_block_size)
        file_size = local_filename.stat().st_size

        uri = self._upload_local_file(local_filename, name,
                                      delete_after_upload=delete_after_upload,
                                      override_filename=override_filename_in_uri,
                                      override_filename_ext=override_filename_ext_in_uri,
                                      wait_on_upload=wait_on_upload)

    timestamp = int(time())

    artifact = tasks.Artifact(key=name, type=artifact_type,
                              uri=uri,
                              content_size=file_size,
                              hash=file_hash,
                              timestamp=timestamp,
                              type_data=artifact_type_data,
                              display_data=[(str(k), str(v)) for k, v in metadata.items()] if metadata else None)

    # update task artifacts
    self._add_artifact(artifact)

    return True
def flush(self):
    # type: () -> ()
    """Trigger an immediate upload pass over all registered artifacts."""
    # start the thread if it hasn't already:
    self._start()
    # flush the current state of all artifacts
    self._flush_event.set()
def stop(self, wait=True):
    # type: (bool) -> ()
    """Signal the upload daemon to exit, optionally waiting for it.

    :param wait: If True, block until the daemon thread terminates, then
        remove any temporary folders created for artifact staging.
    """
    # stop the daemon thread and quit
    # wait until thread exists
    self._exit_flag = True
    self._flush_event.set()
    if wait:
        if self._thread:
            self._thread.join()
        # remove all temp folders
        for f in self._temp_folder:
            # noinspection PyBroadException
            try:
                Path(f).rmdir()
            except Exception:
                # best effort: folder may be non-empty or already removed
                pass
def _start(self):
    # type: () -> ()
    """ Start daemon thread if any artifacts are registered and thread is not up yet """
    if not self._thread and self._artifacts_container:
        # start the daemon thread
        self._flush_event.clear()
        self._thread = Thread(target=self._daemon)
        # daemon=True so the thread never blocks interpreter shutdown
        self._thread.daemon = True
        self._thread.start()
def _daemon(self):
    # type: () -> ()
    """Background loop: periodically upload registered artifacts until stop() is called."""
    while not self._exit_flag:
        # wake every _flush_frequency_sec seconds, or immediately on flush()/stop()
        self._flush_event.wait(self._flush_frequency_sec)
        self._flush_event.clear()
        # snapshot keys so concurrent register/unregister cannot break iteration
        artifact_keys = list(self._artifacts_container.keys())
        for name in artifact_keys:
            try:
                self._upload_data_audit_artifacts(name)
            except Exception as e:
                # keep the daemon alive; a single failed artifact should not stop the loop
                LoggerRoot.get_base_logger().warning(str(e))
        # create summary
        self._summary = self._get_statistics()
def _add_artifact(self, artifact):
    """Append the artifact to the list (if new) and push the full list to the backend task."""
    if not self._task:
        raise ValueError("Task object not set")
    with self._task_edit_lock:
        if artifact not in self._task_artifact_list:
            self._task_artifact_list.append(artifact)
        # the backend API replaces the whole list, so always send everything
        # noinspection PyProtectedMember
        self._task._add_artifacts(self._task_artifact_list)
def _upload_data_audit_artifacts(self, name):
    # type: (str) -> ()
    """Serialize a registered (pandas) artifact to CSV and upload it if its content changed.

    Skips the upload entirely when the content hash matches the previous upload.
    Handles deferred unregistration requests before uploading.
    """
    logger = self._task.get_logger()
    pd_artifact = self._artifacts_container.get(name)
    pd_metadata = self._artifacts_container.get_metadata(name)
    # remove from artifacts watch list
    if name in self._unregister_request:
        try:
            self._unregister_request.remove(name)
        except KeyError:
            pass
        self._artifacts_container.unregister_artifact(name)
    if pd_artifact is None:
        return
    override_filename_ext_in_uri = self._save_format
    override_filename_in_uri = name
    fd, local_csv = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
    os.close(fd)
    local_csv = Path(local_csv)
    pd_artifact.to_csv(local_csv.as_posix(), index=False, compression=self._compression)
    # content hash skips the first 32 bytes (presumably a volatile compression
    # header -- TODO confirm), while file_sha2 covers the whole file
    current_sha2, file_sha2 = sha256sum(
        local_csv.as_posix(), skip_header=32, block_size=Artifacts._hash_block_size)
    if name in self._last_artifacts_upload:
        previous_sha2 = self._last_artifacts_upload[name]
        if previous_sha2 == current_sha2:
            # nothing to do, we can skip the upload
            # noinspection PyBroadException
            try:
                local_csv.unlink()
            except Exception:
                pass
            return
    self._last_artifacts_upload[name] = current_sha2
    # If old clearml-server, upload as debug image
    if not Session.check_min_api_version('2.3'):
        logger.report_image(title='artifacts', series=name, local_path=local_csv.as_posix(),
                            delete_after_upload=True, iteration=self._task.get_last_iteration(),
                            max_image_history=2)
        return
    # Find our artifact
    artifact = None
    for an_artifact in self._task_artifact_list:
        if an_artifact.key == name:
            artifact = an_artifact
            break
    file_size = local_csv.stat().st_size
    # upload file
    uri = self._upload_local_file(local_csv, name, delete_after_upload=True,
                                  override_filename=override_filename_in_uri,
                                  override_filename_ext=override_filename_ext_in_uri)
    # update task artifacts
    with self._task_edit_lock:
        if not artifact:
            artifact = tasks.Artifact(key=name, type=self._pd_artifact_type)
        artifact_type_data = tasks.ArtifactTypeData()
        artifact_type_data.data_hash = current_sha2
        artifact_type_data.content_type = "text/csv"
        artifact_type_data.preview = str(pd_artifact.__repr__(
        )) + '\n\n' + self._get_statistics({name: pd_artifact})
        artifact.type_data = artifact_type_data
        artifact.uri = uri
        artifact.content_size = file_size
        artifact.hash = file_sha2
        artifact.timestamp = int(time())
        artifact.display_data = [(str(k), str(v)) for k, v in pd_metadata.items()] if pd_metadata else None
        self._add_artifact(artifact)
def _upload_local_file(
    self, local_file, name, delete_after_upload=False, override_filename=None, override_filename_ext=None,
    wait_on_upload=False
):
    # type: (str, str, bool, Optional[str], Optional[str], bool) -> str
    """
    Upload local file and return uri of the uploaded file (uploading in the background)

    :param local_file: Path of the file to upload.
    :param name: Artifact name, used as the upload event variant.
    :param delete_after_upload: If True, remove the local file once uploaded.
    :param override_filename: Replace the file name in the destination URI.
    :param override_filename_ext: Replace the file extension in the destination URI.
    :param wait_on_upload: If True, upload synchronously (blocking); otherwise
        the event is queued on the task reporter and uploads in the background.
    :return: Quoted URI of the (eventually) uploaded file.
    """
    from clearml.storage import StorageManager
    upload_uri = self._task.output_uri or self._task.get_logger().get_default_upload_destination()
    if not isinstance(local_file, Path):
        local_file = Path(local_file)
    ev = UploadEvent(metric='artifacts', variant=name,
                     image_data=None, upload_uri=upload_uri,
                     local_image_path=local_file.as_posix(),
                     delete_after_upload=delete_after_upload,
                     override_filename=override_filename,
                     override_filename_ext=override_filename_ext,
                     override_storage_key_prefix=self._get_storage_uri_prefix())
    _, uri = ev.get_target_full_upload_uri(upload_uri, quote_uri=False)
    # send for upload
    # noinspection PyProtectedMember
    if wait_on_upload:
        StorageManager.upload_file(local_file.as_posix(), uri, wait_for_upload=True, retries=ev.retries)
        if delete_after_upload:
            # synchronous path must delete the file itself (background path
            # deletes via the upload event)
            try:
                os.unlink(local_file.as_posix())
            except OSError:
                LoggerRoot.get_base_logger().warning('Failed removing temporary {}'.format(local_file))
    else:
        self._task._reporter._report(ev)
    _, quoted_uri = ev.get_target_full_upload_uri(upload_uri)
    return quoted_uri
def _get_statistics(self, artifacts_dict=None):
    # type: (Optional[Dict[str, Artifact]]) -> str
    """Build a text report of row-uniqueness and pairwise intersection for dataframe artifacts.

    :param artifacts_dict: Mapping of artifact name to dataframe; defaults to
        the registered artifacts container.
    :return: Human-readable multi-line summary (empty string on failure).
    """
    summary = ''
    artifacts_dict = artifacts_dict or self._artifacts_container
    thread_pool = ThreadPool()
    try:
        # build hash row sets
        artifacts_summary = []
        for a_name, a_df in artifacts_dict.items():
            # columns configured for the uniqueness check (True means "all columns")
            hash_cols = self._artifacts_container.get_hash_columns(a_name)
            if not pd or not isinstance(a_df, pd.DataFrame):
                continue
            if hash_cols is True:
                hash_col_drop = []
            else:
                hash_cols = set(hash_cols)
                missing_cols = hash_cols.difference(a_df.columns)
                if missing_cols == hash_cols:
                    # none of the requested columns exist -- skip this artifact
                    LoggerRoot.get_base_logger().warning(
                        'Uniqueness columns {} not found in artifact {}. '
                        'Skipping uniqueness check for artifact.'.format(list(missing_cols), a_name)
                    )
                    continue
                elif missing_cols:
                    # missing_cols must be a subset of hash_cols
                    hash_cols.difference_update(missing_cols)
                    LoggerRoot.get_base_logger().warning(
                        'Uniqueness columns {} not found in artifact {}. Using {}.'.format(
                            list(missing_cols), a_name, list(hash_cols)
                        )
                    )
                hash_col_drop = [col for col in a_df.columns if col not in hash_cols]
            a_unique_hash = set()
            # hash each row (as raw bytes) into the per-artifact set; closure
            # rebinds to the fresh set on every loop iteration
            def hash_row(r):
                a_unique_hash.add(hash(bytes(r)))
            a_shape = a_df.shape
            # parallelize
            a_hash_cols = a_df.drop(columns=hash_col_drop)
            thread_pool.map(hash_row, a_hash_cols.values)
            # add result
            artifacts_summary.append((a_name, a_shape, a_unique_hash,))
        # build intersection summary
        for i, (name, shape, unique_hash) in enumerate(artifacts_summary):
            summary += '[{name}]: shape={shape}, {unique} unique rows, {percentage:.1f}% uniqueness\n'.format(
                name=name, shape=shape, unique=len(unique_hash),
                percentage=100 * len(unique_hash) / float(shape[0]))
            for name2, shape2, unique_hash2 in artifacts_summary[i + 1:]:
                intersection = len(unique_hash & unique_hash2)
                summary += '\tIntersection with [{name2}] {intersection} rows: {percentage:.1f}%\n'.format(
                    name2=name2, intersection=intersection,
                    percentage=100 * intersection / float(len(unique_hash2)))
    except Exception as e:
        # statistics are best-effort; never let them break an upload
        LoggerRoot.get_base_logger().warning(str(e))
    finally:
        thread_pool.close()
        thread_pool.terminate()
    return summary
def _get_temp_folder(self, force_new=False):
# type: (bool) -> str
if force_new or not self._temp_folder:
new_temp = mkdtemp(prefix='artifacts_')
self._temp_folder.append(new_temp)
return new_temp
return self._temp_folder[0]
def _get_storage_uri_prefix(self):
# type: () -> str
if not self._storage_prefix:
# noinspection PyProtectedMember
self._storage_prefix = self._task._get_output_destination_suffix()
return self._storage_prefix
|
worker.py | from multiprocessing import Process
from multiprocessing import Pipe
from planet import take_snapshot
from config import PLANET_INIT
from config import PLANET_ANGLE
from config import NUMBER_OF_WORKERS
from config import IMAGE_SIZE
#
# This function is executed by the worker thread of the script.
#
# conn: The endpoint connection of the Pipe used to communicate with the main thread.
#
def worker_function(conn):
    """Serve snapshot requests arriving on the pipe until told to stop.

    Protocol (unchanged): an int message is a frame index -- render it with
    take_snapshot and send the frame back, then wait for the next message.
    The string 'stop' terminates the worker cleanly; anything else prints a
    diagnostic and terminates.

    :param conn: The endpoint connection of the Pipe used to communicate with
        the main process.
    """
    # A loop replaces the original tail recursion: one recursion level per
    # rendered frame would exhaust Python's recursion limit (~1000) on long
    # renders.
    while True:
        msg = conn.recv()
        if isinstance(msg, int):
            conn.send(take_snapshot(msg, int(PLANET_INIT + msg * PLANET_ANGLE)))
            continue
        if isinstance(msg, str) and msg == 'stop':
            return
        # unknown message: report and exit, matching the original behavior
        print('Something went wrong')
        return
#
# This function initializes the worker threads.
#
# conns: The endpoints of the pipes to the main thread. This list is initially empty and filled by the function.
# processes: The processes created by the function are collected in this list.
#
def initialize_workers(conns, processes):
    """Create one pipe and one process per worker.

    Both lists are filled in place; nothing is started yet.

    :param conns: Receives the parent ends of the pipes (initially empty).
    :param processes: Receives the created (unstarted) Process objects.
    """
    for _ in range(NUMBER_OF_WORKERS):
        parent_end, child_end = Pipe()
        conns.append(parent_end)
        processes.append(Process(target=worker_function, args=(child_end,)))
    print('Workers are initialized')
#
# This function starts the worker threads.
#
# processes: The list of processes to be started.
#
def start_workers(processes):
    """Launch the first NUMBER_OF_WORKERS processes in the list.

    :param processes: The list of processes to be started.
    """
    started = 0
    while started < NUMBER_OF_WORKERS:
        processes[started].start()
        started += 1
    print('Workers have started')
#
# This function delegates the different frames to be rendered to the worker threads.
#
# conns: The pipe connections to the workers.
# counter: The current frame to be rendered.
#
def delegate_workers(conns, counter):
    """Hand the next frame indices to the workers, one frame per connection.

    Stops early once `counter` reaches IMAGE_SIZE. Note that `counter` is a
    local copy; the caller tracks the global frame counter itself.

    :param conns: The pipe connections to the workers.
    :param counter: The first frame index to hand out.
    """
    slot = 0
    while slot < NUMBER_OF_WORKERS and counter != IMAGE_SIZE:
        conns[slot].send(counter)
        counter += 1
        slot += 1
#
# This function collects the rendered images from the worker threads.
#
# conns: The pipe connections to the worker threads.
# counter: The current expected frame.
# video: The video writer used to write frames.
#
def collect_from_workers(conns, counter, video, transit):
    """Receive one rendered frame per worker, record its mean, and write it out.

    Stops early once `counter` reaches IMAGE_SIZE. `counter` is advanced
    locally only; the caller keeps the authoritative frame counter.

    :param conns: The pipe connections to the worker processes.
    :param counter: The current expected frame index.
    :param video: The video writer used to write frames.
    :param transit: Indexable store for per-frame mean pixel values
        (presumably the transit light curve -- confirm against caller).
    """
    slot = 0
    while slot < NUMBER_OF_WORKERS and counter != IMAGE_SIZE:
        frame = conns[slot].recv()
        transit[counter] = frame.mean()
        video.write(frame)
        counter += 1
        slot += 1
#
# This function stops all the worker threads and cleans up after them.
#
# conns: The pipe connections to the threads.
# processes: The list of processes to be killed.
#
def stop_workers(conns, processes):
    """Ask every worker to stop, then wait for all of them to exit.

    :param conns: The pipe connections to the worker processes.
    :param processes: The list of processes to join.
    """
    # phase 1: broadcast the stop message so all workers wind down in parallel
    index = 0
    while index < NUMBER_OF_WORKERS:
        conns[index].send('stop')
        index += 1
    # phase 2: reap them
    index = 0
    while index < NUMBER_OF_WORKERS:
        processes[index].join()
        index += 1
    print('Workers have been stopped')
|
pyrep.py | import numpy as np
from pyrep.backend import sim, utils
from pyrep.objects.object import Object
from pyrep.objects.shape import Shape
from pyrep.textures.texture import Texture
from pyrep.errors import PyRepError
import os
import sys
import time
import threading
from threading import Lock
from typing import Tuple, List
import warnings
class PyRep(object):
    """Used for interfacing with the CoppeliaSim simulation.

    Can be used for starting, stopping, and stepping the simulation. As well
    as getting, and creating scene objects and robots.
    """

    def __init__(self):
        self.running = False
        self._process = None
        self._robot_to_count = {}
        self.connected = False
        self._ui_thread = None
        self._responsive_ui_thread = None
        self._step_lock = Lock()
        self._init_thread_id = None
        self._shutting_down = False
        self._handles_to_objects = {}
        if 'COPPELIASIM_ROOT' not in os.environ:
            raise PyRepError(
                'COPPELIASIM_ROOT not defined. See installation instructions.')
        self._vrep_root = os.environ['COPPELIASIM_ROOT']
        if not os.path.exists(self._vrep_root):
            raise PyRepError(
                'COPPELIASIM_ROOT was not a correct path. '
                'See installation instructions')

    def _run_ui_thread(self, scene_file: str, headless: bool) -> None:
        # Need this otherwise extensions will not be loaded
        os.chdir(self._vrep_root)
        options = sim.sim_gui_headless if headless else sim.sim_gui_all
        sim.simExtLaunchUIThread(options=options, scene=scene_file,
                                 pyrep_root=self._vrep_root)

    def _run_responsive_ui_thread(self) -> None:
        # Keeps the UI alive while the physics simulation is idle.
        while True:
            if not self.running:
                with self._step_lock:
                    if self._shutting_down or sim.simExtGetExitRequest():
                        break
                    sim.simExtStep(False)
            time.sleep(0.01)
        # If the exit request was from the UI, then call shutdown, otherwise
        # shutdown caused this thread to terminate.
        if not self._shutting_down:
            self.shutdown()

    def launch(self, scene_file="", headless=False, responsive_ui=False,
               blocking=False) -> None:
        """Launches CoppeliaSim.

        Launches the UI thread, waits until the UI thread has finished, this
        results in the current thread becoming the simulation thread.

        :param scene_file: The scene file to load. Empty string for empty scene.
        :param headless: Run CoppeliaSim in headless (no GUI) mode.
        :param responsive_ui: If True, then a separate thread will be created to
            asynchronously step the UI of CoppeliaSim. Note, that will reduce
            the responsiveness of the simulation thread.
        :param blocking: Causes CoppeliaSim to launch as if running the default
            c++ client application. This causes the function to block.
            For most users, this will be set to False.
        """
        abs_scene_file = os.path.abspath(scene_file)
        if len(scene_file) > 0 and not os.path.isfile(abs_scene_file):
            raise PyRepError('Scene file does not exist: %s' % scene_file)
        cwd = os.getcwd()
        self._ui_thread = threading.Thread(target=self._run_ui_thread,
                                           args=(abs_scene_file, headless))
        self._ui_thread.daemon = True
        self._ui_thread.start()
        while not sim.simExtCanInitSimThread():
            time.sleep(0.1)
        sim.simExtSimThreadInit()
        time.sleep(0.2)  # Stops CoppeliaSim crashing if restarted too quickly.
        if blocking:
            while not sim.simExtGetExitRequest():
                sim.simExtStep()
            self.shutdown()
        elif responsive_ui:
            self._responsive_ui_thread = threading.Thread(
                target=self._run_responsive_ui_thread)
            self._responsive_ui_thread.daemon = True
            try:
                self._responsive_ui_thread.start()
            except (KeyboardInterrupt, SystemExit):
                if not self._shutting_down:
                    self.shutdown()
                sys.exit()
            self.step()
        else:
            self.step()
        os.chdir(cwd)  # Go back to the previous cwd

    def script_call(self, function_name_at_script_name: str,
                    script_handle_or_type: int,
                    ints=(), floats=(), strings=(), bytes='') -> (
            Tuple[List[int], List[float], List[str], str]):
        """Calls a script function (from a plugin, the main client application,
        or from another script). This represents a callback inside of a script.

        :param function_name_at_script_name: A string representing the function
            name and script name, e.g. myFunctionName@theScriptName. When the
            script is not associated with an object, then just specify the
            function name.
        :param script_handle_or_type: The handle of the script, otherwise the
            type of the script.
        :param ints: The input ints to the script.
        :param floats: The input floats to the script.
        :param strings: The input strings to the script.
        :param bytes: The input bytes to the script (as a string).
            NOTE: parameter name shadows the builtin, kept for API stability.
        :return: Any number of return values from the called Lua function.
        """
        return utils.script_call(
            function_name_at_script_name, script_handle_or_type, ints, floats,
            strings, bytes)

    def shutdown(self) -> None:
        """Shuts down the CoppeliaSim simulation.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        self._shutting_down = True
        self.stop()
        self.step_ui()
        sim.simExtPostExitRequest()
        sim.simExtSimThreadDestroy()
        self._ui_thread.join()
        if self._responsive_ui_thread is not None:
            self._responsive_ui_thread.join()
        # CoppeliaSim crashes if new instance opened too quickly after shutdown.
        # TODO: A small sleep stops this for now.
        time.sleep(0.1)
        self._ui_thread = None
        self._shutting_down = False

    def start(self) -> None:
        """Starts the physics simulation if it is not already running.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        if not self.running:
            sim.simStartSimulation()
            self.running = True

    def stop(self) -> None:
        """Stops the physics simulation if it is running.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        if self.running:
            sim.simStopSimulation()
            self.running = False
            # Need this so the UI updates
            for _ in range(5):
                self.step()

    def step(self) -> None:
        """Execute the next simulation step.

        If the physics simulation is not running, then this will only update
        the UI.
        """
        with self._step_lock:
            sim.simExtStep()

    def step_ui(self) -> None:
        """Update the UI.

        This will not execute the next simulation step, even if the physics
        simulation is running.
        This is only applicable when PyRep was launched without a responsive UI.
        """
        with self._step_lock:
            sim.simExtStep(False)

    def set_simulation_timestep(self, dt: float) -> None:
        """Sets the simulation time step. Default is 0.05.

        :param dt: The time step value in seconds.
        """
        sim.simSetFloatParameter(sim.sim_floatparam_simulation_time_step, dt)
        if not np.allclose(self.get_simulation_timestep(), dt):
            warnings.warn('Could not change simulation timestep. You may need '
                          'to change it to "custom dt" using simulation '
                          'settings dialog.')

    def get_simulation_timestep(self) -> float:
        """Gets the simulation time step.

        :return: The time step value in seconds.
        """
        return sim.simGetSimulationTimeStep()

    def set_configuration_tree(self, config_tree: bytes) -> None:
        """Restores configuration information previously retrieved.

        Configuration information (object relative positions/orientations,
        joint/path values) can be retrieved with
        :py:meth:`Object.get_configuration_tree`. Dynamically simulated
        objects will implicitly be reset before the command is applied
        (i.e. similar to calling :py:meth:`Object.reset_dynamic_object` just
        before).

        :param config_tree: The configuration tree to restore.
        """
        sim.simSetConfigurationTree(config_tree)

    def group_objects(self, objects: List[Shape]) -> Shape:
        """Groups several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single grouped shape.
        """
        handles = [o.get_handle() for o in objects]
        handle = sim.simGroupShapes(handles)
        return Shape(handle)

    def merge_objects(self, objects: List[Shape]) -> Shape:
        """Merges several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single merged shape.
        """
        handles = [o.get_handle() for o in objects]
        # FIXME: sim.simGroupShapes(merge=True) won't return correct handle,
        # so we use name to find correct handle of the merged shape.
        name = objects[-1].get_name()
        sim.simGroupShapes(handles, merge=True)
        return Shape(name)

    def export_scene(self, filename: str) -> None:
        """Saves the current scene.

        :param filename: scene filename. The filename extension is required
            ("ttt").
        """
        sim.simSaveScene(filename)

    def import_model(self, filename: str) -> Object:
        """ Loads a previously saved model.

        :param filename: model filename. The filename extension is required
            ("ttm"). An optional "@copy" can be appended to the filename, in
            which case the model's objects will be named/renamed as if an
            associated script was attached to the model.
        :return: The imported model.
        """
        handle = sim.simLoadModel(filename)
        return utils.to_type(handle)

    def create_texture(self, filename: str, interpolate=True, decal_mode=False,
                       repeat_along_u=False, repeat_along_v=False
                       ) -> Tuple[Shape, Texture]:
        """Creates a planar shape that is textured.

        :param filename: Path to the texture to load.
        :param interpolate: If False, adjacent texture pixels are not
            interpolated.
        :param decal_mode: Texture is applied as a decal (its appearance
            won't be influenced by light conditions).
        :param repeat_along_u: Texture will be repeated along the U direction.
        :param repeat_along_v: Texture will be repeated along the V direction.
        :return: A tuple containing the textured plane and the texture.
        """
        # simCreateTexture option bits: 1=no interpolation, 2=decal mode,
        # 4=repeat along U, 8=repeat along V.
        options = 0
        if not interpolate:
            options |= 1
        if decal_mode:
            options |= 2
        if repeat_along_u:
            # BUGFIX: was 'options |= 3', which also set the no-interpolation
            # and decal bits instead of the repeat-U bit.
            options |= 4
        if repeat_along_v:
            # BUGFIX: was 'options |= 4', which collided with the repeat-U bit.
            options |= 8
        handle = sim.simCreateTexture(filename, options)
        s = Shape(handle)
        return s, s.get_texture()

    def get_objects_in_tree(self, root_object=None, *args, **kwargs
                            ) -> List[Object]:
        """Retrieves the objects in a given hierarchy tree.

        :param root_object: The root object in the tree. Pass None to retrieve
            all objects in the configuration tree. :py:class:`Object` or `int`.
        :param object_type: The object type to retrieve.
            One of :py:class:`.ObjectType`.
        :param exclude_base: Exclude the tree base from the returned list.
        :param first_generation_only: Include in the returned list only the
            object's first children. Otherwise, entire hierarchy is returned.
        :return: A list of objects in the hierarchy tree.
        """
        return Object._get_objects_in_tree(root_object, *args, **kwargs)
|
gigapixel.py | # Gigapixel controller
# This must run as root (sudo python gigapixel.py) due to framebuffer, etc.
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
#
# Prerequisite tutorials: aside from the basic Raspbian setup and PiTFT setup
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
#
# gigapixel.py by Anand Kadiyala
# based on lapse.py by David Hunt
# BSD license, all text above must be included in any redistribution.
import wiringpi2
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import os
import pygame
import threading
import signal
import sys
from pygame.locals import *
from subprocess import call
from time import sleep
from datetime import datetime, timedelta
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
    """Named pygame image loaded from the global icons directory.

    If the PNG cannot be loaded, the error is swallowed and self.bitmap is
    never assigned -- callers must only use icons that actually loaded.
    """
    def __init__(self, name):
        self.name = name
        try:
            self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
        except:
            # best effort: missing/broken image leaves the icon bitmap-less
            pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
    """Tappable screen region with optional background/foreground icons.

    Keyword arguments accepted by the constructor:
      color -- background fill color
      bg    -- background icon name (resolved to an Icon at startup)
      fg    -- foreground icon name
      cb    -- callback function
      value -- value passed to the callback
    """
    def __init__(self, rect, **kwargs):
        self.rect = rect # Bounds
        self.color = None # Background fill color, if any
        self.iconBg = None # Background Icon (atop color fill)
        self.iconFg = None # Foreground Icon (atop background)
        self.bg = None # Background Icon name
        self.fg = None # Foreground Icon name
        self.callback = None # Callback function
        self.value = None # Value passed to callback
        for key, value in kwargs.iteritems():
            if key == 'color': self.color = value
            elif key == 'bg' : self.bg = value
            elif key == 'fg' : self.fg = value
            elif key == 'cb' : self.callback = value
            elif key == 'value': self.value = value

    def selected(self, pos):
        """Return True (and fire the callback, if any) when pos hits this button."""
        x1 = self.rect[0]
        y1 = self.rect[1]
        x2 = x1 + self.rect[2] - 1
        y2 = y1 + self.rect[3] - 1
        if ((pos[0] >= x1) and (pos[0] <= x2) and
            (pos[1] >= y1) and (pos[1] <= y2)):
            if self.callback:
                if self.value is None: self.callback()
                else: self.callback(self.value)
            return True
        return False

    def draw(self, screen):
        """Render fill color, then background icon, then foreground icon (all centered)."""
        if self.color:
            screen.fill(self.color, self.rect)
        if self.iconBg:
            screen.blit(self.iconBg.bitmap,
                (self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
                 self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
        if self.iconFg:
            screen.blit(self.iconFg.bitmap,
                (self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
                 self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))

    def setBg(self, name):
        """Set (or clear, with None) the background icon by name."""
        if name is None:
            self.iconBg = None
        else:
            for i in icons:
                if name == i.name:
                    self.iconBg = i
                    break
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def motorCallback(n): # Direction buttons: 1=right (pin A), 2=left (pin B), 3=up (pin C), 4=down (pin D)
    """Toggle the motor for the chosen direction.

    First tap drives the selected pin HIGH; tapping any direction while the
    motor is running drives both pins of the affected pair LOW (stop).
    (The original header comment, 'Pass 1 or -1', was copied from another
    callback and did not match this code.)
    """
    global screenMode
    global motorRunning
    global motorDirection
    global motorpin
    global motorpinA
    global motorpinB
    global motorpinC
    global motorpinD
    if n == 1:
        motorDirection = 1
        motorpin = motorpinA
        if motorRunning == 0:
            motorRunning = 1
            gpio.digitalWrite(motorpin,gpio.HIGH)
        else:
            # already running: stop the A/B pair
            motorRunning = 0
            gpio.digitalWrite(motorpinA,gpio.LOW)
            gpio.digitalWrite(motorpinB,gpio.LOW)
    elif n == 2:
        motorDirection = 0
        motorpin = motorpinB
        if motorRunning == 0:
            motorRunning = 1
            gpio.digitalWrite(motorpin,gpio.HIGH)
        else:
            motorRunning = 0
            gpio.digitalWrite(motorpinA,gpio.LOW)
            gpio.digitalWrite(motorpinB,gpio.LOW)
    elif n == 3:
        motorDirection = 1
        motorpin = motorpinC
        if motorRunning == 0:
            motorRunning = 1
            gpio.digitalWrite(motorpin,gpio.HIGH)
        else:
            # already running: stop the C/D pair
            motorRunning = 0
            gpio.digitalWrite(motorpinC,gpio.LOW)
            gpio.digitalWrite(motorpinD,gpio.LOW)
    elif n == 4:
        motorDirection = 0
        motorpin = motorpinD
        if motorRunning == 0:
            motorRunning = 1
            gpio.digitalWrite(motorpin,gpio.HIGH)
        else:
            motorRunning = 0
            gpio.digitalWrite(motorpinC,gpio.LOW)
            gpio.digitalWrite(motorpinD,gpio.LOW)
def numericCallback(n): # Numeric keypad: 0-9 = digits, 10 = delete, 11 = cancel, 12 = OK
    """Handle a tap on the numeric-entry screen."""
    global screenMode
    global numberstring
    if n < 10:
        # append digit
        numberstring = numberstring + str(n)
    elif n == 10:
        # backspace
        numberstring = numberstring[:-1]
    elif n == 11:
        # cancel: return to the values screen without storing
        screenMode = 1
    elif n == 12:
        # OK: store the entered value into the setting being edited
        screenMode = returnScreen
        # NOTE(review): 'numeric' is a local here (no global declaration), so
        # the module-level 'numeric' is never updated -- confirm intended.
        numeric = int(numberstring)
        v[dict_idx] = numeric
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
    """Step between settings screens, wrapping within 1..len(buttons)-1."""
    global screenMode
    screenMode += n
    last_screen = len(buttons) - 1
    if screenMode < 1:
        screenMode = last_screen
    elif screenMode > last_screen:
        screenMode = 1
def valuesCallback(n): # Settings screen: -1 = save & exit, 1-5 = edit a setting
    """Dispatch a tap on the values screen.

    n == -1 saves settings and returns to the main screen; n in 1..3,5 opens
    the numeric keypad for the named setting; n == 4 opens the motor-direction
    screen. (The original 'Pass 1 or -1' comment did not match this code.)
    """
    global screenMode
    global returnScreen
    global numberstring
    global numeric
    global v
    global dict_idx
    if n == -1:
        screenMode = 0
        saveSettings()
    if n == 1:
        dict_idx='Pulse'
        numberstring = str(v[dict_idx])
        screenMode = 2
        returnScreen = 1
    elif n == 2:
        dict_idx='Interval'
        numberstring = str(v[dict_idx])
        screenMode = 2
        returnScreen = 1
    elif n == 3:
        dict_idx='Images'
        numberstring = str(v[dict_idx])
        screenMode = 2
        returnScreen = 1
    elif n == 4:
        # direction screen has no numeric entry
        dict_idx='Directions'
        screenMode = 3
        returnScreen = 1
    elif n == 5:
        dict_idx='Rows'
        numberstring = str(v[dict_idx])
        screenMode = 2
        returnScreen = 1
def viewCallback(n): # Viewfinder buttons
    """Handle a tap on the main (viewfinder) screen.

    :param n: Button id; 0 is the gear icon, which opens the settings screen.
    """
    global screenMode, screenModePrior
    # BUGFIX: was 'if n is 0' -- identity comparison with an int literal only
    # works by accident of CPython small-int caching (SyntaxWarning on 3.8+).
    if n == 0: # Gear icon
        screenMode = 1
def doneCallback(): # Exit settings
    """Leave any settings screen: persist settings, then show the main view."""
    global screenMode
    if screenMode > 0:
        saveSettings()
    screenMode = 0 # Switch back to main window
def startCallback(n): # start/Stop the timelapse thread
    """Start (n=1) or stop (n=0) the timelapse worker thread.

    A threading.Thread cannot be restarted after it has run, so a fresh
    Thread object is created whenever the previous one has exited.
    """
    global t, busy, threadExited
    global currentframe
    global currentrow
    if n == 1:
        if busy == False:
            if (threadExited == True):
                # Re-instanciate the object for the next start
                t = threading.Thread(target=timeLapse)
                threadExited = False
            t.start()
    if n == 0:
        if busy == True:
            # clearing 'busy' makes timeLapse break out of its loops
            busy = False
            t.join()
            currentframe = 0
            currentrow = 0
            # Re-instanciate the object for the next time around.
            t = threading.Thread(target=timeLapse)
def timeLapse():
    """Worker-thread body: shoot a grid of Rows x Images frames.

    Per row: take one exposure, then for each image pulse the pan motor,
    settle, and expose; finally pulse the row motor (pin C) and reverse the
    pan direction for a serpentine scan. Setting the global 'busy' to False
    (see startCallback) aborts at the next loop check.
    """
    global v
    global settling_time
    global shutter_length
    global motorpin
    global shutterpin
    global backlightpin
    global busy, threadExited
    global currentframe
    global currentrow
    busy = True
    for j in range(1, v['Rows'] + 1 ):
        if busy == False:
            break
        currentrow = j
        # disable the backlight, critical for night timelapses, also saves power
        os.system("echo '0' > /sys/class/gpio/gpio252/value")
        gpio.digitalWrite(shutterpin,gpio.HIGH)
        sleep(shutter_length)
        gpio.digitalWrite(shutterpin,gpio.LOW)
        # enable the backlight
        os.system("echo '1' > /sys/class/gpio/gpio252/value")
        interval = float(v['Interval'])/1000.0
        if (interval > shutter_length):
            sleep(interval - shutter_length)
        for i in range( 1 , v['Images'] + 1 ):
            if busy == False:
                break
            currentframe = i
            # advance the pan motor by one pulse, then let vibration settle
            gpio.digitalWrite(motorpin,gpio.HIGH)
            pulse = float(v['Pulse'])/1000.0
            sleep(pulse)
            gpio.digitalWrite(motorpin,gpio.LOW)
            sleep(settling_time)
            # disable the backlight, critical for night timelapses, also saves power
            os.system("echo '0' > /sys/class/gpio/gpio252/value")
            gpio.digitalWrite(shutterpin,gpio.HIGH)
            sleep(shutter_length)
            gpio.digitalWrite(shutterpin,gpio.LOW)
            # enable the backlight
            os.system("echo '1' > /sys/class/gpio/gpio252/value")
            interval = float(v['Interval'])/1000.0
            if (interval > shutter_length):
                sleep(interval - shutter_length)
        # end of row: pulse the row motor and reverse pan direction
        gpio.digitalWrite(motorpinC, gpio.HIGH)
        pulse=float(v['Pulse'])/1000.0
        sleep(pulse)
        gpio.digitalWrite(motorpinC, gpio.LOW)
        if motorpin == motorpinA:
            motorpin=motorpinB
        elif motorpin == motorpinB:
            motorpin=motorpinA
    currentframe = 0
    currentrow = 0
    busy = False
    threadExited = True
def signal_handler(signal, frame):
    # SIGTERM handler: shut pygame down cleanly before exiting.
    # NOTE: the 'signal' parameter shadows the signal module (harmless here).
    print 'got SIGTERM'
    pygame.quit()
    sys.exit()
# Global stuff -------------------------------------------------------------
# Timelapse worker thread; re-instantiated by startCallback() after each run.
t = threading.Thread(target=timeLapse)
busy = False # True while timeLapse() is running
threadExited = False # set by timeLapse() on completion
screenMode = 0 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
numeric = 0 # number from numeric keypad
numberstring = "0" # keypad entry buffer
motorRunning = 0
motorDirection = 0
returnScreen = 0 # screen to return to after numeric entry
# GPIO pin assignments (presumably BCM numbering -- TODO confirm for this board)
shutterpin = 17
motorpinA = 18
motorpinB = 27
motorpinC = 23
motorpinD = 22
motorpin = motorpinA
backlightpin = 252
currentframe = 0
currentrow = 0
framecount = 100
settling_time = 0.2 # seconds to wait after a motor pulse
shutter_length = 0.2 # seconds the shutter pin is held HIGH
interval_delay = 0.2
dict_idx = "Interval" # settings key currently edited on the keypad screen
# Settings persisted via saveSettings()/loadSettings() (lapse.pkl)
v = { "Pulse": 100,
      "Interval": 3000,
      "Images": 150,
      "Rows": 3}
icons = [] # This list gets populated at startup

# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
    # Screen mode 0 is main view screen of current status
    [Button(( 5,180,120, 60), bg='start', cb=startCallback, value=1),
     Button((130,180, 60, 60), bg='cog', cb=viewCallback, value=0),
     Button((195,180,120, 60), bg='stop', cb=startCallback, value=0)],

    # Screen 1 for changing values and setting motor direction
    [Button((280, 0, 40, 40), bg='cogsmall', cb=valuesCallback, value=1),
     Button((280, 40, 40, 40), bg='cogsmall', cb=valuesCallback, value=2),
     Button((280,80, 40, 40), bg='cogsmall', cb=valuesCallback, value=3),
     Button((280,120, 40, 40), bg='cogsmall', cb=valuesCallback, value=5),
     Button(( 0,180,160, 60), bg='ok', cb=valuesCallback, value=-1),
     Button((160,180, 160, 60), bg='directions', cb=valuesCallback, value=4)],

    # Screen 2 for numeric input
    [Button(( 0, 0,320, 60), bg='box'),
     Button((180,120, 60, 60), bg='0', cb=numericCallback, value=0),
     Button(( 0,180, 60, 60), bg='1', cb=numericCallback, value=1),
     Button((120,180, 60, 60), bg='3', cb=numericCallback, value=3),
     Button(( 60,180, 60, 60), bg='2', cb=numericCallback, value=2),
     Button(( 0,120, 60, 60), bg='4', cb=numericCallback, value=4),
     Button(( 60,120, 60, 60), bg='5', cb=numericCallback, value=5),
     Button((120,120, 60, 60), bg='6', cb=numericCallback, value=6),
     Button(( 0, 60, 60, 60), bg='7', cb=numericCallback, value=7),
     Button(( 60, 60, 60, 60), bg='8', cb=numericCallback, value=8),
     Button((120, 60, 60, 60), bg='9', cb=numericCallback, value=9),
     Button((240,120, 80, 60), bg='del', cb=numericCallback, value=10),
     Button((180,180,140, 60), bg='ok', cb=numericCallback, value=12),
     Button((180, 60,140, 60), bg='cancel',cb=numericCallback, value=11)],

    # Screen 3 for setting motor direction
    [Button((60, 60, 60, 60), bg='up', cb=motorCallback, value=3),
     Button((0, 120, 60, 60), bg='left', cb=motorCallback, value=2),
     Button((120, 120, 60, 60), bg='right', cb=motorCallback, value=1),
     Button(( 60, 180, 60, 60), bg='down', cb=motorCallback, value=4),
     Button((160,180,140, 60), bg='ok', cb=numericCallback, value=12)]
]
# Assorted utility functions -----------------------------------------------
def saveSettings():
    """Persist the settings dictionary 'v' to lapse.pkl in the working dir.

    Best-effort: any failure (read-only filesystem, unpicklable value, ...)
    is swallowed so a save problem never takes down the UI.  The original
    used a bare 'except:', which also swallowed KeyboardInterrupt and
    SystemExit; catching Exception preserves the best-effort behaviour
    without hiding interpreter-exit signals.
    """
    global v
    try:
        # Use a dictionary (rather than pickling 'raw' values) so
        # the number & order of things can change without breaking.
        with open('lapse.pkl', 'wb') as outfile:
            pickle.dump(v, outfile)
    except Exception:
        pass
def loadSettings():
    """Restore the settings dictionary 'v' from lapse.pkl, if present.

    Best-effort: if the file is missing or unreadable the current (default)
    settings are kept.  Uses a context manager so the file handle is closed
    even when unpickling fails, and catches Exception instead of the
    original bare 'except:' so interpreter-exit signals are not swallowed.
    """
    global v
    try:
        with open('lapse.pkl', 'rb') as infile:
            v = pickle.load(infile)
    except Exception:
        pass
# Initialization -----------------------------------------------------------
# NOTE(review): the unparenthesised print statements below are Python 2
# syntax -- this script must run under Python 2 (SDL 1.2-era pygame).
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV' , '/dev/fb1')
os.putenv('SDL_MOUSEDRV' , 'TSLIB')
os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# Init pygame and screen
print "Initting..."
pygame.init()
print "Setting Mouse invisible..."
pygame.mouse.set_visible(False)
print "Setting fullscreen..."
# Use the largest available 16-bit display mode, fullscreen.
modes = pygame.display.list_modes(16)
screen = pygame.display.set_mode(modes[0], FULLSCREEN, 16)
print "Loading Icons..."
# Load all icons at startup.
# Icon name is the .png filename without its extension.
for file in os.listdir(iconPath):
    if fnmatch.fnmatch(file, '*.png'):
        icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
print"Assigning Buttons"
for s in buttons:            # For each screenful of buttons...
    for b in s:              # For each button on screen...
        for i in icons:      # For each icon...
            if b.bg == i.name:     # Compare names; match?
                b.iconBg = i       # Assign Icon to Button
                b.bg = None        # Name no longer used; allow garbage collection
            if b.fg == i.name:
                b.iconFg = i
                b.fg = None
# Set up GPIO pins
print "Init GPIO pins..."
# Pin numbers follow the Broadcom GPIO numbering (WPI_MODE_GPIO).
gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_GPIO)
gpio.pinMode(shutterpin,gpio.OUTPUT)
gpio.pinMode(motorpinA,gpio.OUTPUT)
gpio.pinMode(motorpinB,gpio.OUTPUT)
gpio.pinMode(motorpinC,gpio.OUTPUT)
gpio.pinMode(motorpinD,gpio.OUTPUT)
# Start with the stepper motor de-energised.
gpio.digitalWrite(motorpinA,gpio.LOW)
gpio.digitalWrite(motorpinB,gpio.LOW)
gpio.digitalWrite(motorpinC,gpio.LOW)
gpio.digitalWrite(motorpinD,gpio.LOW)
# I couldnt seem to get at pin 252 for the backlight using the usual method above,
# but this seems to work
# (drives the display backlight via the sysfs GPIO interface instead)
os.system("echo 252 > /sys/class/gpio/export")
os.system("echo 'out' > /sys/class/gpio/gpio252/direction")
os.system("echo '1' > /sys/class/gpio/gpio252/value")
print"Load Settings"
loadSettings() # Must come last; fiddles with Button/Icon states
print "loading background.."
img = pygame.image.load("icons/GigapixelPi.png")
# Letterbox the background if it is shorter than the 240px screen.
if img is None or img.get_height() < 240: # Letterbox, clear background
    screen.fill(0)
if img:
    screen.blit(img,
        ((320 - img.get_width() ) / 2,
         (240 - img.get_height()) / 2))
pygame.display.update()
sleep(2)
# Main loop ----------------------------------------------------------------
signal.signal(signal.SIGTERM, signal_handler)
print "mainloop.."
while(True):
    # Process touchscreen input
    while True:
        for event in pygame.event.get():
            # NOTE(review): 'is' identity comparison on event-type ints only
            # works because CPython caches small ints; '==' would be safer.
            if(event.type is MOUSEBUTTONDOWN):
                pos = pygame.mouse.get_pos()
                # Dispatch the tap to the first button that claims it.
                for b in buttons[screenMode]:
                    if b.selected(pos): break
            elif(event.type is MOUSEBUTTONUP):
                # Releasing the finger always stops and de-energises the motor.
                motorRunning = 0
                gpio.digitalWrite(motorpinA,gpio.LOW)
                gpio.digitalWrite(motorpinB,gpio.LOW)
                gpio.digitalWrite(motorpinC,gpio.LOW)
                gpio.digitalWrite(motorpinD,gpio.LOW)
        # NOTE(review): 'screenMode >= 0' is always true (screenMode is a
        # non-negative screen index), so this breaks -- and redraws -- on
        # every pass.  Upstream variants of this UI gate the redraw with a
        # higher threshold; confirm whether the busy redraw is intended.
        if screenMode >= 0 or screenMode != screenModePrior: break
    # Letterbox, clear background (py2 '/' here is integer division)
    if img is None or img.get_height() < 240:
        screen.fill(0)
    if img:
        screen.blit(img,
            ((320 - img.get_width() ) / 2,
             (240 - img.get_height()) / 2))
    # Overlay buttons on display and update
    for i,b in enumerate(buttons[screenMode]):
        b.draw(screen)
    # Screen 3: motor-direction screen header
    if screenMode == 3:
        myfont = pygame.font.SysFont("Arial", 50)
        label = myfont.render("Position", 1, (255,255,255))
        screen.blit(label, (10, 2))
    # Screen 2: numeric entry -- show the digits typed so far
    if screenMode == 2:
        myfont = pygame.font.SysFont("Arial", 50)
        label = myfont.render(numberstring, 1, (255,255,255))
        screen.blit(label, (10, 2))
    # Screen 1: settings screen -- current values next to their labels
    if screenMode == 1:
        myfont = pygame.font.SysFont("Arial", 30)
        label = myfont.render("Pulse:" , 1, (255,255,255))
        screen.blit(label, (10, 0))
        label = myfont.render("Interval:" , 1, (255,255,255))
        screen.blit(label, (10, 40))
        label = myfont.render("Columns:" , 1, (255,255,255))
        screen.blit(label, (10,80))
        label = myfont.render("Rows:" , 1, (255,255,255))
        screen.blit(label, (10,120))
        label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
        screen.blit(label, (130, 0))
        label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
        screen.blit(label, (130, 40))
        # NOTE(review): the "Columns" label shows v['Images'] -- presumably
        # columns-per-row of the gigapixel grid; confirm the key naming.
        label = myfont.render(str(v['Images']) , 1, (255,255,255))
        screen.blit(label, (130,80))
        label = myfont.render(str(v['Rows']) , 1, (255,255,255))
        screen.blit(label, (130,120))
    # Screen 0: status screen -- settings plus capture progress
    if screenMode == 0:
        myfont = pygame.font.SysFont("Arial", 30)
        label = myfont.render("Pulse:" , 1, (255,255,255))
        screen.blit(label, (10, 10))
        label = myfont.render("Interval:" , 1, (255,255,255))
        screen.blit(label, (10, 50))
        label = myfont.render("Columns:" , 1, (255,255,255))
        screen.blit(label, (10, 90))
        label = myfont.render("Rows:" , 1, (255,255,255))
        screen.blit(label, (10,130))
        label = myfont.render(str(v['Pulse']) + "ms" , 1, (255,255,255))
        screen.blit(label, (160, 10))
        label = myfont.render(str(v['Interval']) + "ms" , 1, (255,255,255))
        screen.blit(label, (160, 50))
        label = myfont.render(str(currentframe) + " of " + str(v['Images']) , 1, (255,255,255))
        screen.blit(label, (160, 90))
        label = myfont.render(str(currentrow) + " of " + str(v['Rows']) , 1, (255,255,255))
        screen.blit(label, (160, 130))
    # Disabled remaining-time estimate, kept for reference:
    #intervalLength = float((v['Pulse'] + v['Interval'] + (settling_time*1000) + (shutter_length*1000)))
    #remaining = float((intervalLength * (v['Images'] - currentframe)) / 1000)
    #sec = timedelta(seconds=int(remaining))
    #d = datetime(1,1,1) + sec
    #remainingStr = "%dh%dm%ds" % (d.hour, d.minute, d.second)
    #label = myfont.render(remainingStr , 1, (255,255,255))
    #screen.blit(label, (160, 130))
    pygame.display.update()
    screenModePrior = screenMode
|
__main__.py | """
Multi-threaded match-making game server
"""
import os
import random
from datetime import datetime
import threading
from collections import defaultdict
from referee.log import StarLog
from referee.game import play, IllegalActionException, COLOURS, NUM_PLAYERS
from battleground.protocol import DisconnectException, ProtocolException
from battleground.protocol import Connection, MessageType as M
from battleground.protocol import DEFAULT_SERVER_PORT
from cpu import PlayerRandomMixture
from cpu import PlayerGreedyArmy
# The following channels are reserved, and choosing them results in playing
# a game with server-controlled players:
SPECIAL_CHANNELS = {
    "random": (lambda: ServerPlayer(PlayerRandomMixture, "randy")),
    # BUG FIX: the "greedy" channel previously constructed
    # PlayerRandomMixture; PlayerGreedyArmy (imported above and otherwise
    # unused) is the player matching this channel and the "greedo" name.
    "greedy": (lambda: ServerPlayer(PlayerGreedyArmy, "greedo")),
}
# Print at a higher level of verbosity, including some debugging information
DEBUG = False # The matchmaking system seems to be working well from 2019.
# # # #
# Main thread: listen for incoming connections.
#
#
def main():
    """
    Listen for incoming client connections and hand each one off to a new
    daemon thread running servant(); runs until interrupted with Ctrl-C.
    """
    log = StarLog(
        level=1 + DEBUG, timefn=lambda: f"Thread-0 {datetime.now()}"
    )
    log.comment("initialising server", depth=-1)
    # A single matchmaking pool is shared by every handler thread.
    shared_pool = MatchmakingPool(
        num_players=NUM_PLAYERS, special_channels=SPECIAL_CHANNELS
    )
    try:
        # An empty host string accepts connections on any interface.
        listener = Connection.iter_listen(host="", port=DEFAULT_SERVER_PORT)
        log.comment(f"listening on port {DEFAULT_SERVER_PORT}...")
        for conn, addr in listener:
            # Accept each new connection and delegate it to its own thread.
            log.comment("new client connected: ", addr)
            log.comment("starting a new thread to handle this client...")
            worker = threading.Thread(target=servant, args=(conn, shared_pool))
            worker.daemon = True  # don't outlive the main thread
            worker.start()
    except KeyboardInterrupt:
        print()  # end line
        log.comment("bye!")
# # # #
# Worker thread: Coordinate the matchmaking process and, if the client is the
# player that allows a game to begin, coordinate that game to conclusion.
#
def servant(connection, pool):
    """
    Handle one client connection from greeting to disconnection.

    Performs the PLAY handshake, submits the client to the matchmaking
    pool and, if this client is the one that completes a match,
    coordinates the resulting game through to its conclusion.  Runs in
    its own daemon thread (one thread per accepted connection).
    """
    # (Each thread gets own print function which includes its thread number)
    timefn = lambda: f"{threading.current_thread().name} {datetime.now()}"
    out = StarLog(level=1 + DEBUG, timefn=timefn)
    out.comment("hello, world!")

    # # #
    # Initiate connection
    #
    # At your service, client! Let us begin the protocol
    # First, could you kindly send me a PLAY request containing your
    # name and matchmaking channel?
    out.comment("begin communication with player", depth=-1)
    out.comment("waiting for PLAY request...")
    try:
        playmsg = connection.recv(M.PLAY)
        out.comment("successfully received PLAY request:", playmsg)
        out.comment("sending OKAY back.")
        connection.send(M.OKAY)
    except DisconnectException:
        out.comment("client disconnected. bye!")
        connection.disconnect()
        return
    except ProtocolException:  # exception value was unused; no 'as e'
        out.comment("protocol error! that was unexpected...? bye!")
        connection.disconnect()
        return

    # Now that you're officially a player, let's wrap you up in an object so
    # that we won't forget your name.
    new_player = NetworkPlayer(connection, playmsg["name"])
    # And we'll need to note that channel for matchmaking purposes!
    channel = playmsg["channel"]

    # # #
    # Conduct matchmaking
    #
    # Okay then. Now, if it pleases you just to wait one moment, I'll look for
    # some suitable opponents for you to play with...
    out.comment("looking for opponents...", depth=-1)
    try:
        players = pool.match(channel, new_player, out)
        out.comment("opponents found!")
    except NotEnoughPlayers:
        # I'm afraid this is as far as I can take you, good sir/madam.
        # If you wait here for just a short time, I'm sure another thread
        # will come by and pick you up quite soon.
        # It has been my eternal pleasure. Farewell~ Your humble servant.
        out.comment("leaving in pool for another thread. bye~!")
        return

    # # #
    # Initialise all players, prepare for game
    #
    # Splendid! Between the few of you, we have enough players for a game!
    # Who will take the first turn? Let us cast the proverbial die:
    out.comment("randomly assigning colours to players...")
    random.shuffle(players)
    cols_players = list(zip(COLOURS, players))
    # Then, shall we introduce you to one another?
    col_name_map = {colour: player.name for colour, player in cols_players}
    for colour, player in cols_players:
        player.game(col_name_map, out.comment)
    # What name shall we give to this glorious playing?
    player_names = "_and_".join(p.name for p in players)
    timestamp = str(datetime.now())[:19].replace(" ", "_").replace(":", "-")
    game_name = f"logs/game_at_{timestamp}_with_{player_names}.txt"
    # Attempt to make sure there is a 'logs' folder ready for the game log
    try:
        os.mkdir("logs")
    except OSError:
        # BUG FIX: was a bare 'except:' (swallowed KeyboardInterrupt too).
        # Most likely the directory already exists; any other failure will
        # surface below when the log file is opened.
        pass

    # # #
    # Play game, handle result
    #
    # Without further ado, let us begin!
    try:
        with open(game_name, 'w') as log_file:
            result = play(
                players,
                out_function=out.comment,
                print_state=False,
                log_filename=game_name,
                log_file=log_file,
            )
        # What a delightful result! I hope that was an enjoyable game
        # for all of you. Let's share the final result.
        out.comment("game over!", depth=-1)
        out.comment(result)
        out.comment("sending out result...")
        for player in players:
            player.game_over(result=result)
    except IllegalActionException:
        # Ah! The game has ended early. We had better
        # make sure everyone is on the same page:
        out.comment("game error", depth=-1)
        out.comment("game error: invalid action")
        for player in players:
            player.game_over(result="game error: invalid action")
    except DisconnectException:
        # In the unfortunate event of a disconnection, we had better
        # make sure everyone is on the same page:
        out.comment("connection error", depth=-1)
        out.comment("a client disconnected")
        for player in players:
            try:
                player.error(reason="opponent disconnected")
            except BrokenPipeError:
                # this connection must have been the one that reset; skip it
                continue
    except ProtocolException as e:
        out.comment("protocol error!", depth=-1)
        out.comment(e)
        out.comment("a client did something unexpected")
        for player in players:
            player.error(reason="opponent broke protocol")

    # # #
    # Terminate all players
    #
    # One way or another, that's the end of this meeting. Until next time, my
    # good friends! It has been my deepest pleasure~
    out.comment("disconnection", depth=-1)
    out.comment("disconnecting players...")
    for player in players:
        player.disconnect()
    out.comment("end of thread. bye~")
# # #
# Player wrappers
#
class NetworkPlayer:
    """
    A Player wrapper for network-controlled players.

    Each method mirrors one step of the wire protocol: it sends the
    corresponding message over the client connection and, where the
    protocol demands it, waits for the client's reply.
    """
    def __init__(self, connection, name):
        # connection: an open battleground.protocol Connection
        # name: the name announced in the client's PLAY request
        self.connection = connection
        self.name = name
        # Human-readable tag used in log lines; updated with the colour
        # once init() runs.
        self.player_str = f"{self.name} (not yet initialised)"
    def ping(self, timeout=None):
        """Round-trip an OKAY to check the client is still responsive
        (used by MatchmakingPool._filter to drop stale connections)."""
        self.connection.send(M.OKAY)
        self.connection.recv(M.OKAY, timeout=timeout)
    def game(self, colour_name_map, log_function):
        """Announce the match: send GAME with the colour->name mapping.

        Also installs self.log; must be called before init/action/update.
        """
        self.log = log_function
        self.log(self.player_str, "sending GAME")
        self.connection.send(M.GAME, **colour_name_map)
    def init(self, colour):
        """Assign this player its colour and wait for acknowledgement."""
        self.colour = colour
        self.player_str = f"{self.name} ({colour})"
        self.log(self.player_str, "sending INIT")
        self.connection.send(M.INIT, colour=colour)
        self.connection.recv(M.OKAY)
    def action(self):
        """Request the client's next move (TURN) and return the action
        carried by its ACTN reply."""
        self.log(self.player_str, "sending TURN")
        self.connection.send(M.TURN)
        self.log(self.player_str, "waiting for ACTN")
        actnmsg = self.connection.recv(M.ACTN)
        self.log(self.player_str, "received ACTN:", actnmsg)
        return actnmsg["action"]
    def update(self, opponent_action, player_action):
        """Push both players' latest actions (UPD8) and wait for OKAY."""
        self.log(self.player_str, "sending UPD8")
        self.connection.send(
            M.UPD8,
            opponent_action=opponent_action,
            player_action=player_action,
        )
        self.log(self.player_str, "waiting for OKAY")
        self.connection.recv(M.OKAY)
    def game_over(self, result):
        """Notify the client of the final result (OVER); no reply expected."""
        self.log(self.player_str, "sending OVER")
        self.connection.send(M.OVER, result=result)
    def error(self, reason):
        """Notify the client of an abnormal end (ERRO); no reply expected."""
        self.log(self.player_str, "sending ERRO")
        self.connection.send(M.ERRO, reason=reason)
    def disconnect(self):
        """Close the underlying connection."""
        self.log(self.player_str, "disconnecting")
        self.connection.disconnect()
class ServerPlayer:
    """
    Player wrapper around a locally-hosted (server-side) player.

    Mirrors the NetworkPlayer interface so the game loop can drive either
    kind of player without caring where it lives.
    """
    def __init__(self, Player, name):
        # Keep the player class itself; instantiation is deferred to
        # init(), when the assigned colour becomes known.
        self.Player = Player
        self.name = name
    def game(self, _colour_name_map, log_function):
        # A local player has no use for the name map; just remember how
        # to write log lines.
        self.log = log_function
    def init(self, colour):
        self.colour = colour
        self.player_str = f"{self.name} ({colour})"
        self.log(self.player_str, "initialising", colour)
        self.player = self.Player(colour)
    def action(self):
        self.log(self.player_str, "asking for action")
        chosen = self.player.action()
        self.log(self.player_str, "got:", chosen)
        return chosen
    def update(self, opponent_action, player_action):
        self.log(
            self.player_str, "updating with", player_action, opponent_action
        )
        self.player.update(
            opponent_action=opponent_action, player_action=player_action
        )
    def game_over(self, result):
        """Nothing to clean up for a local player."""
    def error(self, reason):
        """Local players never need error notifications."""
    def disconnect(self):
        """No connection to close."""
# # #
# Matchmaking code
#
class MatchmakingPool:
    """
    Per-channel waiting lists of players, with concurrent access control.

    Submit a player to a channel with match(); either a full list of
    num_players players comes back, or a NotEnoughPlayers exception is
    raised and the player stays in the pool for a later caller.

    Notes:
    * Thread safe (I think)
    * Does not automatically clear stale connections out of channel waiting
      lists until a new player is submitted to that channel. Therefore, an
      attack exists where a client can run up memory usage by repeatedly
      submitting players to obscure channels, and then disconnecting.
    """
    def __init__(self, num_players, special_channels):
        self._lock = threading.RLock()
        self._waiting = defaultdict(list)
        self.num_players = num_players
        self.special_channels = special_channels

    def match(self, channel, new_player, out):
        """
        Submit 'new_player' (a Player wrapper) to look for games on
        'channel'.

        Returns a full list of players (including 'new_player') when
        enough have accumulated from previous calls, or immediately for a
        special channel (the server supplies the opponent).  Otherwise
        leaves 'new_player' waiting and raises NotEnoughPlayers.
        """
        # Special channels never wait: build a server player on the spot.
        if channel in self.special_channels:
            make_opponent = self.special_channels[channel]
            return [make_opponent(), new_player]
        # Otherwise, match-make as usual under the lock:
        with self._lock:
            out.debug("matchmaking pool before filter:", self._waiting)
            # Drop any waiting players who have since disconnected, then
            # queue this one too.
            waiting = self._filter(channel, out)
            waiting.append(new_player)
            if len(waiting) >= self.num_players:
                # Enough for a game: remove the queue and hand it back.
                del self._waiting[channel]
                out.comment("match found!")
                out.debug("pool after filter:", self._waiting)
                return waiting
            out.comment("not enough players!")
            out.debug("pool after filter:", self._waiting)
            raise NotEnoughPlayers()

    def _filter(self, channel, out=None):
        """Ping everyone waiting on 'channel'; keep only the responsive
        ones and return (and store) that filtered list."""
        with self._lock:
            alive = []
            for waiting_player in self._waiting[channel]:
                try:
                    waiting_player.ping(timeout=10)
                except (
                    OSError,
                    BrokenPipeError,    # the connection has gone stale?
                    DisconnectException,  # the client closed connection
                    ProtocolException,    # the client... did what?
                ) as exc:
                    # In any case, close this connection and drop the
                    # client from the pool.
                    if out is not None:
                        out.comment(
                            "ditching client",
                            waiting_player,
                            "due to",
                            exc.__class__.__name__,
                            exc,
                        )
                    waiting_player.connection.disconnect()
                else:
                    # Contact! They're still online; keep them waiting.
                    alive.append(waiting_player)
            self._waiting[channel] = alive
            return alive
class NotEnoughPlayers(Exception):
    """
    Raised by MatchmakingPool.match when a channel does not yet hold enough
    waiting players to start a game; the submitted player stays in the pool
    for a later match call.
    """
# Script entry point: start the matchmaking server.
if __name__ == "__main__":
    main()
|
mainwindow.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import re
import shutil
import signal
import socket
import glob
import subprocess
import sys
import threading
import traceback
import importlib
logger = logging.getLogger(__name__)
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDesktopWidget, QDockWidget,
QMainWindow, QMenu, QMessageBox, QShortcut,
QSplashScreen, QStyleFactory, QWidget, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.manager import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Get CLI options and set OpenGL backend. This attribute must
# be set before creating the application. See spyder-ide/spyder#11227
#==============================================================================
from spyder.app.utils import set_opengl_implementation
from spyder.app.cli_options import get_options
from spyder.config.base import running_under_pytest
# Get CLI options/args and make them available for future use.
# Ignore args if running tests or Spyder will try and fail to parse pytests's.
if running_under_pytest():
    # Keep only the program name so pytest's own CLI flags are not parsed
    # by Spyder's option parser.
    sys_argv = [sys.argv[0]]
    CLI_OPTIONS, CLI_ARGS = get_options(sys_argv)
else:
    CLI_OPTIONS, CLI_ARGS = get_options()
# **** Set OpenGL implementation to use ****
# The CLI flag wins; otherwise fall back to the configured value unless it
# is left on 'automatic' (in which case no explicit implementation is set).
if CLI_OPTIONS.opengl_implementation:
    option = CLI_OPTIONS.opengl_implementation
    set_opengl_implementation(option)
else:
    if CONF.get('main', 'opengl') != 'automatic':
        option = CONF.get('main', 'opengl')
        set_opengl_implementation(option)
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
# Use the SVG application icon under PyQt5, the PNG icon otherwise.
if PYQT5:
    APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
    APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV
if not running_under_pytest():
    SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg')))
    SPLASH_FONT = SPLASH.font()
    SPLASH_FONT.setPixelSize(10)
    SPLASH.setFont(SPLASH_FONT)
    SPLASH.show()
    SPLASH.showMessage(_("Initializing..."),
                       int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
                       QColor(Qt.white))
    # Force the splash to paint before the (long) MainWindow setup begins.
    QApplication.processEvents()
else:
    # No splash screen during test runs.
    SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __website_url__, get_versions)
from spyder.app.utils import (get_python_doc_path, delete_lsp_log_files,
qt_message_handler, setup_logging)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
get_debug_level, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_anaconda, is_gtk_desktop
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
from spyder.config.gui import is_dark_font_color
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri,
MENU_SEPARATOR, set_menu_icons)
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
from spyder.app.solver import find_external_plugins, solve_plugin_dependencies
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Third-party library imports
#==============================================================================
import qdarkstyle
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
    """Spyder main window"""
    # Allow both tabbed and nested dock-widget arrangements.
    DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
    # OS default cursor flash time, captured once at class-creation time.
    CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
    # Locations of the 'path' and 'not_active_path' Spyder config files.
    SPYDER_PATH = get_conf_path('path')
    SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
    # (id, url, translated title) entries for web documentation bookmarks.
    BOOKMARKS = (
        ('Python2', "https://docs.python.org/2/index.html",
         _("Python2 documentation")),
        ('Python3', "https://docs.python.org/3/index.html",
         _("Python3 documentation")),
        ('numpy', "https://docs.scipy.org/doc/",
         _("Numpy and Scipy documentation")),
        ('matplotlib', "https://matplotlib.org/contents.html",
         _("Matplotlib documentation")),
        ('PyQt5',
         "https://www.riverbankcomputing.com/static/Docs/PyQt5/",
         _("PyQt5 Reference Guide")),
        ('PyQt5',
         "https://www.riverbankcomputing.com/static/Docs/PyQt5/module_index.html",
         _("PyQt5 API Reference")),
        ('winpython', "https://winpython.github.io/",
         _("WinPython"))
    )
    DEFAULT_LAYOUTS = 4
    # Signals
    restore_scrollbar_position = Signal()
    sig_setup_finished = Signal()
    all_actions_defined = Signal()
    # type: (OrderedDict, OrderedDict)
    sig_pythonpath_changed = Signal(object, object)
    sig_main_interpreter_changed = Signal()
    # Carries a file path (str payload).
    sig_open_external_file = Signal(str)
    sig_resized = Signal("QResizeEvent")     # Related to interactive tour
    sig_moved = Signal("QMoveEvent")         # Related to interactive tour
    sig_layout_setup_ready = Signal(object)  # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name):
"""
Return a plugin instance by providing the plugin class.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
raise SpyderAPIError('Plugin "{}" not found!'.format(plugin_name))
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
    def add_plugin(self, plugin):
        """
        Add plugin to plugins dictionary.

        The plugin is keyed by its CONF_SECTION, which is the name that
        get_plugin() later looks plugins up by.
        """
        self._PLUGINS[plugin.CONF_SECTION] = plugin
def register_plugin(self, plugin):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...".format(plugin.get_name())))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_compatibility_message(message)
return
# Signals
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
self.register_shortcut(plugin.toggle_view_action, context, name)
if shortcut is not None:
sc = QShortcut(QKeySequence(shortcut), self,
lambda: self.switch_to_plugin(plugin))
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
toolbars = plugin.get_application_toolbars()
for toolbar in toolbars:
self.toolbarslist.append(toolbar)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio,
plugin.sig_status_message_requested,
]
for signal in signals:
try:
signal.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
    def switch_to_plugin(self, plugin, force_focus=None):
        """
        Switch to this plugin.

        Parameters
        ----------
        plugin : object
            Plugin to switch to. May follow either the new plugin API
            (``get_widget()``, ``toggle_view_action``) or the old one
            (``_ismaximized``, ``_toggle_view_action``); the API in use
            is detected below by catching AttributeError.
        force_focus : bool, optional
            Forwarded to ``plugin.change_visibility``.

        Notes
        -----
        This operation unmaximizes the current plugin (if any), raises
        this plugin to view (if it's hidden) and gives it focus (if
        possible).
        """
        try:
            # New API
            # Unmaximize the previously active plugin before switching,
            # unless it is the very plugin we are switching to.
            if (self.last_plugin is not None
                    and self.last_plugin.get_widget().is_maximized
                    and self.last_plugin is not plugin):
                self.maximize_dockwidget()
        except AttributeError:
            # Old API
            if (self.last_plugin is not None and self.last_plugin._ismaximized
                    and self.last_plugin is not plugin):
                self.maximize_dockwidget()

        try:
            # New API
            # If the pane is hidden, check its toggle action to show it.
            if not plugin.toggle_view_action.isChecked():
                plugin.toggle_view_action.setChecked(True)
                # NOTE(review): is_visible is reset here, presumably so
                # change_visibility() below treats the widget as newly
                # shown — confirm against the widget implementation.
                plugin.get_widget().is_visible = False
        except AttributeError:
            # Old API
            if not plugin._toggle_view_action.isChecked():
                plugin._toggle_view_action.setChecked(True)
                plugin._widget._is_visible = False

        plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugin(self, plugin):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [Console]
tabify = getattr(plugin, 'TABIFY', [self.get_plugin(Plugins.Console)])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf_option('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf_option('enable', True)
plugin.set_conf_option('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None:
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make spyder quit when presing ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.switcher = None
# Check for updates Thread and Worker, refereces needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.preferences.appearance import AppearanceConfigPage
from spyder.preferences.general import MainConfigPage
from spyder.preferences.shortcuts import ShortcutsConfigPage
from spyder.preferences.runconfig import RunConfigPage
from spyder.preferences.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, AppearanceConfigPage,
ShortcutsConfigPage, MainInterpreterConfigPage,
RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_interface_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.conda_status = None
self.mem_status = None
self.cpu_status = None
self.clock_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
if set_windows_appusermodelid != None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.interface_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
# The following flag remember the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply preferences
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
css_path = CSS_PATH
else:
css_path = CSS_PATH
logger.info("Creating core actions...")
self.close_dockwidget_action = create_action(
self, icon=ima.icon('close_pane'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut
)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_interface_action = create_action(
self,
(_("Unlock panes and toolbars") if self.interface_locked else
_("Lock panes and toolbars")),
icon=ima.icon('lock' if self.interface_locked else 'lock_open'),
triggered=lambda checked:
self.toggle_lock(not self.interface_locked),
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_interface_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
logger.info("Creating toolbars...")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
logger.info("Creating Tools menu...")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.show_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_shortcut_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
from spyder.plugins.completion.kite.utils.install import (
check_if_kite_installed)
is_kite_installed, kite_path = check_if_kite_installed()
if not is_kite_installed:
install_kite_action = create_action(
self, _("Install Kite completion engine"),
icon=get_icon('kite', adjust_for_interface=True),
triggered=self.show_kite_installation)
self.tools_menu_actions.append(install_kite_action)
self.tools_menu_actions += [MENU_SEPARATOR, reset_spyder_action]
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
self.tools_menu_actions += [self.menu_lsp_logs]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"), name)
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"), "linguist")
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
logger.info("Creating guidata and sift entries...")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see spyder-ide/spyder#2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except:
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except:
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_shortcut_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
# Internal console plugin
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
from spyder.plugins.console.plugin import Console
self.console = Console(self, configuration=CONF)
self.console.set_exit_function(self.closing)
self.register_plugin(self.console)
# TODO: Load and register the rest of the plugins using new API
# Code completion client initialization
self.set_splash(_("Starting code completion manager..."))
from spyder.plugins.completion.plugin import CompletionManager
self.completions = CompletionManager(self)
# Working directory plugin
logger.info("Loading working directory...")
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory.toolbar)
self.add_plugin(self.workingdirectory)
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
self.add_plugin(self.outlineexplorer)
if is_anaconda():
from spyder.widgets.status import CondaStatus
self.conda_status = CondaStatus(self, status,
icon=ima.icon('environment'))
self.conda_status.update_interpreter(self.get_main_interpreter())
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
self.add_plugin(self.editor)
# Start code completion client
self.set_splash(_("Launching code completion client for Python..."))
self.completions.start()
self.completions.start_client(language='python')
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
file_actions = [
self.file_switcher_action,
self.symbol_finder_action,
None,
]
if sys.platform == 'darwin':
file_actions.extend(self.editor.tab_navigation_actions + [None])
file_actions.extend([restart_action, quit_action])
self.file_menu_actions += file_actions
self.set_splash("")
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
self.add_plugin(self.variableexplorer)
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self)
self.plots.register_plugin()
self.add_plugin(self.plots)
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
self.add_plugin(self.historylog)
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self, css_path=css_path)
self.ipyconsole.register_plugin()
self.add_plugin(self.ipyconsole)
# Help plugin
if CONF.get('help', 'enable'):
from spyder.plugins.help.plugin import Help
self.help = Help(self, configuration=CONF)
self.register_plugin(self.help)
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
self.add_plugin(self.explorer)
# Online help widget
if CONF.get('onlinehelp', 'enable'):
self.set_splash(_("Loading online help..."))
from spyder.plugins.onlinehelp.plugin import OnlineHelp
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
self.add_plugin(self.onlinehelp)
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
self.add_plugin(self.projects)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
self.add_plugin(self.findinfiles)
# Load other plugins (former external plugins)
# TODO: Use this bucle to load all internall plugins and remove
# duplicated code
# Breakpoints
if CONF.get('breakpoints', 'enable'):
from spyder.plugins.breakpoints.plugin import Breakpoints
self.breakpoints = Breakpoints(self, configuration=CONF)
self.register_plugin(self.breakpoints)
self.thirdparty_plugins.append(self.breakpoints)
other_plugins = ['profiler', 'pylint']
for plugin_name in other_plugins:
if CONF.get(plugin_name, 'enable'):
module = importlib.import_module(
'spyder.plugins.{}'.format(plugin_name))
plugin = module.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
self.add_plugin(plugin)
# Third-party plugins
from spyder import dependencies
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'COMPLETION_CLIENT_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# New API: Load and register external plugins
external_plugins = find_external_plugins()
plugin_deps = solve_plugin_dependencies(external_plugins.values())
for plugin_class in plugin_deps:
if issubclass(plugin_class, SpyderPluginV2):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"), self)
self.tour_menu_actions = []
# TODO: Only show intro tour for now. When we are close to finish
# 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if not API == 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"), self)
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus, ClockStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.clock_status = ClockStatus(self, status)
self.apply_statusbar_settings()
# ----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_interface_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
logger.info("Setting up window...")
self.setup_layout(default=False)
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def setup_menus(self):
    """Setup menus.

    Appends the default main-window menus to ``self.menus`` and, on
    macOS only, connects per-menu signals so that shortcuts and icons
    are shown/hidden as each menu opens or closes.
    """
    # Update menus list
    default_menus = [self.file_menu, self.edit_menu, self.search_menu,
                     self.source_menu, self.run_menu, self.debug_menu,
                     self.consoles_menu, self.projects_menu,
                     self.tools_menu, self.view_menu, self.help_menu]
    self.menus = self.menus + default_menus
    # Show and hide shortcuts and icons in menus for macOS
    if sys.platform == 'darwin':
        for menu in self.menus:
            if menu is not None:
                # `menu=menu` binds the current menu at definition time;
                # a plain closure would late-bind to the last loop value.
                menu.aboutToShow.connect(
                    lambda menu=menu: self.show_shortcuts(menu))
                menu.aboutToHide.connect(
                    lambda menu=menu: self.hide_shortcuts(menu))
                menu.aboutToShow.connect(
                    lambda menu=menu: set_menu_icons(menu, False))
                menu.aboutToShow.connect(self.hide_options_menus)
def update_lsp_logs(self):
    """Create an action for each LSP log file.

    Scans the ``lsp_logs`` directory under the user configuration path
    and adds an editor-opening action to the LSP logs menu for every
    ``*.log`` file found.
    """
    self.menu_lsp_logs.clear()
    lsp_logs = []
    # NOTE: a compiled filename regex used to live here but was never
    # applied; removed as dead code.
    files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
    for f in files:
        action = create_action(self, f, triggered=self.editor.load)
        # The log path travels in the action's data; the editor slot
        # retrieves it when the action is triggered.
        action.setData(f)
        lsp_logs.append(action)
    add_actions(self.menu_lsp_logs, lsp_logs)
def post_visible_setup(self):
    """Actions to be performed only after the main window's `show` method
    was triggered.

    Restores floating dockwidgets, starts the single-instance file
    server, populates dynamic menus, opens the last project/session and
    wires DPI-change notifications.
    """
    self.restore_scrollbar_position.emit()
    logger.info('Deleting previous Spyder instance LSP logs...')
    delete_lsp_log_files()
    # Workaround for spyder-ide/spyder#880.
    # QDockWidget objects are not painted if restored as floating
    # windows, so we must dock them before showing the mainwindow,
    # then set them again as floating windows here.
    for widget in self.floating_dockwidgets:
        widget.setFloating(True)
    # In MacOS X 10.7 our app is not displayed after initialized (I don't
    # know why because this doesn't happen when started from the terminal),
    # so we need to resort to this hack to make it appear.
    if running_in_mac_app():
        idx = __file__.index(MAC_APP_NAME)
        app_path = __file__[:idx]
        subprocess.call(['open', app_path + MAC_APP_NAME])
    # Server to maintain just one Spyder instance and open files in it if
    # the user tries to start other instances with
    # $ spyder foo.py
    if (CONF.get('main', 'single_instance') and not self.new_instance
            and self.open_files_server):
        t = threading.Thread(target=self.start_open_files_server)
        # Thread.setDaemon() is deprecated (removed in Python 3.13);
        # assign the attribute directly instead.
        t.daemon = True
        t.start()
        # Connect the window to the signal emitted by the previous server
        # when it gets a client connected to it
        self.sig_open_external_file.connect(self.open_external_file)
    # Create Plugins and toolbars submenus
    self.create_plugins_menu()
    self.create_toolbars_menu()
    # Update toolbar visibility status
    self.toolbars_visible = CONF.get('main', 'toolbars_visible')
    self.load_last_visible_toolbars()
    # Update lock status
    self.toggle_lock(self.interface_locked)
    # Hide Internal Console so that people don't use it instead of
    # the External or IPython ones
    if self.console.dockwidget.isVisible() and DEV is None:
        self.console.toggle_view_action.setChecked(False)
        self.console.dockwidget.hide()
    # Show Help and Consoles by default
    plugins_to_show = [self.ipyconsole]
    if self.help is not None:
        plugins_to_show.append(self.help)
    for plugin in plugins_to_show:
        if plugin.dockwidget.isVisible():
            plugin.dockwidget.raise_()
    # Show history file if no console is visible
    if not self.ipyconsole._isvisible:
        self.historylog.add_history(get_conf_path('history.py'))
    if self.open_project:
        self.projects.open_project(self.open_project)
    else:
        # Load last project if a project was active when Spyder
        # was closed
        self.projects.reopen_last_project()
        # If no project is active, load last session
        if self.projects.get_active_project() is None:
            self.editor.setup_open_files()
    # Connect Editor to Kite completions plugin status
    self.editor.kite_completions_file_status()
    # Connect Editor debug action with Console
    self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
    # Setup menus
    self.setup_menus()
    # Check for spyder updates
    if DEV is None and CONF.get('main', 'check_updates_on_startup'):
        self.give_updates_feedback = False
        self.check_updates(startup=True)
    # Show dialog with missing dependencies
    if not running_under_pytest():
        self.report_missing_dependencies()
    # Raise the menuBar to the top of the main window widget's stack
    # Fixes spyder-ide/spyder#3887.
    self.menuBar().raise_()
    self.is_setting_up = False
    # Handle DPI scale and window changes to show a restart message.
    # Don't activate this functionality on macOS because it's being
    # triggered in the wrong situations.
    # See spyder-ide/spyder#11846
    if not sys.platform == 'darwin':
        window = self.window().windowHandle()
        window.screenChanged.connect(self.handle_new_screen)
        self.screen = self.window().windowHandle().screen()
        self.current_dpi = self.screen.logicalDotsPerInch()
        self.screen.logicalDotsPerInchChanged.connect(
            self.show_dpi_change_message)
    # Notify that the setup of the mainwindow was finished
    self.sig_setup_finished.emit()
def handle_new_screen(self, screen):
    """Connect DPI signals for new screen.

    Moves the DPI-change subscription from the previous screen to
    *screen*, and shows the restart message right away if the new
    screen's DPI already differs from the recorded one.
    """
    try:
        # Detach from the old screen first so we never hold two
        # simultaneous connections to show_dpi_change_message.
        self.screen.logicalDotsPerInchChanged.disconnect(
            self.show_dpi_change_message)
    except (TypeError, RuntimeError):
        # Signal already disconnected, or the old screen object was
        # destroyed by Qt.
        # See spyder-ide/spyder#11903 and spyder-ide/spyder#11997
        pass
    self.screen = screen
    self.screen.logicalDotsPerInchChanged.connect(
        self.show_dpi_change_message)
    if self.current_dpi != screen.logicalDotsPerInch():
        self.show_dpi_change_message(screen.logicalDotsPerInch())
def show_dpi_change_message(self, dpi):
    """Show message to restart Spyder since the DPI scale changed.

    Slot for QScreen.logicalDotsPerInchChanged. Offers to restart
    Spyder with auto high-DPI scaling enabled, or to dismiss the
    message (optionally for the rest of the session).
    """
    # Disconnect immediately so the modal dialog below cannot
    # re-trigger this slot while it is open.
    self.screen.logicalDotsPerInchChanged.disconnect(
        self.show_dpi_change_message)
    if self.current_dpi == dpi:
        # DPI is back to the recorded value: nothing to report.
        # Reconnect DPI scale changes to show a restart message
        self.screen.logicalDotsPerInchChanged.connect(
            self.show_dpi_change_message)
        return
    if not self.show_dpi_message:
        # User asked to hide this message for the current session.
        return
    # Check the window state to not show the message if the window
    # is in fullscreen mode.
    window = self.window().windowHandle()
    if (window.windowState() == Qt.WindowFullScreen and
            sys.platform == 'darwin'):
        return
    dismiss_box = QCheckBox(
        _("Hide this message during the current session")
    )
    msgbox = QMessageBox(self)
    msgbox.setIcon(QMessageBox.Warning)
    msgbox.setText(
        _("A monitor scale change was detected. <br><br>"
          "We recommend restarting Spyder to ensure that it's properly "
          "displayed. If you don't want to do that, please be sure to "
          "activate the option<br><br><tt>Enable auto high DPI scaling"
          "</tt><br><br>in <tt>Preferences > General > Interface</tt>, "
          "in case Spyder is not displayed correctly.<br><br>"
          "Do you want to restart Spyder?"))
    restart_button = msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
    dismiss_button = msgbox.addButton(_('Dismiss'), QMessageBox.NoRole)
    msgbox.setCheckBox(dismiss_box)
    msgbox.setDefaultButton(dismiss_button)
    msgbox.exec_()
    if dismiss_box.isChecked():
        self.show_dpi_message = False
    if msgbox.clickedButton() == restart_button:
        # Activate HDPI auto-scaling option since is needed for a proper
        # display when using OS scaling
        CONF.set('main', 'normal_screen_resolution', False)
        CONF.set('main', 'high_dpi_scaling', True)
        CONF.set('main', 'high_dpi_custom_scale_factor', False)
        self.restart()
    else:
        # Reconnect DPI scale changes to show a restart message
        # also update current dpi for future checks
        self.current_dpi = dpi
        self.screen.logicalDotsPerInchChanged.connect(
            self.show_dpi_change_message)
def set_window_title(self):
    """Compose the main window title and apply it.

    The title includes the Python version, and conditionally the Spyder
    version (dev mode), debug level, a custom title suffix and the
    active project path.
    """
    py_major, py_minor = sys.version_info[0], sys.version_info[1]
    # Development builds also show the Spyder version.
    if DEV is not None:
        new_title = u"Spyder %s (Python %s.%s)" % (__version__,
                                                   py_major,
                                                   py_minor)
    else:
        new_title = u"Spyder (Python %s.%s)" % (py_major, py_minor)
    debug_level = get_debug_level()
    if debug_level:
        new_title += u" [DEBUG MODE %d]" % debug_level
    if self.window_title is not None:
        new_title += u' -- ' + to_text_string(self.window_title)
    if self.projects is not None:
        active_path = self.projects.get_active_project_path()
        if active_path:
            # Shorten the home dir to '~' and lead with the project.
            active_path = active_path.replace(get_home_dir(), u'~')
            new_title = u'{0} - {1}'.format(active_path, new_title)
    self.base_title = new_title
    self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
    """Show a QMessageBox with a list of missing hard dependencies."""
    # Declare dependencies before trying to detect the missing ones
    dependencies.declare_dependencies()
    missing_deps = dependencies.missing_dependencies()
    if missing_deps:
        # Temporarily turn '<br>' into '\n' so that literal '<'
        # characters coming from the dependency specs can be escaped to
        # '&lt;' (keeping the html valid), then restore the '<br>' line
        # breaks. The previous code did replace('<', '<'), a no-op that
        # left the markup broken.
        missing_deps = (missing_deps.replace('<br>', '\n').
                        replace('<', '&lt;').replace('\n', '<br>'))
        QMessageBox.critical(self, _('Error'),
            _("<b>You have missing dependencies!</b>"
              "<br><br><tt>%s</tt><br>"
              "<b>Please install them to avoid this message.</b>"
              "<br><br>"
              "<i>Note</i>: Spyder could work without some of these "
              "dependencies, however to have a smooth experience when "
              "using Spyder we <i>strongly</i> recommend you to install "
              "all the listed missing dependencies.<br><br>"
              "Failing to install these dependencies might result in bugs. "
              "Please be sure that any found bugs are not the direct "
              "result of missing dependencies, prior to reporting a new "
              "issue."
              ) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
    """Read the window layout settings stored with *prefix* under
    *section* of the userconfig-based configuration.

    When *default* is True, factory defaults are returned and the inner
    layout (hexstate) is not restored.
    """
    reader = CONF.get_default if default else CONF.get
    window_size = reader(section, prefix+'size')
    prefs_dialog_size = reader(section, prefix+'prefs_dialog_size')
    hexstate = None if default else reader(section, prefix+'state', None)
    pos = reader(section, prefix+'position')
    # It's necessary to verify if the window/position value is valid
    # with the current screen. See spyder-ide/spyder#3748.
    width, height = pos[0], pos[1]
    screen_shape = QApplication.desktop().geometry()
    if screen_shape.width() < width or screen_shape.height() < height:
        # Saved position falls outside the current screen: use default.
        pos = CONF.get_default(section, prefix+'position')
    is_maximized = reader(section, prefix+'is_maximized')
    is_fullscreen = reader(section, prefix+'is_fullscreen')
    return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
            is_fullscreen)
def get_window_settings(self):
    """Return the current window settings.

    Symmetric to the 'set_window_settings' setter.
    """
    size = self.window_size
    window_size = (size.width(), size.height())
    is_fullscreen = self.isFullScreen()
    # While in fullscreen, isMaximized() is unreliable, so report the
    # cached flag instead.
    if is_fullscreen:
        is_maximized = self.maximized_flag
    else:
        is_maximized = self.isMaximized()
    pos = (self.window_position.x(), self.window_position.y())
    prefs = self.prefs_dialog_size
    prefs_dialog_size = (prefs.width(), prefs.height())
    hexstate = qbytearray_to_str(self.saveState())
    return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
            is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
                        pos, is_maximized, is_fullscreen):
    """Set window settings.

    Symmetric to the 'get_window_settings' accessor. Updates are
    suspended while the geometry, dock state and fullscreen/maximized
    flags are applied in sequence.
    """
    self.setUpdatesEnabled(False)
    self.window_size = QSize(window_size[0], window_size[1]) # width,height
    self.prefs_dialog_size = QSize(prefs_dialog_size[0],
                                   prefs_dialog_size[1])  # width,height
    self.window_position = QPoint(pos[0], pos[1]) # x,y
    # Reset state before resizing/moving so Qt honours the geometry.
    self.setWindowState(Qt.WindowNoState)
    self.resize(self.window_size)
    self.move(self.window_position)
    # Window layout
    if hexstate:
        self.restoreState( QByteArray().fromHex(
                str(hexstate).encode('utf-8')) )
        # Workaround for spyder-ide/spyder#880.
        # QDockWidget objects are not painted if restored as floating
        # windows, so we must dock them before showing the mainwindow.
        for widget in self.children():
            if isinstance(widget, QDockWidget) and widget.isFloating():
                self.floating_dockwidgets.append(widget)
                widget.setFloating(False)
    # Is fullscreen?
    if is_fullscreen:
        self.setWindowState(Qt.WindowFullScreen)
        self.__update_fullscreen_action()
    # Is maximized?
    if is_fullscreen:
        # In fullscreen only remember the maximized flag; applying
        # WindowMaximized now would drop out of fullscreen.
        self.maximized_flag = is_maximized
    elif is_maximized:
        self.setWindowState(Qt.WindowMaximized)
    self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
                                 none_state=False):
    """Save current window settings with *prefix* in
    the userconfig-based configuration, under *section*.

    When *none_state* is True the dock state (hexstate) is stored as
    None, forcing a default-layout regeneration on the next load.
    """
    win_size = self.window_size
    prefs_size = self.prefs_dialog_size
    CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
    CONF.set(section, prefix+'prefs_dialog_size',
             (prefs_size.width(), prefs_size.height()))
    CONF.set(section, prefix+'is_maximized', self.isMaximized())
    CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
    pos = self.window_position
    CONF.set(section, prefix+'position', (pos.x(), pos.y()))
    # Leave any maximized pane before capturing the dock state.
    self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
    if none_state:
        CONF.set(section, prefix + 'state', None)
    else:
        qba = self.saveState()
        CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
    CONF.set(section, prefix+'statusbar',
             not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
    """Tabify the dockwidgets of the *first* and *second* plugins."""
    first_dock, second_dock = first.dockwidget, second.dockwidget
    self.tabifyDockWidget(first_dock, second_dock)
# --- Layouts
def setup_layout(self, default=False):
    """Setup window layout.

    Loads the saved window settings and applies them. On a first run
    (no saved hexstate) the default layout is built and each quick
    layout slot is seeded with the current window settings.
    """
    prefix = 'window' + '/'
    settings = self.load_window_settings(prefix, default)
    hexstate = settings[0]
    self.first_spyder_run = False
    if hexstate is None:
        # First Spyder execution:
        self.setWindowState(Qt.WindowMaximized)
        self.first_spyder_run = True
        self.setup_default_layouts('default', settings)
        # Now that the initial setup is done, copy the window settings,
        # except for the hexstate in the quick layouts sections for the
        # default layouts.
        # Order and name of the default layouts is found in config.py
        section = 'quick_layouts'
        get_func = CONF.get_default if default else CONF.get
        order = get_func(section, 'order')
        # restore the original defaults if reset layouts is called
        if default:
            CONF.set(section, 'active', order)
            CONF.set(section, 'order', order)
            CONF.set(section, 'names', order)
        for index, name, in enumerate(order):
            prefix = 'layout_{0}/'.format(index)
            self.save_current_window_settings(prefix, section,
                                              none_state=True)
        # store the initial layout as the default in spyder
        prefix = 'layout_default/'
        section = 'quick_layouts'
        self.save_current_window_settings(prefix, section, none_state=True)
        self.current_quick_layout = 'default'
        # Regenerate menu
        self.quick_layout_set_menu()
    self.set_window_settings(*settings)
    # Old API
    for plugin in (self.widgetlist + self.thirdparty_plugins):
        try:
            plugin._initialize_plugin_in_mainwindow_layout()
        except AttributeError:
            # Plugin does not implement the old layout hook.
            pass
        except Exception as error:
            print("%s: %s" % (plugin, str(error)), file=STDERR)
            traceback.print_exc(file=STDERR)
    # New API: Tabify at the end
    for plugin in (self.widgetlist + self.thirdparty_plugins):
        if isinstance(plugin, SpyderDockablePlugin):
            self.tabify_plugin(plugin)
def setup_default_layouts(self, index, settings):
    """Setup default layouts when run for the first time.

    *index* selects one of the predefined layouts ('default' or one of
    the MATLAB/RSTUDIO/VERTICAL/HORIZONTAL constants); *settings* is
    the tuple returned by `load_window_settings`. Returns the applied
    layout definition dict.
    """
    self.setUpdatesEnabled(False)
    first_spyder_run = bool(self.first_spyder_run)  # Store copy
    if first_spyder_run:
        self.set_window_settings(*settings)
    else:
        if self.last_plugin:
            if self.last_plugin._ismaximized:
                self.maximize_dockwidget(restore=True)
        if not (self.isMaximized() or self.maximized_flag):
            self.showMaximized()
        # Freeze the window width while docks are rearranged; the
        # original limits are restored at the end.
        min_width = self.minimumWidth()
        max_width = self.maximumWidth()
        base_width = self.width()
        self.setFixedWidth(base_width)
    # IMPORTANT: order has to be the same as defined in the config file
    MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
    # Define widgets locally
    editor = self.editor
    console_ipy = self.ipyconsole
    console_int = self.console
    outline = self.outlineexplorer
    explorer_project = self.projects
    explorer_file = self.explorer
    explorer_variable = self.variableexplorer
    plots = self.plots
    history = self.historylog
    finder = self.findinfiles
    help_plugin = self.help
    helper = self.onlinehelp
    plugins = self.thirdparty_plugins
    # Stored for tests
    global_hidden_widgets = [finder, console_int, explorer_project,
                             helper] + plugins
    global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
                              self.search_toolbar]
    # Layout definition
    # --------------------------------------------------------------------
    # Layouts are organized by columns, each column is organized by rows.
    # Widths have to accumulate to 100 (except if hidden), height per
    # column has to accumulate to 100 as well
    # Spyder Default Initial Layout
    s_layout = {
        'widgets': [
            # Column 0
            [[explorer_project]],
            # Column 1
            [[editor]],
            # Column 2
            [[outline]],
            # Column 3
            [[help_plugin, explorer_variable, plots,     # Row 0
              helper, explorer_file, finder] + plugins,
             [console_int, console_ipy, history]]        # Row 1
            ],
        'width fraction': [15,            # Column 0 width
                           45,            # Column 1 width
                           5,             # Column 2 width
                           45],           # Column 3 width
        'height fraction': [[100],        # Column 0, row heights
                            [100],        # Column 1, row heights
                            [100],        # Column 2, row heights
                            [46, 54]],    # Column 3, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': [],
    }
    # RStudio
    r_layout = {
        'widgets': [
            # column 0
            [[editor],                            # Row 0
             [console_ipy, console_int]],         # Row 1
            # column 1
            [[explorer_variable, plots, history,  # Row 0
              outline, finder] + plugins,
             [explorer_file, explorer_project,    # Row 1
              help_plugin, helper]]
            ],
        'width fraction': [55,            # Column 0 width
                           45],           # Column 1 width
        'height fraction': [[55, 45],     # Column 0, row heights
                            [55, 45]],    # Column 1, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': [],
    }
    # Matlab
    m_layout = {
        'widgets': [
            # column 0
            [[explorer_file, explorer_project],
             [outline]],
            # column 1
            [[editor],
             [console_ipy, console_int]],
            # column 2
            [[explorer_variable, plots, finder] + plugins,
             [history, help_plugin, helper]]
            ],
        'width fraction': [10,            # Column 0 width
                           45,            # Column 1 width
                           45],           # Column 2 width
        'height fraction': [[55, 45],     # Column 0, row heights
                            [55, 45],     # Column 1, row heights
                            [55, 45]],    # Column 2, row heights
        'hidden widgets': global_hidden_widgets,
        'hidden toolbars': [],
    }
    # Vertically split
    v_layout = {
        'widgets': [
            # column 0
            [[editor],                                  # Row 0
             [console_ipy, console_int, explorer_file,  # Row 1
              explorer_project, help_plugin, explorer_variable, plots,
              history, outline, finder, helper] + plugins]
            ],
        'width fraction': [100],          # Column 0 width
        'height fraction': [[55, 45]],    # Column 0, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': [],
    }
    # Horizontally split
    h_layout = {
        'widgets': [
            # column 0
            [[editor]],                                 # Row 0
            # column 1
            [[console_ipy, console_int, explorer_file,  # Row 0
              explorer_project, help_plugin, explorer_variable, plots,
              history, outline, finder, helper] + plugins]
            ],
        'width fraction': [55,            # Column 0 width
                           45],           # Column 1 width
        'height fraction': [[100],        # Column 0, row heights
                            [100]],       # Column 1, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': []
    }
    # Layout selection
    layouts = {
        'default': s_layout,
        RSTUDIO: r_layout,
        MATLAB: m_layout,
        VERTICAL: v_layout,
        HORIZONTAL: h_layout,
    }
    layout = layouts[index]
    # Remove None from widgets layout
    widgets_layout = layout['widgets']
    widgets_layout_clean = []
    for column in widgets_layout:
        clean_col = []
        for row in column:
            clean_row = [w for w in row if w is not None]
            if clean_row:
                clean_col.append(clean_row)
        if clean_col:
            widgets_layout_clean.append(clean_col)
    # Flatten widgets list
    widgets = []
    for column in widgets_layout_clean:
        for row in column:
            for widget in row:
                widgets.append(widget)
    # Make every widget visible
    for widget in widgets:
        widget.toggle_view(True)
        try:
            # New API
            if widget.toggle_view_action:
                widget.toggle_view_action.setChecked(True)
        except AttributeError:
            # Old API
            if widget._toggle_view_action:
                widget._toggle_view_action.setChecked(True)
    # We use both directions to ensure proper update when moving from
    # 'Horizontal Split' to 'Spyder Default'
    # This also seems to help on random cases where the display seems
    # 'empty'
    for direction in (Qt.Vertical, Qt.Horizontal):
        # Arrange the widgets in one direction
        for idx in range(len(widgets) - 1):
            first, second = widgets[idx], widgets[idx+1]
            if first is not None and second is not None:
                self.splitDockWidget(first.dockwidget, second.dockwidget,
                                     direction)
    # Arrange the widgets in the other direction
    for column in widgets_layout_clean:
        for idx in range(len(column) - 1):
            first_row, second_row = column[idx], column[idx+1]
            self.splitDockWidget(first_row[0].dockwidget,
                                 second_row[0].dockwidget,
                                 Qt.Vertical)
    # Tabify
    for column in widgets_layout_clean:
        for row in column:
            for idx in range(len(row) - 1):
                first, second = row[idx], row[idx+1]
                self.tabify_plugins(first, second)
            # Raise front widget per row
            row[0].dockwidget.show()
            row[0].dockwidget.raise_()
    # Set dockwidget widths
    width_fractions = layout['width fraction']
    if len(width_fractions) > 1:
        _widgets = [col[0][0].dockwidget for col in widgets_layout]
        self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
    # Set dockwidget heights
    height_fractions = layout['height fraction']
    for idx, column in enumerate(widgets_layout_clean):
        if len(column) > 1:
            _widgets = [row[0].dockwidget for row in column]
            self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
    # Hide toolbars
    hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
    for toolbar in hidden_toolbars:
        if toolbar is not None:
            toolbar.close()
    # Hide widgets
    hidden_widgets = layout['hidden widgets']
    for widget in hidden_widgets:
        if widget is not None:
            widget.dockwidget.close()
    if first_spyder_run:
        self.first_spyder_run = False
    else:
        # Restore the width limits frozen at the top of this method.
        self.setMinimumWidth(min_width)
        self.setMaximumWidth(max_width)
        if not (self.isMaximized() or self.maximized_flag):
            self.showMaximized()
    self.setUpdatesEnabled(True)
    self.sig_layout_setup_ready.emit(layout)
    return layout
@Slot()
def toggle_previous_layout(self):
    """Switch to the previous layout in the quick-layout rotation."""
    self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
    """Switch to the next layout in the quick-layout rotation."""
    self.toggle_layout('next')
def toggle_layout(self, direction='next'):
    """Cycle through the active quick layouts.

    *direction* is 'next' or 'previous'. Does nothing when no layout
    is active.
    """
    names = CONF.get('quick_layouts', 'names')
    order = CONF.get('quick_layouts', 'order')
    active = CONF.get('quick_layouts', 'active')
    if len(active) == 0:
        return
    # The rotation starts at the default layout, followed by the
    # indices of the active layouts in their configured order.
    rotation = ['default']
    rotation += [names.index(name) for name in order if name in active]
    current = self.current_quick_layout
    if current is None:
        # No layout applied yet: start from the default.
        current = 'default'
    if current in rotation:
        position = rotation.index(current)
    else:
        position = 0
    step = {'next': 1, 'previous': -1}[direction]
    target = rotation[(position + step) % len(rotation)]
    self.quick_layout_switch(target)
def quick_layout_set_menu(self):
    """Rebuild the quick-layouts menu.

    Adds the default layout, one entry per active user layout, and the
    save/preferences/reset actions. Called whenever the stored layouts
    change.
    """
    get = CONF.get
    names = get('quick_layouts', 'names')
    order = get('quick_layouts', 'order')
    active = get('quick_layouts', 'active')
    # (A dead `ql_actions = []` that was immediately overwritten has
    # been removed.)
    ql_actions = [create_action(self, _('Spyder Default Layout'),
                                triggered=lambda:
                                self.quick_layout_switch('default'))]
    for name in order:
        if name in active:
            index = names.index(name)
            # closure required so lambda works with the default parameter
            def trigger(i=index, self=self):
                return lambda: self.quick_layout_switch(i)
            qli_act = create_action(self, name, triggered=trigger())
            # closure above replaces the following which stopped working
            # qli_act = create_action(self, name, triggered=lambda i=index:
            #     self.quick_layout_switch(i)
            ql_actions += [qli_act]
    self.ql_save = create_action(self, _("Save current layout"),
                                 triggered=lambda:
                                 self.quick_layout_save(),
                                 context=Qt.ApplicationShortcut)
    self.ql_preferences = create_action(self, _("Layout preferences"),
                                        triggered=lambda:
                                        self.quick_layout_settings(),
                                        context=Qt.ApplicationShortcut)
    self.ql_reset = create_action(self, _('Reset to spyder default'),
                                  triggered=self.reset_window_layout)
    self.register_shortcut(self.ql_save, "_", "Save current layout")
    self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
    ql_actions += [None]
    ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
    self.quick_layout_menu.clear()
    add_actions(self.quick_layout_menu, ql_actions)
    # The preferences entry is useless while no layout is stored.
    if len(order) == 0:
        self.ql_preferences.setEnabled(False)
    else:
        self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
    """Ask for confirmation, then restore the default window layout."""
    message = _("Window layout will be reset to default settings: "
                "this affects window position, size and dockwidgets.\n"
                "Do you want to continue?")
    choice = QMessageBox.warning(self, _("Warning"), message,
                                 QMessageBox.Yes | QMessageBox.No)
    if choice == QMessageBox.Yes:
        self.setup_layout(default=True)
def quick_layout_save(self):
    """Save the current window layout under a user-chosen name.

    Opens the layout-save dialog. Overwriting an existing layout asks
    for confirmation; new layouts are appended to the configuration.
    Saved layouts are always made active.

    Bug fix: the previous code tested the raw QMessageBox return value
    for truth, but QMessageBox.No is a non-zero enum, so clicking "No"
    still overwrote the layout. The result is now compared against
    QMessageBox.Yes explicitly.
    """
    get = CONF.get
    set_ = CONF.set
    names = get('quick_layouts', 'names')
    order = get('quick_layouts', 'order')
    active = get('quick_layouts', 'active')
    dlg = self.dialog_layout_save(self, names)
    if dlg.exec_():
        name = dlg.combo_box.currentText()
        if name in names:
            button = QMessageBox.warning(
                self, _("Warning"),
                _("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
                QMessageBox.Yes | QMessageBox.No)
            answer = button == QMessageBox.Yes
            index = order.index(name)
        else:
            answer = True
            if None in names:
                # Reuse an empty slot left by a deleted layout.
                index = names.index(None)
                names[index] = name
            else:
                index = len(names)
                names.append(name)
            order.append(name)
        # Always make active a new layout even if it overwrites an inactive
        # layout
        if name not in active:
            active.append(name)
        if answer:
            self.save_current_window_settings('layout_{}/'.format(index),
                                              section='quick_layouts')
            set_('quick_layouts', 'names', names)
            set_('quick_layouts', 'order', order)
            set_('quick_layouts', 'active', active)
            self.quick_layout_set_menu()
def quick_layout_settings(self):
    """Open the layout-preferences dialog and persist its results."""
    section = 'quick_layouts'
    dlg = self.dialog_layout_settings(self,
                                      CONF.get(section, 'names'),
                                      CONF.get(section, 'order'),
                                      CONF.get(section, 'active'))
    if dlg.exec_():
        # Write back whatever the dialog produced and refresh the menu.
        CONF.set(section, 'names', dlg.names)
        CONF.set(section, 'order', dlg.order)
        CONF.set(section, 'active', dlg.active)
        self.quick_layout_set_menu()
def quick_layout_switch(self, index):
    """Switch to quick layout number *index*.

    *index* is either the string 'default' or the integer slot of a
    stored layout. Missing custom layouts raise a warning dialog;
    missing default layouts are regenerated.
    """
    section = 'quick_layouts'
    try:
        settings = self.load_window_settings('layout_{}/'.format(index),
                                             section=section)
        (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
         is_fullscreen) = settings
        # The defaults layouts will always be regenerated unless there was
        # an overwrite, either by rewriting with same name, or by deleting
        # and then creating a new one
        if hexstate is None:
            # The value for hexstate shouldn't be None for a custom saved
            # layout (ie, where the index is greater than the number of
            # defaults). See spyder-ide/spyder#6202.
            if index != 'default' and index >= self.DEFAULT_LAYOUTS:
                QMessageBox.critical(
                        self, _("Warning"),
                        _("Error opening the custom layout. Please close"
                          " Spyder and try again. If the issue persists,"
                          " then you must use 'Reset to Spyder default' "
                          "from the layout menu."))
                return
            self.setup_default_layouts(index, settings)
    except cp.NoOptionError:
        # The requested layout slot was never written to the config.
        QMessageBox.critical(self, _("Warning"),
                             _("Quick switch layout #%s has not yet "
                               "been defined.") % str(index))
        return
    # TODO: is there any real use in calling the previous layout
    # setting?
    # self.previous_layout_settings = self.get_window_settings()
    self.set_window_settings(*settings)
    self.current_quick_layout = index
    # make sure the flags are correctly set for visible panes
    for plugin in (self.widgetlist + self.thirdparty_plugins):
        action = plugin._toggle_view_action
        action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
    """Refresh the text and tooltip of the show-toolbars menu entry."""
    # Text and tooltip are always the same string, so compute it once.
    if self.toolbars_visible:
        label = _("Hide toolbars")
    else:
        label = _("Show toolbars")
    self.show_toolbars_action.setText(label)
    self.show_toolbars_action.setToolTip(label)
def save_visible_toolbars(self):
    """Persist the object names of the visible toolbars to the .ini file."""
    names = [toolbar.objectName() for toolbar in self.visible_toolbars]
    CONF.set('main', 'last_visible_toolbars', names)
def get_visible_toolbars(self):
    """Collect the currently visible toolbars into self.visible_toolbars."""
    self.visible_toolbars = [
        toolbar for toolbar in self.toolbarslist
        if toolbar.toggleViewAction().isChecked()
    ]
def load_last_visible_toolbars(self):
    """Restore the set of visible toolbars saved in the .ini file."""
    saved_names = CONF.get('main', 'last_visible_toolbars', default=[])
    if saved_names:
        by_name = {tb.objectName(): tb for tb in self.toolbarslist}
        # Silently drop names of toolbars that no longer exist.
        self.visible_toolbars = [by_name[name] for name in saved_names
                                 if name in by_name]
    else:
        # Nothing saved yet: start from whatever is checked right now.
        self.get_visible_toolbars()
    self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
    """Show/Hides toolbars."""
    show = not self.toolbars_visible
    CONF.set('main', 'toolbars_visible', show)
    if show:
        # Turning toolbars back on: persist their names in the config.
        self.save_visible_toolbars()
    else:
        # Hiding: first capture which toolbars are currently visible.
        self.get_visible_toolbars()
    for tb in self.visible_toolbars:
        tb.toggleViewAction().setChecked(show)
        tb.setVisible(show)
    self.toolbars_visible = show
    self._update_show_toolbars_action()
# --- Other
def update_execution_state_kernel(self):
    """Handle execution state of the current console."""
    try:
        self.ipyconsole.update_execution_state_kernel()
    except AttributeError:
        # IPython console plugin not available (yet); nothing to do.
        pass
def valid_project(self):
    """Handle an invalid active project by closing it.

    If the Projects plugin reports an active project whose path is no
    longer a valid Spyder project, warn the user and close it.
    """
    try:
        path = self.projects.get_active_project_path()
    except AttributeError:
        # Projects plugin not available.
        return
    # Fix: the original nested `if bool(path):` and a redundant inner
    # `if path:` — a single guard clause expresses the same logic.
    if not path:
        return
    if not self.projects.is_valid_project(path):
        QMessageBox.critical(
            self,
            _('Error'),
            _("<b>{}</b> is no longer a valid Spyder project! "
              "Since it is the current active project, it will "
              "be closed automatically.").format(path))
        self.projects.close_project()
def free_memory(self):
    """Free memory after event."""
    # Force a full garbage-collection pass.
    gc.collect()
def plugin_focus_changed(self):
    """Focus has changed from one plugin to another"""
    # Re-evaluate which Edit/Search menu entries apply to the newly
    # focused widget.
    self.update_edit_menu()
    self.update_search_menu()
def show_shortcuts(self, menu):
    """Show action shortcuts in menu.

    Re-applies the shortcut stored on each SpyderAction
    (`_shown_shortcut`) and recurses into submenus.
    """
    for action in menu.actions():
        shortcut = getattr(action, '_shown_shortcut', None)
        if shortcut:
            # SpyderAction with a stored shortcut: re-apply it.
            # (The original also re-checked `is not None` here, but the
            # truthiness test above already excludes None — dead code.)
            action.setShortcut(shortcut)
        elif action.menu() is not None:
            # Submenu: process its actions recursively.
            self.show_shortcuts(action.menu())
def hide_shortcuts(self, menu):
    """Hide action shortcuts in menu.

    Clears the visible shortcut of each SpyderAction that has one
    stored (`_shown_shortcut`) and recurses into submenus.
    """
    for action in menu.actions():
        if getattr(action, '_shown_shortcut', None):
            # SpyderAction with a stored shortcut: blank the shown one.
            # (The original re-checked `is not None` here, but the
            # truthiness test above already excludes None — dead code.)
            action.setShortcut(QKeySequence())
        elif action.menu() is not None:
            # Submenu: process its actions recursively.
            self.hide_shortcuts(action.menu())
def hide_options_menus(self):
    """Hide options menu when menubar is pressed in macOS."""
    for plugin in self.widgetlist + self.thirdparty_plugins:
        if plugin.CONF_SECTION == 'editor':
            # The editor's options menu lives on its current stack.
            self.editor.get_current_editorstack().menu.hide()
            continue
        try:
            # New API
            plugin.options_menu.hide()
        except AttributeError:
            # Old API
            plugin._options_menu.hide()
def get_focus_widget_properties(self):
    """Return the focused widget and its edit-related properties.

    Returns the tuple ``(widget, properties)`` where ``properties`` is
    ``(is_console, not_readonly, readwrite_editor)`` when the focused
    widget is an editor or console, and ``None`` otherwise.
    """
    from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
    from spyder.plugins.ipythonconsole.widgets import ControlWidget
    widget = QApplication.focusWidget()
    if not isinstance(widget, (TextEditBaseWidget, ControlWidget)):
        return widget, None
    is_console = isinstance(widget, ControlWidget)
    editable = not widget.isReadOnly()
    return widget, (is_console, editable, editable and not is_console)
def update_edit_menu(self):
    """Update edit menu"""
    widget, textedit_properties = self.get_focus_widget_properties()
    if textedit_properties is None:
        # Focus is not on an editor/console widget.
        return
    console, not_readonly, readwrite_editor = textedit_properties
    # Editor has focus but no file is opened in it: nothing to enable.
    if (not console and not_readonly and self.editor
            and not self.editor.is_file_opened()):
        return
    # Start from a clean slate: disable everything except "Select All".
    for entry in self.edit_menu.actions():
        entry.setEnabled(False)
    self.selectall_action.setEnabled(True)
    # Undo/redo mirror the document's history state.
    document = widget.document()
    self.undo_action.setEnabled(
        readwrite_editor and document.isUndoAvailable())
    self.redo_action.setEnabled(
        readwrite_editor and document.isRedoAvailable())
    # Clipboard-related actions.
    selection = widget.has_selected_text()
    self.copy_action.setEnabled(selection)
    self.cut_action.setEnabled(selection and not_readonly)
    self.paste_action.setEnabled(not_readonly)
    # Editor-specific actions (comment, indent, ...) apply only when
    # the editor has focus and the current file is writable.
    if not console and not_readonly and self.editor:
        for action in self.editor.edit_menu_actions:
            action.setEnabled(True)
def update_search_menu(self):
    """Update search menu"""
    # Disable everything except the last entry (Find in files).
    for entry in self.search_menu.actions()[:-1]:
        entry.setEnabled(False)
    widget, textedit_properties = self.get_focus_widget_properties()
    if textedit_properties is None:
        # Focus is not on an editor/console widget.
        return
    console, not_readonly, readwrite_editor = textedit_properties
    if console:
        # Find actions only have an effect in the Editor.
        return
    for action in self.search_menu.actions():
        try:
            action.setEnabled(True)
        except RuntimeError:
            # Underlying Qt object was already deleted.
            pass
    # Replace is not available for read-only files.
    if len(self.search_menu_actions) > 3:
        self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
    """Populate the Panes menu with plugin toggle-view actions.

    Known plugins are placed at a canonical position; unknown ones are
    appended at the end. ``None`` entries become menu separators.
    """
    order = ['editor', 'ipython_console', 'variable_explorer',
             'help', 'plots', None, 'explorer', 'outline_explorer',
             'project_explorer', 'find_in_files', None, 'historylog',
             'profiler', 'breakpoints', 'pylint', None,
             'onlinehelp', 'internal_console', None]
    for plugin in self.widgetlist:
        try:
            # New API
            action = plugin.toggle_view_action
        except AttributeError:
            # Old API
            action = plugin._toggle_view_action
        action.setChecked(plugin.dockwidget.isVisible())
        try:
            name = plugin.CONF_SECTION
            pos = order.index(name)
        except ValueError:
            pos = None
        if pos is not None:
            order[pos] = action
        else:
            order.append(action)
    # Drop slots no plugin filled (still plain strings). The original
    # used `type(action) is str` plus list.remove() in a loop (O(n^2));
    # isinstance + a comprehension is the idiomatic equivalent.
    actions = [item for item in order if not isinstance(item, str)]
    self.plugins_menu_actions = actions
    add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
    """Populate the Toolbars menu, keeping a canonical ordering."""
    order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
             'main_toolbar', 'Global working directory', None,
             'search_toolbar', 'edit_toolbar', 'source_toolbar']
    for toolbar in self.toolbarslist:
        action = toolbar.toggleViewAction()
        try:
            pos = order.index(toolbar.objectName())
        except ValueError:
            # Unknown toolbar: append it after the known ones.
            order.append(action)
        else:
            order[pos] = action
    add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
    """Reimplement Qt method to customize the default context menu."""
    menu = QMenu('', self)
    # First three help entries, a separator, then the last help entry.
    entries = self.help_menu_actions[:3] + [None,
                                            self.help_menu_actions[-1]]
    add_actions(menu, entries)
    return menu
def set_splash(self, message):
    """Set splash message"""
    if self.splash is None:
        return
    if message:
        logger.info(message)
    self.splash.show()
    self.splash.showMessage(
        message,
        int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
        QColor(Qt.white))
    # Keep the splash screen responsive while the UI is being built.
    QApplication.processEvents()
def closeEvent(self, event):
    """closeEvent reimplementation"""
    # Run exit tasks; closing(True) lets the user/plugins veto the close.
    if self.closing(True):
        event.accept()
    else:
        event.ignore()
def resizeEvent(self, event):
    """Reimplement Qt method"""
    # Only remember the size when in a "normal" state, so we can
    # restore it in the next session.
    if not self.isMaximized() and not self.fullscreen_flag:
        self.window_size = self.size()
    QMainWindow.resizeEvent(self, event)
    # To be used by the tour to be able to resize
    self.sig_resized.emit(event)
def moveEvent(self, event):
    """Reimplement Qt method"""
    # Only remember the position when in a "normal" state, so we can
    # restore it in the next session.
    if not self.isMaximized() and not self.fullscreen_flag:
        self.window_position = self.pos()
    QMainWindow.moveEvent(self, event)
    # To be used by the tour to be able to move
    self.sig_moved.emit(event)
def hideEvent(self, event):
    """Reimplement Qt method"""
    try:
        # Mark as visible the plugin that owned the focus when the
        # window was hidden, so it refreshes correctly when shown again.
        for plugin in (self.widgetlist + self.thirdparty_plugins):
            # TODO: Remove old API
            try:
                # New API
                if plugin.get_widget().isAncestorOf(
                        self.last_focused_widget):
                    plugin.change_visibility(True)
            except AttributeError:
                # Old API
                if plugin.isAncestorOf(self.last_focused_widget):
                    plugin._visibility_changed(True)
        QMainWindow.hideEvent(self, event)
    except RuntimeError:
        # A wrapped C++ object may already be deleted (e.g. during
        # shutdown); still forward the event to Qt.
        QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
    """To keep track of the last focused widget"""
    if now is not None:
        self.last_focused_widget = now
    elif QApplication.activeWindow() is not None:
        # Focus left Qt entirely: re-focus the active window and record
        # whichever widget ends up focused.
        QApplication.activeWindow().setFocus()
        self.last_focused_widget = QApplication.focusWidget()
    self.previous_focused_widget = old
def closing(self, cancelable=False):
    """Exit tasks.

    Returns True when the close may proceed, False when it was vetoed
    (by the user prompt or by a plugin).
    """
    # Nothing to do if we already closed or never finished starting up.
    if self.already_closed or self.is_starting_up:
        return True
    if cancelable and CONF.get('main', 'prompt_on_exit'):
        reply = QMessageBox.critical(self, 'Spyder',
                                     'Do you really want to exit?',
                                     QMessageBox.Yes, QMessageBox.No)
        if reply == QMessageBox.No:
            return False
    if CONF.get('main', 'single_instance') and self.open_files_server:
        self.open_files_server.close()
    # Completion clients may veto the close too.
    if not self.completions.closing_plugin(cancelable):
        return False
    for plugin in (self.widgetlist + self.thirdparty_plugins):
        # New API
        try:
            plugin.close_window()
            if not plugin.on_close(cancelable):
                return False
        except AttributeError:
            pass
        # Old API
        try:
            plugin._close_window()
            if not plugin.closing_plugin(cancelable):
                return False
        except AttributeError:
            pass
    # Save window settings *after* closing all plugin windows, in order
    # to show them in their previous locations in the next session.
    # Fixes spyder-ide/spyder#12139
    prefix = 'window' + '/'
    self.save_current_window_settings(prefix)
    self.dialog_manager.close_all()
    if self.toolbars_visible:
        self.save_visible_toolbars()
    self.completions.shutdown()
    self.already_closed = True
    return True
def add_dockwidget(self, plugin):
    """
    Add a plugin QDockWidget to the main window.

    Supports both the new plugin API (``is_compatible`` /
    ``create_dockwidget(self)``) and the old one (``_is_compatible`` /
    ``_create_dockwidget()``); the AttributeError fallback selects
    which one the plugin implements.
    """
    try:
        # New API
        if plugin.is_compatible:
            dockwidget, location = plugin.create_dockwidget(self)
            if CONF.get('main', 'vertical_dockwidget_titlebars'):
                dockwidget.setFeatures(
                    dockwidget.features()
                    | QDockWidget.DockWidgetVerticalTitleBar)
            self.addDockWidget(location, dockwidget)
            self.widgetlist.append(plugin)
    except AttributeError:
        # Old API
        if plugin._is_compatible:
            dockwidget, location = plugin._create_dockwidget()
            if CONF.get('main', 'vertical_dockwidget_titlebars'):
                dockwidget.setFeatures(
                    dockwidget.features()
                    | QDockWidget.DockWidgetVerticalTitleBar)
            self.addDockWidget(location, dockwidget)
            self.widgetlist.append(plugin)
@Slot()
def close_current_dockwidget(self):
    """Close the pane that contains the currently focused widget."""
    widget = QApplication.focusWidget()
    for plugin in (self.widgetlist + self.thirdparty_plugins):
        # TODO: remove old API
        try:
            # New API
            if plugin.get_widget().isAncestorOf(widget):
                # Unchecking the toggle-view action hides the pane.
                plugin._toggle_view_action.setChecked(False)
                break
        except AttributeError:
            # Old API
            if plugin.isAncestorOf(widget):
                plugin._toggle_view_action.setChecked(False)
                break
def toggle_lock(self, value):
    """Lock/Unlock dockwidgets and toolbars"""
    self.interface_locked = value
    CONF.set('main', 'panes_locked', value)
    locked = self.interface_locked
    self.lock_interface_action.setIcon(
        ima.icon('lock' if locked else 'lock_open'))
    self.lock_interface_action.setText(
        _("Unlock panes and toolbars") if locked else
        _("Lock panes and toolbars"))
    # Panes: locked panes are docked and lose their title bar.
    for plugin in self.widgetlist + self.thirdparty_plugins:
        dock = plugin.dockwidget
        if locked:
            if dock.isFloating():
                dock.setFloating(False)
            dock.remove_title_bar()
        else:
            dock.set_title_bar()
    # Toolbars can only be moved while the interface is unlocked.
    for toolbar in self.toolbarslist:
        toolbar.setMovable(not locked)
def __update_maximize_action(self):
    """Refresh the maximize action's text, icon and tooltip."""
    if self.state_before_maximizing is None:
        # No pane is maximized right now.
        text = tip = _("Maximize current pane")
        icon_name = 'maximize'
    else:
        text = _("Restore current pane")
        tip = _("Restore pane to its original size")
        icon_name = 'unmaximize'
    self.maximize_action.setText(text)
    self.maximize_action.setIcon(ima.icon(icon_name))
    self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
    """Shortcut: Ctrl+Alt+Shift+M
    First call: maximize current dockwidget
    Second call (or restore=True): restore original window layout"""
    if self.state_before_maximizing is None:
        if restore:
            # Nothing is maximized, so there is nothing to restore.
            return
        # Select plugin to maximize
        self.state_before_maximizing = self.saveState()
        focus_widget = QApplication.focusWidget()
        for plugin in (self.widgetlist + self.thirdparty_plugins):
            plugin.dockwidget.hide()
            try:
                # New API
                if plugin.get_widget().isAncestorOf(focus_widget):
                    self.last_plugin = plugin
            except Exception:
                # Old API
                if plugin.isAncestorOf(focus_widget):
                    self.last_plugin = plugin
        # Only plugins that have a dockwidget are part of widgetlist,
        # so last_plugin can be None after the above "for" cycle.
        # For example, this happens if, after Spyder has started, focus
        # is set to the Working directory toolbar (which doesn't have
        # a dockwidget) and then you press the Maximize button
        if self.last_plugin is None:
            # Using the Editor as default plugin to maximize
            self.last_plugin = self.editor
        # Maximize last_plugin
        self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
        try:
            # New API
            self.setCentralWidget(self.last_plugin.get_widget())
        except AttributeError:
            # Old API
            self.setCentralWidget(self.last_plugin)
        self.last_plugin._ismaximized = True
        # Workaround to solve an issue with editor's outline explorer:
        # (otherwise the whole plugin is hidden and so is the outline explorer
        # and the latter won't be refreshed if not visible)
        try:
            # New API
            self.last_plugin.get_widget().show()
            self.last_plugin.change_visibility(True)
        except AttributeError:
            # Old API
            self.last_plugin.show()
            self.last_plugin._visibility_changed(True)
        if self.last_plugin is self.editor:
            # Automatically show the outline if the editor was maximized:
            self.addDockWidget(Qt.RightDockWidgetArea,
                               self.outlineexplorer.dockwidget)
            self.outlineexplorer.dockwidget.show()
    else:
        # Restore original layout (before maximizing current dockwidget)
        try:
            # New API
            self.last_plugin.dockwidget.setWidget(
                self.last_plugin.get_widget())
        except AttributeError:
            # Old API
            self.last_plugin.dockwidget.setWidget(self.last_plugin)
        self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
        self.setCentralWidget(None)
        try:
            # New API
            self.last_plugin.get_widget().is_maximized = False
        except AttributeError:
            # Old API
            self.last_plugin._ismaximized = False
        self.restoreState(self.state_before_maximizing)
        # Clearing this flag marks "nothing maximized" for the next call.
        self.state_before_maximizing = None
    try:
        # New API
        self.last_plugin.get_widget().get_focus_widget().setFocus()
    except AttributeError:
        # Old API
        self.last_plugin.get_focus_widget().setFocus()
    self.__update_maximize_action()
def __update_fullscreen_action(self):
    """Refresh the fullscreen action icon to match the current state."""
    name = ('window_nofullscreen' if self.fullscreen_flag
            else 'window_fullscreen')
    icon = ima.icon(name)
    # ima.icon may hand back an icon name string; resolve it if so.
    if is_text_string(icon):
        icon = get_icon(icon)
    self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
    """Toggle the main window between fullscreen and normal state."""
    if self.fullscreen_flag:
        # Leave fullscreen.
        self.fullscreen_flag = False
        if os.name == 'nt':
            # Undo the frameless/stay-on-top flags set below (XOR).
            self.setWindowFlags(
                self.windowFlags()
                ^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
            self.setGeometry(self.saved_normal_geometry)
        self.showNormal()
        if self.maximized_flag:
            # Window was maximized before going fullscreen.
            self.showMaximized()
    else:
        # Enter fullscreen; remember the state we must come back to.
        self.maximized_flag = self.isMaximized()
        self.fullscreen_flag = True
        self.saved_normal_geometry = self.normalGeometry()
        if os.name == 'nt':
            # Due to limitations of the Windows DWM, compositing is not
            # handled correctly for OpenGL based windows when going into
            # full screen mode, so we need to use this workaround.
            # See spyder-ide/spyder#4291.
            self.setWindowFlags(self.windowFlags()
                                | Qt.FramelessWindowHint
                                | Qt.WindowStaysOnTopHint)
            screen_number = QDesktopWidget().screenNumber(self)
            if screen_number < 0:
                screen_number = 0
            r = QApplication.desktop().screenGeometry(screen_number)
            # Oversize by 1px on each edge to cover the whole screen.
            self.setGeometry(
                r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
            self.showNormal()
        else:
            self.showFullScreen()
    self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
    """Add widget actions to toolbar"""
    actions = widget.toolbar_actions
    if actions is None:
        return
    add_actions(toolbar, actions)
@Slot()
def about(self):
    """Create About Spyder dialog with general information."""
    versions = get_versions()
    # Show Git revision for development version
    revlink = ''
    if versions['revision']:
        rev = versions['revision']
        revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
                  "commit/%s'>Commit: %s</a>)" % (rev, rev)
    # Get current font properties
    font = self.font()
    font_family = font.family()
    font_size = font.pointSize()
    if sys.platform == 'darwin':
        # Text looks oversized on macOS with the default point size.
        font_size -= 2
    msgBox = QMessageBox(self)
    msgBox.setText(
        """
        <div style='font-family: "{font_family}";
                    font-size: {font_size}pt;
                    font-weight: normal;
                    '>
        <p>
        <b>Spyder {spyder_ver}</b> {revision}
        <br>
        The Scientific Python Development Environment |
        <a href="{website_url}">Spyder-IDE.org</a>
        <br>
        Copyright &copy; 2009-2019 Spyder Project Contributors and
        <a href="{github_url}/blob/master/AUTHORS.txt">others</a>.
        <br>
        Distributed under the terms of the
        <a href="{github_url}/blob/master/LICENSE.txt">MIT License</a>.
        </p>
        <p>
        Created by Pierre Raybaut; current maintainer is Carlos Cordoba.
        Developed by the
        <a href="{github_url}/graphs/contributors">international
        Spyder community</a>. Many thanks to all the Spyder beta testers
        and dedicated users.
        </p>
        <p>For help with Spyder errors and crashes, please read our
        <a href="{trouble_url}">Troubleshooting Guide</a>, and for bug
        reports and feature requests, visit our
        <a href="{github_url}">Github site</a>. For project discussion,
        see our <a href="{forum_url}">Google Group</a>.
        </p>
        <p>
        This project is part of a larger effort to promote and
        facilitate the use of Python for scientific and engineering
        software development.
        The popular Python distributions
        <a href="https://www.anaconda.com/download/">Anaconda</a> and
        <a href="https://winpython.github.io/">WinPython</a>
        also contribute to this plan.
        </p>
        <p>
        Python {python_ver} {bitness}-bit | Qt {qt_ver} |
        {qt_api} {qt_api_ver} | {os_name} {os_ver}
        </p>
        <p><small>Certain source files under other compatible permissive
        licenses and/or originally by other authors.
        Spyder 3 theme icons derived from
        <a href="https://fontawesome.com/">Font Awesome</a> 4.7
        (&copy; 2016 David Gandy; SIL OFL 1.1) and
        <a href="http://materialdesignicons.com/">Material Design</a>
        (&copy; 2014 Austin Andrews; SIL OFL 1.1).
        Most Spyder 2 theme icons sourced from the
        <a href="https://www.everaldo.com">Crystal Project iconset</a>
        (&copy; 2006-2007 Everaldo Coelho; LGPL 2.1+).
        Other icons from
        <a href="http://p.yusukekamiyamane.com/">Yusuke Kamiyamane</a>
        (&copy; 2013 Yusuke Kamiyamane; CC-BY 3.0),
        the <a href="http://www.famfamfam.com/lab/icons/silk/">FamFamFam
        Silk icon set</a> 1.3 (&copy; 2006 Mark James; CC-BY 2.5), and
        the <a href="https://www.kde.org/">KDE Oxygen icons</a>
        (&copy; 2007 KDE Artists; LGPL 3.0+).</small>
        </p>
        <p>
        See the <a href="{github_url}/blob/master/NOTICE.txt">NOTICE</a>
        file for full legal information.
        </p>
        </div>
        """.format(
            spyder_ver=versions['spyder'],
            revision=revlink,
            website_url=__website_url__,
            github_url=__project_url__,
            trouble_url=__trouble_url__,
            forum_url=__forum_url__,
            python_ver=versions['python'],
            bitness=versions['bitness'],
            qt_ver=versions['qt'],
            qt_api=versions['qt_api'],
            qt_api_ver=versions['qt_api_ver'],
            os_name=versions['system'],
            os_ver=versions['release'],
            font_family=font_family,
            font_size=font_size,
        )
    )
    msgBox.setWindowTitle(_("About %s") % "Spyder")
    msgBox.setStandardButtons(QMessageBox.Ok)
    from spyder.config.gui import is_dark_interface
    # NOTE(review): the light icon is used on dark interfaces and
    # vice versa — looks intentional (contrast), but confirm.
    if is_dark_interface():
        icon_filename = "spyder.svg"
    else:
        icon_filename = "spyder_dark.svg"
    app_icon = QIcon(get_image_path(icon_filename))
    msgBox.setIconPixmap(app_icon.pixmap(QSize(64, 64)))
    msgBox.setTextInteractionFlags(
        Qt.LinksAccessibleByMouse | Qt.TextSelectableByMouse)
    msgBox.exec_()
@Slot()
def show_dependencies(self):
    """Show Spyder's Dependencies dialog box"""
    from spyder.widgets.dependencies import DependenciesDialog
    dlg = DependenciesDialog(self)
    dlg.set_data(dependencies.DEPENDENCIES)
    # Modal dialog: blocks until the user closes it.
    dlg.exec_()
def render_issue(self, description='', traceback=''):
    """Render issue before sending it to Github.

    Returns the Markdown body for a new Github issue, combining the
    user description, an optional traceback section and the versions
    of Spyder and its dependencies.
    """
    # Get component versions
    versions = get_versions()
    # Get git revision for development version
    revision = ''
    if versions['revision']:
        revision = versions['revision']
    # Make a description header in case no description is supplied
    if not description:
        description = "### What steps reproduce the problem?"
    # Make error section from traceback and add appropriate reminder header
    if traceback:
        error_section = ("### Traceback\n"
                         "```python-traceback\n"
                         "{}\n"
                         "```".format(traceback))
    else:
        error_section = ''
    issue_template = """\
## Description

{description}

{error_section}

## Versions

* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}

### Dependencies

```
{dependencies}
```
""".format(description=description,
           error_section=error_section,
           spyder_version=versions['spyder'],
           commit=revision,
           python_version=versions['python'],
           qt_version=versions['qt'],
           qt_api_name=versions['qt_api'],
           qt_api_version=versions['qt_api_ver'],
           os_name=versions['system'],
           os_version=versions['release'],
           dependencies=dependencies.status())
    return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
    """Report a Spyder issue to github, generating body text if needed."""
    if body is None:
        # No prefilled body: let the user compose the report in the
        # interactive error dialog.
        from spyder.widgets.reporterror import SpyderErrorDialog
        report_dlg = SpyderErrorDialog(self, is_report=True)
        report_dlg.set_color_scheme(CONF.get('appearance', 'selected'))
        report_dlg.show()
        return
    if not open_webpage:
        return
    # Prefilled body: open Github's "new issue" page with it.
    if PY3:
        from urllib.parse import quote
    else:
        from urllib import quote  # analysis:ignore
    from qtpy.QtCore import QUrlQuery
    url = QUrl(__project_url__ + '/issues/new')
    query = QUrlQuery()
    query.addQueryItem("body", quote(body))
    if title:
        query.addQueryItem("title", quote(title))
    url.setQuery(query)
    QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
    """Open Spyder troubleshooting guide in a web browser."""
    url = QUrl(__trouble_url__)
    QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
    """Open Spyder Google Group in a web browser."""
    url = QUrl(__forum_url__)
    QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
    """Global callback"""
    from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
    from spyder.plugins.ipythonconsole.widgets import ControlWidget
    widget = QApplication.focusWidget()
    # The triggering action stores the name of the method to invoke.
    action = self.sender()
    callback = from_qvariant(action.data(), to_text_string)
    if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
        # Dispatch the stored method name on the focused widget.
        getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
    """Redirect (state=True) or restore the Internal Console streams."""
    handler = (self.console.redirect_stds if state
               else self.console.restore_stds)
    handler()
def open_external_console(self, fname, wdir, args, interact, debug, python,
                          python_args, systerm, post_mortem=False):
    """Open external console.

    Only the ``systerm=True`` path is handled here (run the script in
    an external system terminal); other combinations are no-ops.
    """
    if systerm:
        # Running script in an external system terminal
        try:
            # Pick the configured interpreter, or Spyder's own default.
            if CONF.get('main_interpreter', 'default'):
                executable = get_python_executable()
            else:
                executable = CONF.get('main_interpreter', 'executable')
            programs.run_python_script_in_terminal(
                fname, wdir, args, interact, debug, python_args,
                executable)
        except NotImplementedError:
            QMessageBox.critical(self, _("Run"),
                                 _("Running an external system terminal "
                                   "is not supported on platform %s."
                                   ) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
    """
    Execute lines in IPython console and eventually set focus
    to the Editor.
    """
    console = self.ipyconsole
    # Bring the console to front before running the code.
    console.switch_to_plugin()
    console.execute_code(lines)
    if focus_to_editor:
        self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
    """
    Open filename with the appropriate application
    Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
    or open file outside Spyder (if extension is not supported)
    """
    fname = to_text_string(fname)
    if encoding.is_text_file(fname):
        # Anything text-like goes to the Editor.
        self.editor.load(fname)
        return
    ext = osp.splitext(fname)[1]
    if self.variableexplorer is not None and ext in IMPORT_EXT:
        # Data files the Variable Explorer knows how to import.
        self.variableexplorer.import_data(fname)
        return
    if not external:
        # Fall back to the OS default application.
        programs.start_file(file_uri(fname))
def open_external_file(self, fname):
    """
    Open external files that can be handled either by the Editor or the
    variable explorer inside Spyder.
    """
    fname = encoding.to_unicode_from_fs(fname)
    # Prefer a path relative to Spyder's CWD, then the path as given.
    candidate = osp.join(CWD, fname)
    if osp.exists(candidate):
        fpath = candidate
    elif osp.exists(fname):
        fpath = fname
    else:
        return
    if osp.isfile(fpath):
        self.open_file(fpath, external=True)
    elif osp.isdir(fpath):
        # Directories must be opened as projects, not files.
        QMessageBox.warning(
            self, _("Error"),
            _('To open <code>{fpath}</code> as a project with Spyder, '
              'please use <code>spyder -p "{fname}"</code>.')
            .format(fpath=osp.normpath(fpath), fname=fname)
        )
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
    """Load path stored in Spyder configuration folder."""
    # Keep only entries that still exist on disk.
    if osp.isfile(self.SPYDER_PATH):
        paths, _x = encoding.readlines(self.SPYDER_PATH)
        self.path = tuple(p for p in paths if osp.isdir(p))
    if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
        inactive, _x = encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
        self.not_active_path = tuple(p for p in inactive
                                     if osp.isdir(p))
def save_python_path(self, new_path_dict):
    """
    Save path in Spyder configuration folder.

    `new_path_dict` is an OrderedDict that has the new paths as keys
    and the state as values. The state is `True` for active and
    `False` for inactive.
    """
    all_paths = list(new_path_dict)
    inactive = [p for p, active in new_path_dict.items() if not active]
    try:
        encoding.writelines(all_paths, self.SPYDER_PATH)
        encoding.writelines(inactive, self.SPYDER_NOT_ACTIVE_PATH)
    except EnvironmentError as e:
        # Best effort: log but keep going so the config is updated.
        logger.error(str(e))
    CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
    """
    Return Spyder PYTHONPATH.

    The returned ordered dictionary has the paths as keys and the
    state as values. The state is `True` for active and `False` for
    inactive. Example:
        OrderedDict([('/some/path', True), ('/some/other/path', False)])
    """
    self.load_python_path()
    path_dict = OrderedDict(
        (p, p not in self.not_active_path) for p in self.path)
    # Project paths are always active.
    for p in self.project_path:
        path_dict[p] = True
    return path_dict
def get_spyder_pythonpath(self):
    """
    Return Spyder PYTHONPATH as a list of active paths.
    """
    return [path for path, active
            in self.get_spyder_pythonpath_dict().items() if active]
def update_python_path(self, new_path_dict):
    """Update python path on Spyder interpreter and kernels."""
    # Load previous path
    path_dict = self.get_spyder_pythonpath_dict()
    # Save path
    if path_dict != new_path_dict:
        # It doesn't include the project_path
        self.save_python_path(new_path_dict)
    # Load new path
    new_path_dict_p = self.get_spyder_pythonpath_dict()  # Includes project
    # Update Spyder interpreter: drop all previous entries first...
    for path in path_dict:
        while path in sys.path:
            sys.path.remove(path)
    # ...then insert active ones at position 1 (after ''), reversed so
    # the dict's order is preserved in sys.path.
    for path, active in reversed(new_path_dict_p.items()):
        if active:
            sys.path.insert(1, path)
    # Any plugin that needs to do some work based on this signal should
    # connect to it on plugin registration
    self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
    """Show path manager dialog."""
    from spyder.widgets.pathmanager import PathManager
    # Project paths are shown but cannot be edited by the user.
    read_only_path = tuple(self.projects.get_pythonpath())
    dialog = PathManager(self, self.path, read_only_path,
                         self.not_active_path, sync=True)
    # Keep a reference so the non-modal dialog isn't garbage-collected.
    self._path_manager = dialog
    dialog.sig_path_changed.connect(self.update_python_path)
    dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
    dialog.show()
def pythonpath_changed(self):
    """Project's PYTHONPATH contribution has changed."""
    # Refresh the cached project paths, then re-apply the whole path.
    self.project_path = tuple(self.projects.get_pythonpath())
    path_dict = self.get_spyder_pythonpath_dict()
    self.update_python_path(path_dict)
@Slot()
def win_env(self):
    """Show Windows current user environment variables."""
    self.dialog_manager.show(WinUserEnvDialog(self))
# --- Kite
def show_kite_installation(self):
    """Show installation dialog for Kite."""
    # Delegates to the Kite completion client's own dialog.
    self.completions.get_client('kite').show_installation_dialog()
#---- Preferences
def apply_settings(self):
    """Apply settings changed in 'Preferences' dialog box.

    Updates the Qt style, dock options, pane/status-bar settings and
    cursor-blink rate from the current configuration.
    """
    qapp = QApplication.instance()
    # Set 'gtk+' as the default theme in Gtk-based desktops
    # Fixes spyder-ide/spyder#2036.
    if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
        try:
            qapp.setStyle('gtk+')
        except Exception:
            # Fix: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt. Best effort only — keep
            # the current style if gtk+ cannot be applied.
            pass
    else:
        style_name = CONF.get('appearance', 'windows_style',
                              self.default_style)
        style = QStyleFactory.create(style_name)
        if style is not None:
            style.setProperty('name', style_name)
            qapp.setStyle(style)
    # Dock behaviour flags.
    default = self.DOCKOPTIONS
    if CONF.get('main', 'vertical_tabs'):
        default = default | QMainWindow.VerticalTabs
    if CONF.get('main', 'animated_docks'):
        default = default | QMainWindow.AnimatedDocks
    self.setDockOptions(default)
    self.apply_panes_settings()
    self.apply_statusbar_settings()
    if CONF.get('main', 'use_custom_cursor_blinking'):
        qapp.setCursorFlashTime(
            CONF.get('main', 'custom_cursor_blinking'))
    else:
        qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
    """Update dockwidgets features settings."""
    for plugin in (self.widgetlist + self.thirdparty_plugins):
        features = plugin.dockwidget.FEATURES
        if CONF.get('main', 'vertical_dockwidget_titlebars'):
            features = features | QDockWidget.DockWidgetVerticalTitleBar
        plugin.dockwidget.setFeatures(features)
        try:
            # New API: margins come from the configuration.
            margin = 0
            if CONF.get('main', 'use_custom_margin'):
                margin = CONF.get('main', 'custom_margin')
            plugin.update_margins(margin)
        except AttributeError:
            # Old API
            plugin._update_margins()
def apply_statusbar_settings(self):
    """Update status bar widgets settings"""
    show_status_bar = CONF.get('main', 'show_status_bar')
    self.statusBar().setVisible(show_status_bar)
    if not show_status_bar:
        return
    status_widgets = ((self.mem_status, 'memory_usage'),
                      (self.cpu_status, 'cpu_usage'),
                      (self.clock_status, 'clock'))
    for widget, name in status_widgets:
        if widget is not None:
            widget.setVisible(CONF.get('main', '%s/enable' % name))
            widget.set_interval(CONF.get('main', '%s/timeout' % name))
    # Update conda status widget
    if is_anaconda() and self.conda_status:
        self.conda_status.update_interpreter(self.get_main_interpreter())
@Slot()
def show_preferences(self):
    """Edit Spyder preferences.

    Creates the Preferences dialog on first use (adding one page per
    general section, completion client and plugin) and re-raises the
    existing instance on subsequent calls.
    """
    from spyder.preferences.configdialog import ConfigDialog

    def _dialog_finished(result_code):
        """Restore preferences dialog instance variable."""
        self.prefs_dialog_instance = None

    if self.prefs_dialog_instance is None:
        dlg = ConfigDialog(self)
        dlg.setStyleSheet("QTabWidget::tab-bar {"
                          "alignment: left;}")
        self.prefs_dialog_instance = dlg
        # Setup
        if self.prefs_dialog_size is not None:
            dlg.resize(self.prefs_dialog_size)
        for PrefPageClass in self.general_prefs:
            widget = PrefPageClass(dlg, main=self)
            widget.initialize()
            dlg.add_page(widget)
        widget = self.completions._create_configwidget(dlg, self)
        if widget is not None:
            dlg.add_page(widget)
        for completion_plugin in self.completions.clients.values():
            completion_plugin = completion_plugin['plugin']
            widget = completion_plugin._create_configwidget(dlg, self)
            if widget is not None:
                dlg.add_page(widget)
        for plugin in [self.workingdirectory, self.editor,
                       self.projects, self.ipyconsole,
                       self.historylog, self.help, self.variableexplorer,
                       self.onlinehelp, self.explorer, self.findinfiles
                       ] + self.thirdparty_plugins:
            if plugin is not None:
                # New API
                try:
                    # NOTE(review): result discarded here; the page is
                    # only added below when CONF_WIDGET_CLASS is set —
                    # confirm this first call is intentional.
                    self.create_plugin_conf_widget(plugin)
                except AttributeError:
                    pass
                except Exception:
                    # Avoid a crash at startup if a plugin's config
                    # page fails to load.
                    traceback.print_exc(file=sys.stderr)
                if getattr(plugin, 'CONF_WIDGET_CLASS', None):
                    try:
                        widget = self.create_plugin_conf_widget(plugin)
                        if widget is not None:
                            dlg.add_page(widget)
                    except Exception:
                        # Avoid a crash at startup if a plugin's config
                        # page fails to load.
                        traceback.print_exc(file=sys.stderr)
                # Old API
                try:
                    widget = plugin._create_configwidget(dlg, self)
                    if widget is not None:
                        dlg.add_page(widget)
                except AttributeError:
                    pass
                except Exception:
                    # Avoid a crash at startup if a plugin's config
                    # page fails to load.
                    traceback.print_exc(file=sys.stderr)
        if self.prefs_index is not None:
            # Reopen on the page that was last visited.
            dlg.set_current_index(self.prefs_index)
        # Check settings and show dialog
        dlg.show()
        dlg.check_all_settings()
        # Signals
        dlg.finished.connect(_dialog_finished)
        dlg.pages_widget.currentChanged.connect(
            self.__preference_page_changed)
        dlg.size_change.connect(self.set_prefs_size)
    else:
        # Dialog already open: just bring it to the front.
        self.prefs_dialog_instance.show()
        self.prefs_dialog_instance.activateWindow()
        self.prefs_dialog_instance.raise_()
        self.prefs_dialog_instance.setFocus()
    def __preference_page_changed(self, index):
        """Preference page index has changed.

        The index is stored so the Preferences dialog can be reopened on
        the same page (see the `prefs_index` use when creating the dialog).
        """
        self.prefs_index = index
    def set_prefs_size(self, size):
        """Save preferences dialog size.

        Stored so the dialog is resized to the same geometry the next
        time it is created.
        """
        self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append((qaction_or_qshortcut, context,
name, add_shortcut_to_tip, plugin_name))
def unregister_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
"""
Unregister QAction or QShortcut from Spyder main application.
"""
data = (qaction_or_qshortcut, context, name, add_shortcut_to_tip,
plugin_name)
if data in self.shortcut_data:
self.shortcut_data.remove(data)
    def apply_shortcuts(self):
        """Apply shortcuts settings to all widgets/plugins.

        Reads each registered entry from `shortcut_data`, fetches the
        configured key sequence from CONF and applies it to the QAction
        or QShortcut.  Entries whose Qt object was deleted are dropped.
        """
        toberemoved = []
        # TODO: Check shortcut existence based on action existence, so that we
        #       can update shortcut names without showing the old ones on the
        #       preferences
        for index, (qobject, context, name, add_shortcut_to_tip,
                    plugin_name) in enumerate(self.shortcut_data):
            try:
                shortcut_sequence = CONF.get_shortcut(context, name,
                                                      plugin_name)
            except (cp.NoSectionError, cp.NoOptionError):
                # If shortcut does not exist, save it to CONF. This is an
                # action for which there is no shortcut assigned (yet) in
                # the configuration
                CONF.set_shortcut(context, name, '', plugin_name)
                shortcut_sequence = ''
            if shortcut_sequence:
                keyseq = QKeySequence(shortcut_sequence)
                try:
                    if isinstance(qobject, QAction):
                        if (sys.platform == 'darwin'
                                and qobject._shown_shortcut == 'missing'):
                            # NOTE(review): on macOS, actions flagged with the
                            # 'missing' sentinel only store the sequence
                            # instead of setting it on the action -- confirm
                            # against the code that sets `_shown_shortcut`.
                            qobject._shown_shortcut = keyseq
                        else:
                            qobject.setShortcut(keyseq)
                        if add_shortcut_to_tip:
                            add_shortcut_to_tooltip(qobject, context, name)
                    elif isinstance(qobject, QShortcut):
                        qobject.setKey(keyseq)
                except RuntimeError:
                    # Object has been deleted
                    toberemoved.append(index)
        # Pop in reverse order so earlier indexes stay valid while removing.
        for index in sorted(toberemoved, reverse=True):
            self.shortcut_data.pop(index)
        # TODO: Update plugin API to include an update shortcuts method
        # See: spyder-ide/spyder#6992
        if self.help:
            # NOTE(review): presumably re-rendered because the intro text
            # embeds shortcut names -- see the TODO above.
            self.help.show_intro_message()
    @Slot()
    def show_shortcuts_dialog(self):
        """Show a modal dialog summarizing all configured shortcuts."""
        from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
        dlg = ShortcutsSummaryDialog(None)
        dlg.exec_()
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
    @Slot()
    def reset_spyder(self):
        """
        Quit and reset Spyder and then Restart application.

        Asks for confirmation first; the actual reset happens in the
        restarter process (see `restart(reset=True)`).
        """
        answer = QMessageBox.warning(self, _("Warning"),
             _("Spyder will restart and reset to default settings: <br><br>"
               "Do you want to continue?"),
             QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            self.restart(reset=True)
    @Slot()
    def restart(self, reset=False):
        """
        Quit and Restart Spyder application.

        If reset is True, the restarter script also resets Spyder's
        settings to defaults before relaunching.
        """
        # Get start path to use in restart script
        spyder_start_directory = get_module_path('spyder')
        restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
        # Get any initial argument passed when spyder was started
        # Note: Variables defined in bootstrap.py and spyder/app/start.py
        env = os.environ.copy()
        bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
        # NOTE(review): pop() without a default raises KeyError if
        # SPYDER_ARGS is unset -- assumes Spyder was launched via
        # start.py/bootstrap.py, which set it.  TODO confirm.
        spyder_args = env.pop('SPYDER_ARGS')
        # Get current process and python running spyder
        pid = os.getpid()
        python = sys.executable
        # Check if started with bootstrap.py
        if bootstrap_args is not None:
            spyder_args = bootstrap_args
            is_bootstrap = True
        else:
            is_bootstrap = False
        # Pass variables as environment variables (str) to restarter subprocess
        env['SPYDER_ARGS'] = spyder_args
        env['SPYDER_PID'] = str(pid)
        env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
        env['SPYDER_RESET'] = str(reset)
        if DEV:
            # In a development checkout, make the repo importable by the
            # restarter subprocess.
            repo_dir = osp.dirname(spyder_start_directory)
            if os.name == 'nt':
                env['PYTHONPATH'] = ';'.join([repo_dir])
            else:
                env['PYTHONPATH'] = ':'.join([repo_dir])
        # Build the command and popen arguments depending on the OS
        if os.name == 'nt':
            # Hide flashing command prompt
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            shell = False
        else:
            startupinfo = None
            shell = True
        command = '"{0}" "{1}"'
        command = command.format(python, restart_script)
        try:
            if self.closing(True):
                subprocess.Popen(command, shell=shell, env=env,
                                 startupinfo=startupinfo)
                self.console.quit()
        except Exception as error:
            # If there is an error with subprocess, Spyder should not quit and
            # the error can be inspected in the internal console
            print(error) # spyder: test-skip
            print(command) # spyder: test-skip
# ---- Interactive Tours
    def show_tour(self, index):
        """Show interactive tour.

        index selects one of the frame sets in `tours_available`.
        """
        # Restore any maximized dockwidget before overlaying the tour.
        self.maximize_dockwidget(restore=True)
        frames = self.tours_available[index]
        self.tour.set_tour(index, frames, self)
        self.tour.start_tour()
# ---- Global Switcher
    def open_switcher(self, symbol=False):
        """Open switcher dialog box.

        Acts as a toggle: if the switcher is already visible it is
        cleared and hidden instead.  With symbol=True the search box is
        pre-filled with '@' (symbol-search mode).
        """
        if self.switcher is not None and self.switcher.isVisible():
            self.switcher.clear()
            self.switcher.hide()
            return
        # NOTE(review): from here on `self.switcher` is assumed to be
        # already created (see create_switcher); a None value would raise
        # AttributeError -- confirm callers create it first.
        if symbol:
            self.switcher.set_search_text('@')
        else:
            self.switcher.set_search_text('')
        self.switcher.setup()
        self.switcher.show()
        # Note: The +6 pixel on the top makes it look better
        delta_top = (self.toolbars_menu.geometry().height() +
                     self.menuBar().geometry().height() + 6)
        self.switcher.set_position(delta_top)
    def open_symbolfinder(self):
        """Open symbol list management dialog box.

        Same switcher dialog, started in symbol ('@') mode.
        """
        self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# ---- Check for Spyder Updates
    def _check_updates_ready(self):
        """Called by WorkerUpdates when ready.

        Shows the result of the update check in a message box with a
        "check on startup" checkbox, and persists the checkbox state.
        """
        from spyder.widgets.helperwidgets import MessageCheckBox
        # `feedback` = False is used on startup, so only positive feedback is
        # given. `feedback` = True is used after startup (when using the menu
        # action), and gives feedback whether updates are found or not.
        feedback = self.give_updates_feedback
        # Get results from worker
        update_available = self.worker_updates.update_available
        latest_release = self.worker_updates.latest_release
        error_msg = self.worker_updates.error
        url_r = __project_url__ + '/releases'
        url_i = 'https://docs.spyder-ide.org/installation.html'
        # Define the custom QMessageBox
        box = MessageCheckBox(icon=QMessageBox.Information,
                              parent=self)
        box.setWindowTitle(_("Spyder updates"))
        box.set_checkbox_text(_("Check for updates on startup"))
        box.setStandardButtons(QMessageBox.Ok)
        box.setDefaultButton(QMessageBox.Ok)
        # Adjust the checkbox depending on the stored configuration
        section, option = 'main', 'check_updates_on_startup'
        check_updates = CONF.get(section, option)
        box.set_checked(check_updates)
        if error_msg is not None:
            # The check failed: show the error unconditionally (even on
            # startup), without the checkbox.
            msg = error_msg
            box.setText(msg)
            box.set_check_visible(False)
            box.exec_()
            check_updates = box.is_checked()
        else:
            if update_available:
                anaconda_msg = ''
                if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
                    # Conda users must not update with pip.
                    anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
                                     "that you are using Spyder with "
                                     "<b>Anaconda/Miniconda</b>. Please "
                                     "<b>don't</b> use <code>pip</code> to "
                                     "update it as that will probably break "
                                     "your installation.<br><br>"
                                     "Instead, please wait until new conda "
                                     "packages are available and use "
                                     "<code>conda</code> to perform the "
                                     "update.<hr>")
                msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
                        "your package manager to update Spyder or go to our "
                        "<a href=\"%s\">Releases</a> page to download this "
                        "new version. <br><br>If you are not sure how to "
                        "proceed to update Spyder please refer to our "
                        " <a href=\"%s\">Installation</a> instructions."
                        "") % (latest_release, url_r, url_i)
                msg += '<br>' + anaconda_msg
                box.setText(msg)
                box.set_check_visible(True)
                box.exec_()
                check_updates = box.is_checked()
            elif feedback:
                msg = _("Spyder is up to date.")
                box.setText(msg)
                box.set_check_visible(False)
                box.exec_()
                check_updates = box.is_checked()
        # Update checkbox based on user interaction
        CONF.set(section, option, check_updates)
        # Enable check_updates_action after the thread has finished
        self.check_updates_action.setDisabled(False)
        # Provide feedback when clicking menu if check on startup is on
        self.give_updates_feedback = True
    @Slot()
    def check_updates(self, startup=False):
        """
        Check for spyder updates on github releases using a QThread.

        The worker runs in its own QThread; `_check_updates_ready` is
        connected to its `sig_ready` signal to show the result.
        """
        from spyder.workers.updates import WorkerUpdates
        # Disable check_updates_action while the thread is working
        self.check_updates_action.setDisabled(True)
        # Kill a previous, still-running check before starting a new one.
        if self.thread_updates is not None:
            self.thread_updates.terminate()
        self.thread_updates = QThread(self)
        self.worker_updates = WorkerUpdates(self, startup=startup)
        self.worker_updates.sig_ready.connect(self._check_updates_ready)
        self.worker_updates.sig_ready.connect(self.thread_updates.quit)
        self.worker_updates.moveToThread(self.thread_updates)
        self.thread_updates.started.connect(self.worker_updates.start)
        self.thread_updates.start()
# --- Main interpreter
# ------------------------------------------------------------------------
def get_main_interpreter(self):
if CONF.get('main_interpreter', 'default'):
return sys.executable
else:
return CONF.get('main_interpreter', 'custom_interpreter')
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
    """Initialize Qt, patching sys.exit and eventually setting up ETS.

    Returns the (already created) QApplication after installing several
    monkey patches that keep user code run inside Spyder from breaking
    the running GUI.
    """
    # This doesn't create our QApplication, just holds a reference to
    # MAIN_APP, created above to show our splash screen as early as
    # possible
    app = qapplication()
    # --- Set application icon
    app.setWindowIcon(APP_ICON)
    # ---- Monkey patching QApplication
    # The fake class neither initializes a new QApplication (its __init__
    # discards the instance) nor runs a second event loop (exec_ is a
    # no-op), so code instantiating QApplication reuses the running one.
    class FakeQApplication(QApplication):
        """Spyder's fake QApplication"""
        def __init__(self, args):
            self = app # analysis:ignore
        @staticmethod
        def exec_():
            """Do nothing because the Qt mainloop is already running"""
            pass
    from qtpy import QtWidgets
    QtWidgets.QApplication = FakeQApplication
    # ---- Monkey patching sys.exit so user code cannot kill the app.
    def fake_sys_exit(arg=[]):
        pass
    sys.exit = fake_sys_exit
    # ---- Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
    if PYQT5:
        def spy_excepthook(type_, value, tback):
            sys.__excepthook__(type_, value, tback)
        sys.excepthook = spy_excepthook
    # Removing arguments from sys.argv as in standard Python interpreter
    sys.argv = ['']
    # Selecting Qt4 backend for Enthought Tool Suite (if installed)
    try:
        from enthought.etsconfig.api import ETSConfig
        ETSConfig.toolkit = 'qt4'
    except ImportError:
        pass
    return app
class Spy(object):
    """
    Inspect Spyder internals

    Attributes:
        app       Reference to main QApplication object
        window    Reference to spyder.MainWindow widget
    """
    def __init__(self, app, window):
        self.app = app
        self.window = window

    def __dir__(self):
        # Instance attributes plus the public names of the class, so tab
        # completion in the internal console shows both.
        instance_attrs = list(self.__dict__.keys())
        public_names = [attr for attr in dir(self.__class__)
                        if not attr.startswith('_')]
        return instance_attrs + public_names

    def versions(self):
        return get_versions()
def run_spyder(app, options, args):
    """
    Create and show Spyder's main window
    Start QApplication event loop

    args is a list of files to open on startup (may be None).  Returns
    the MainWindow instance (mainly for tests, which skip the event loop).
    """
    # Main window
    main = MainWindow(options)
    try:
        main.setup()
    except BaseException:
        # Shut down the internal interpreter before re-raising, so a
        # failed setup doesn't leave it running.
        if main.console is not None:
            try:
                main.console.exit_interpreter()
            except BaseException:
                pass
        raise
    main.show()
    main.post_visible_setup()
    if main.console:
        # Expose a `spy` object in the internal console for introspection.
        namespace = CONF.get('internal_console', 'namespace', {})
        main.console.start_interpreter(namespace)
        main.console.set_namespace_item('spy', Spy(app=app, window=main))
    # Don't show icons in menus for Mac
    if sys.platform == 'darwin':
        QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
    # Open external files with our Mac app
    if sys.platform == "darwin":
        app.sig_open_external_file.connect(main.open_external_file)
        app._has_started = True
        # Files the OS asked to open before the window existed are merged
        # into the startup args.
        if hasattr(app, '_pending_file_open'):
            if args:
                args = app._pending_file_open + args
            else:
                args = app._pending_file_open
    # Open external files passed as args
    if args:
        for a in args:
            main.open_external_file(a)
    # To give focus again to the last focused widget after restoring
    # the window
    app.focusChanged.connect(main.change_last_focused_widget)
    if not running_under_pytest():
        app.exec_()
    return main
#==============================================================================
# Main
#==============================================================================
def main():
    """Main function.

    Handles command line options (some of which exit early), reads any
    previous crash log, then creates and runs the main window.
    """
    # **** For Pytest ****
    # Under pytest, skip option handling/monkey patches and return the
    # window directly.
    if running_under_pytest():
        if CONF.get('main', 'opengl') != 'automatic':
            option = CONF.get('main', 'opengl')
            set_opengl_implementation(option)
        app = initialize()
        window = run_spyder(app, CLI_OPTIONS, None)
        return window
    # **** Collect command line options ****
    # Note regarding Options:
    # It's important to collect options before monkey patching sys.exit,
    # otherwise, argparse won't be able to exit if --help option is passed
    options, args = (CLI_OPTIONS, CLI_ARGS)
    # **** Handle hide_console option ****
    if options.show_console:
        print("(Deprecated) --show console does nothing, now the default "
              " behavior is to show the console, use --hide-console if you "
              "want to hide it")
    if set_attached_console_visible is not None:
        set_attached_console_visible(not options.hide_console
                                     or options.reset_config_files
                                     or options.reset_to_defaults
                                     or options.optimize
                                     or bool(get_debug_level()))
    # **** Set debugging info ****
    setup_logging(options)
    # **** Create the application ****
    app = initialize()
    # **** Handle other options ****
    # These three are maintenance commands: perform them and exit.
    if options.reset_config_files:
        # <!> Remove all configuration files!
        reset_config_files()
        return
    elif options.reset_to_defaults:
        # Reset Spyder settings to defaults
        CONF.reset_to_defaults()
        return
    elif options.optimize:
        # Optimize the whole Spyder's source code directory
        import spyder
        programs.run_python_script(module="compileall",
                                   args=[spyder.__path__[0]], p_args=['-O'])
        return
    # **** Read faulthandler log file ****
    # A non-empty log means the previous session crashed hard; keep its
    # contents in CONF and rotate the file aside.
    faulthandler_file = get_conf_path('faulthandler.log')
    previous_crash = ''
    if osp.exists(faulthandler_file):
        with open(faulthandler_file, 'r') as f:
            previous_crash = f.read()
        # Remove file to not pick it up for next time.
        try:
            dst = get_conf_path('faulthandler.log.old')
            shutil.move(faulthandler_file, dst)
        except Exception:
            pass
    CONF.set('main', 'previous_crash', previous_crash)
    # **** Create main window ****
    mainwindow = None
    try:
        if PY3 and options.report_segfault:
            import faulthandler
            with open(faulthandler_file, 'w') as f:
                faulthandler.enable(file=f)
                mainwindow = run_spyder(app, options, args)
        else:
            mainwindow = run_spyder(app, options, args)
    except FontError as fontError:
        QMessageBox.information(None, "Spyder",
                      "Spyder was unable to load the <i>Spyder 3</i> "
                      "icon theme. That's why it's going to fallback to the "
                      "theme used in Spyder 2.<br><br>"
                      "For that, please close this window and start Spyder again.")
        CONF.set('appearance', 'icon_theme', 'spyder 2')
    if mainwindow is None:
        # An exception occurred
        if SPLASH is not None:
            SPLASH.hide()
        return
    ORIGINAL_SYS_EXIT()
# Standard script entry point.
if __name__ == "__main__":
    main()
|
put.py | from __future__ import division
from future.utils import listitems, listvalues
from builtins import str
from builtins import object
import argparse
import arvados
import arvados.collection
import base64
import copy
import datetime
import errno
import fcntl
import hashlib
import json
import logging
import os
import pwd
import re
import signal
import socket
import sys
import tempfile
import threading
import time
import traceback
from apiclient import errors as apiclient_errors
from arvados._version import __version__
import arvados.commands._util as arv_cmd
# Signals trapped by arv-put so it can exit cleanly instead of dying mid-upload.
CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
# Module-level Arvados API client (initialized lazily by the entry point).
api_client = None
# ---- Options about what to upload and how to store it (shared parser).
upload_opts = argparse.ArgumentParser(add_help=False)
upload_opts.add_argument('--version', action='version',
                         version="%s %s" % (sys.argv[0], __version__),
                         help='Print version and exit.')
upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
                         help="""
Local file or directory. Default: read from standard input.
""")
_group = upload_opts.add_mutually_exclusive_group()
_group.add_argument('--max-manifest-depth', type=int, metavar='N',
                    default=-1, help=argparse.SUPPRESS)
_group.add_argument('--normalize', action='store_true',
                    help="""
Normalize the manifest by re-ordering files and streams after writing
data.
""")
_group.add_argument('--dry-run', action='store_true', default=False,
                    help="""
Don't actually upload files, but only check if any file should be
uploaded. Exit with code=2 when files are pending for upload.
""")
# ---- Output-mode options: stream / manifest / raw are mutually exclusive.
_group = upload_opts.add_mutually_exclusive_group()
_group.add_argument('--as-stream', action='store_true', dest='stream',
                    help="""
Synonym for --stream.
""")
_group.add_argument('--stream', action='store_true',
                    help="""
Store the file content and display the resulting manifest on
stdout. Do not write the manifest to Keep or save a Collection object
in Arvados.
""")
_group.add_argument('--as-manifest', action='store_true', dest='manifest',
                    help="""
Synonym for --manifest.
""")
_group.add_argument('--in-manifest', action='store_true', dest='manifest',
                    help="""
Synonym for --manifest.
""")
_group.add_argument('--manifest', action='store_true',
                    help="""
Store the file data and resulting manifest in Keep, save a Collection
object in Arvados, and display the manifest locator (Collection uuid)
on stdout. This is the default behavior.
""")
_group.add_argument('--as-raw', action='store_true', dest='raw',
                    help="""
Synonym for --raw.
""")
_group.add_argument('--raw', action='store_true',
                    help="""
Store the file content and display the data block locators on stdout,
separated by commas, with a trailing newline. Do not store a
manifest.
""")
upload_opts.add_argument('--update-collection', type=str, default=None,
                         dest='update_collection', metavar="UUID", help="""
Update an existing collection identified by the given Arvados collection
UUID. All new local files will be uploaded.
""")
upload_opts.add_argument('--use-filename', type=str, default=None,
                         dest='filename', help="""
Synonym for --filename.
""")
upload_opts.add_argument('--filename', type=str, default=None,
                         help="""
Use the given filename in the manifest, instead of the name of the
local file. This is useful when "-" or "/dev/stdin" is given as an
input file. It can be used only if there is exactly one path given and
it is not a directory. Implies --manifest.
""")
upload_opts.add_argument('--portable-data-hash', action='store_true',
                         help="""
Print the portable data hash instead of the Arvados UUID for the collection
created by the upload.
""")
upload_opts.add_argument('--replication', type=int, metavar='N', default=None,
                         help="""
Set the replication level for the new collection: how many different
physical storage devices (e.g., disks) should have a copy of each data
block. Default is to use the server-provided default (if any) or 2.
""")
upload_opts.add_argument('--threads', type=int, metavar='N', default=None,
                         help="""
Set the number of upload threads to be used. Take into account that
using lots of threads will increase the RAM requirements. Default is
to use 2 threads.
On high latency installations, using a greater number will improve
overall throughput.
""")
# ---- Options about how the command runs (naming, progress, resume, cache).
run_opts = argparse.ArgumentParser(add_help=False)
run_opts.add_argument('--project-uuid', metavar='UUID', help="""
Store the collection in the specified project, instead of your Home
project.
""")
run_opts.add_argument('--name', help="""
Save the collection with the specified name.
""")
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--progress', action='store_true',
                    help="""
Display human-readable progress on stderr (bytes and, if possible,
percentage of total data size). This is the default behavior when
stderr is a tty.
""")
_group.add_argument('--no-progress', action='store_true',
                    help="""
Do not display human-readable progress on stderr, even if stderr is a
tty.
""")
_group.add_argument('--batch-progress', action='store_true',
                    help="""
Display machine-readable progress on stderr (bytes and, if known,
total data size).
""")
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--resume', action='store_true', default=True,
                    help="""
Continue interrupted uploads from cached state (default).
""")
_group.add_argument('--no-resume', action='store_false', dest='resume',
                    help="""
Do not continue interrupted uploads from cached state.
""")
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--follow-links', action='store_true', default=True,
                    dest='follow_links', help="""
Follow file and directory symlinks (default).
""")
_group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
                    help="""
Do not follow file and directory symlinks.
""")
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--cache', action='store_true', dest='use_cache', default=True,
                    help="""
Save upload state in a cache file for resuming (default).
""")
_group.add_argument('--no-cache', action='store_false', dest='use_cache',
                    help="""
Do not save upload state in a cache file for resuming.
""")
# ---- The full parser combines both option groups plus the shared retry flag.
arg_parser = argparse.ArgumentParser(
    description='Copy data from the local filesystem to Keep.',
    parents=[upload_opts, run_opts, arv_cmd.retry_opt])
def parse_arguments(arguments):
    """Parse *arguments* and reconcile interdependent options.

    Returns the argparse namespace after normalizing stdin paths,
    enabling --progress on ttys, and disabling resume/caching where
    they cannot apply.
    """
    args = arg_parser.parse_args(arguments)

    # No paths means "read from stdin"; /dev/stdin is treated the same.
    if not args.paths:
        args.paths = ['-']
    args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths]

    storing_multiple = len(args.paths) != 1 or os.path.isdir(args.paths[0])
    if storing_multiple and args.filename:
        arg_parser.error("""
    --filename argument cannot be used when storing a directory or
    multiple files.
    """)

    # Turn on --progress by default if stderr is a tty.
    if (not (args.batch_progress or args.no_progress)
        and os.isatty(sys.stderr.fileno())):
        args.progress = True

    # --no-cache implies --no-resume.
    if not args.use_cache:
        args.resume = False

    if args.paths == ['-']:
        if args.update_collection:
            arg_parser.error("""
    --update-collection cannot be used when reading from stdin.
    """)
        # Streams cannot be resumed or cached, and they need a name
        # inside the collection.
        args.resume = False
        args.use_cache = False
        if not args.filename:
            args.filename = 'stdin'

    return args
class PathDoesNotExistError(Exception):
    """A path given on the command line does not exist locally."""
    pass
class CollectionUpdateError(Exception):
    """Error while updating an existing collection (--update-collection)."""
    pass
class ResumeCacheConflict(Exception):
    """Another arv-put process holds the lock on the resume cache file."""
    pass
class ArvPutArgumentConflict(Exception):
    """Mutually incompatible arguments were supplied (e.g. resume without cache)."""
    pass
class ArvPutUploadIsPending(Exception):
    """Dry-run result: at least one file still needs to be uploaded."""
    pass
class ArvPutUploadNotPending(Exception):
    """Dry-run result: nothing is pending upload."""
    pass
class FileUploadList(list):
    """List of files queued for upload.

    In dry-run mode, any attempt to queue a file aborts immediately by
    raising ArvPutUploadIsPending (there is something to upload).
    """
    def __init__(self, dry_run=False):
        super(FileUploadList, self).__init__()
        self.dry_run = dry_run

    def append(self, other):
        if self.dry_run:
            raise ArvPutUploadIsPending()
        super(FileUploadList, self).append(other)
class ResumeCache(object):
    """flock-protected JSON file holding upload state for resuming."""
    CACHE_DIR = '.cache/arvados/arv-put'
    def __init__(self, file_spec):
        # 'a+' creates the file if needed without truncating existing state.
        self.cache_file = open(file_spec, 'a+')
        self._lock_file(self.cache_file)
        self.filename = self.cache_file.name
    @classmethod
    def make_path(cls, args):
        # Cache key = API host + sorted real paths (+ a directory marker or
        # the filename override), hashed so the cache filename stays short
        # and stable for the same invocation.
        md5 = hashlib.md5()
        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())
        realpaths = sorted(os.path.realpath(path) for path in args.paths)
        md5.update(b'\0'.join([p.encode() for p in realpaths]))
        if any(os.path.isdir(path) for path in realpaths):
            md5.update(b'-1')
        elif args.filename:
            md5.update(args.filename.encode())
        return os.path.join(
            arv_cmd.make_home_conf_dir(cls.CACHE_DIR, 0o700, 'raise'),
            md5.hexdigest())
    def _lock_file(self, fileobj):
        # Advisory non-blocking exclusive lock; failure means another
        # arv-put is using the same cache.
        try:
            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            raise ResumeCacheConflict("{} locked".format(fileobj.name))
    def load(self):
        """Return the cached state parsed from JSON."""
        self.cache_file.seek(0)
        return json.load(self.cache_file)
    def check_cache(self, api_client=None, num_retries=0):
        """Sanity-check cached state: verify one cached block locator still
        exists in Keep, and restart with an empty cache on any failure."""
        try:
            state = self.load()
            locator = None
            try:
                if "_finished_streams" in state and len(state["_finished_streams"]) > 0:
                    locator = state["_finished_streams"][0][1][0]
                elif "_current_stream_locators" in state and len(state["_current_stream_locators"]) > 0:
                    locator = state["_current_stream_locators"][0]
                if locator is not None:
                    kc = arvados.keep.KeepClient(api_client=api_client)
                    kc.head(locator, num_retries=num_retries)
            except Exception as e:
                self.restart()
        except (ValueError):
            # Cache file isn't valid JSON (e.g. brand new) -- nothing to check.
            pass
    def save(self, data):
        """Atomically replace the cache contents with *data* (as JSON).

        Writes to a locked temp file in the same directory, then renames
        it over the old cache so a reader never sees a partial write.
        """
        try:
            new_cache_fd, new_cache_name = tempfile.mkstemp(
                dir=os.path.dirname(self.filename))
            self._lock_file(new_cache_fd)
            new_cache = os.fdopen(new_cache_fd, 'r+')
            json.dump(data, new_cache)
            os.rename(new_cache_name, self.filename)
        except (IOError, OSError, ResumeCacheConflict) as error:
            try:
                os.unlink(new_cache_name)
            except NameError:  # mkstemp failed.
                pass
        else:
            # Success: the new file (still locked) becomes the cache handle.
            self.cache_file.close()
            self.cache_file = new_cache
    def close(self):
        self.cache_file.close()
    def destroy(self):
        """Close and delete the cache file (already-missing file is fine)."""
        try:
            os.unlink(self.filename)
        except OSError as error:
            if error.errno != errno.ENOENT:  # That's what we wanted anyway.
                raise
        self.close()
    def restart(self):
        """Throw away cached state and re-create an empty cache file."""
        self.destroy()
        self.__init__(self.filename)
class ArvPutUploadJob(object):
    """Upload local files/directories to a Keep collection, periodically
    checkpointing progress to a local cache so interrupted uploads can
    be resumed."""
    CACHE_DIR = '.cache/arvados/arv-put'
    # Shape of a fresh (no previous run) cached state.
    EMPTY_STATE = {
        'manifest' : None, # Last saved manifest checkpoint
        'files' : {} # Previous run file list: {path : {size, mtime}}
    }
    def __init__(self, paths, resume=True, use_cache=True, reporter=None,
                 bytes_expected=None, name=None, owner_uuid=None,
                 ensure_unique_name=False, num_retries=None,
                 put_threads=None, replication_desired=None,
                 filename=None, update_time=60.0, update_collection=None,
                 logger=logging.getLogger('arvados.arv_put'), dry_run=False,
                 follow_links=True):
        """Set up the upload job.

        paths: local files/directories to upload ('-' means stdin).
        resume/use_cache: resume from a previous run's cached state
            (resume requires use_cache; see the conflict check below).
        reporter: callable(bytes_written, bytes_expected) for progress.
        update_collection: UUID of an existing collection to update.
        update_time: seconds between checkpoint runs of the update task.
        dry_run: raise ArvPutUploadIsPending/NotPending instead of uploading.
        """
        self.paths = paths
        self.resume = resume
        self.use_cache = use_cache
        self.update = False
        self.reporter = reporter
        self.bytes_expected = bytes_expected
        self.bytes_written = 0
        self.bytes_skipped = 0
        self.name = name
        self.owner_uuid = owner_uuid
        self.ensure_unique_name = ensure_unique_name
        self.num_retries = num_retries
        self.replication_desired = replication_desired
        self.put_threads = put_threads
        self.filename = filename
        self._state_lock = threading.Lock()
        self._state = None # Previous run state (file list & manifest)
        self._current_files = [] # Current run file list
        self._cache_file = None
        self._collection_lock = threading.Lock()
        self._remote_collection = None # Collection being updated (if asked)
        self._local_collection = None # Collection from previous run manifest
        self._file_paths = set() # Files to be updated in remote collection
        self._stop_checkpointer = threading.Event()
        self._checkpointer = threading.Thread(target=self._update_task)
        self._checkpointer.daemon = True
        self._update_task_time = update_time  # How many seconds wait between update runs
        self._files_to_upload = FileUploadList(dry_run=dry_run)
        self._upload_started = False
        self.logger = logger
        self.dry_run = dry_run
        self._checkpoint_before_quit = True
        self.follow_links = follow_links
        if not self.use_cache and self.resume:
            raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
        # Check for obvious dry-run responses
        if self.dry_run and (not self.use_cache or not self.resume):
            raise ArvPutUploadIsPending()
        # Load cached data if any and if needed
        self._setup_state(update_collection)
    def start(self, save_collection):
        """
        Start supporting thread & file uploading.

        Walks the configured paths, queues files for upload, runs the
        upload, and (unless dry_run) checkpoints and optionally saves
        the resulting collection.
        """
        if not self.dry_run:
            self._checkpointer.start()
        try:
            for path in self.paths:
                # Test for stdin first, in case some file named '-' exist
                if path == '-':
                    if self.dry_run:
                        raise ArvPutUploadIsPending()
                    self._write_stdin(self.filename or 'stdin')
                elif not os.path.exists(path):
                    raise PathDoesNotExistError("file or directory '{}' does not exist.".format(path))
                elif os.path.isdir(path):
                    # Use absolute paths on cache index so CWD doesn't interfere
                    # with the caching logic.
                    prefixdir = path = os.path.abspath(path)
                    if prefixdir != '/':
                        prefixdir += '/'
                    for root, dirs, files in os.walk(path, followlinks=self.follow_links):
                        # Make os.walk()'s dir traversing order deterministic
                        dirs.sort()
                        files.sort()
                        for f in files:
                            self._check_file(os.path.join(root, f),
                                             os.path.join(root[len(prefixdir):], f))
                else:
                    self._check_file(os.path.abspath(path),
                                     self.filename or os.path.basename(path))
            # If dry-mode is on, and got up to this point, then we should notify that
            # there aren't any files to upload.
            if self.dry_run:
                raise ArvPutUploadNotPending()
            # Remove local_collection's files that don't exist locally anymore, so the
            # bytes_written count is correct.
            for f in self.collection_file_paths(self._local_collection,
                                                path_prefix=""):
                if f != 'stdin' and f != self.filename and not f in self._file_paths:
                    self._local_collection.remove(f)
            # Update bytes_written from current local collection and
            # report initial progress.
            self._update()
            # Actual file upload
            self._upload_started = True # Used by the update thread to start checkpointing
            self._upload_files()
        except (SystemExit, Exception) as e:
            self._checkpoint_before_quit = False
            # Log stack trace only when Ctrl-C isn't pressed (SIGINT)
            # Note: We're expecting SystemExit instead of
            # KeyboardInterrupt because we have a custom signal
            # handler in place that raises SystemExit with the caught
            # signal's code.
            if isinstance(e, PathDoesNotExistError):
                # We aren't interested in the traceback for this case
                pass
            elif not isinstance(e, SystemExit) or e.code != -2:
                self.logger.warning("Abnormal termination:\n{}".format(
                    traceback.format_exc()))
            raise
        finally:
            if not self.dry_run:
                # Stop the thread before doing anything else
                self._stop_checkpointer.set()
                self._checkpointer.join()
                if self._checkpoint_before_quit:
                    # Commit all pending blocks & one last _update()
                    self._local_collection.manifest_text()
                    self._update(final=True)
                    if save_collection:
                        self.save_collection()
            if self.use_cache:
                self._cache_file.close()
    def save_collection(self):
        """Persist the upload: update the remote collection in place when
        updating, otherwise save the local collection as a new one."""
        if self.update:
            # Check if files should be updated on the remote collection.
            for fp in self._file_paths:
                remote_file = self._remote_collection.find(fp)
                if not remote_file:
                    # File doesn't exist on remote collection, copy it.
                    self._remote_collection.copy(fp, fp, self._local_collection)
                elif remote_file != self._local_collection.find(fp):
                    # A different file exists on remote collection, overwrite it.
                    self._remote_collection.copy(fp, fp, self._local_collection, overwrite=True)
                else:
                    # The file already exists on remote collection, skip it.
                    pass
            self._remote_collection.save(num_retries=self.num_retries)
        else:
            self._local_collection.save_new(
                name=self.name, owner_uuid=self.owner_uuid,
                ensure_unique_name=self.ensure_unique_name,
                num_retries=self.num_retries)
def destroy_cache(self):
if self.use_cache:
try:
os.unlink(self._cache_filename)
except OSError as error:
# That's what we wanted anyway.
if error.errno != errno.ENOENT:
raise
self._cache_file.close()
def _collection_size(self, collection):
"""
Recursively get the total size of the collection
"""
size = 0
for item in listvalues(collection):
if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
size += self._collection_size(item)
else:
size += item.size()
return size
    def _update_task(self):
        """
        Periodically called support task. File uploading is
        asynchronous so we poll status from the collection.

        Polls every second until the upload actually starts, then at the
        configured checkpoint interval; exits when the stop event is set.
        """
        while not self._stop_checkpointer.wait(1 if not self._upload_started else self._update_task_time):
            self._update()
def _update(self, final=False):
    """
    Update cached manifest text and report progress.

    When `final` is True, commit all pending blocks so the manifest is the
    complete one; otherwise only include already-committed blocks so this
    stays cheap enough to run from the checkpointer thread.
    """
    if self._upload_started:
        with self._collection_lock:
            self.bytes_written = self._collection_size(self._local_collection)
            if self.use_cache:
                if final:
                    manifest = self._local_collection.manifest_text()
                else:
                    # Get the manifest text without comitting pending blocks
                    manifest = self._local_collection.manifest_text(strip=False,
                                                                    normalize=False,
                                                                    only_committed=True)
                # Update cache
                with self._state_lock:
                    self._state['manifest'] = manifest
        if self.use_cache:
            try:
                self._save_state()
            except Exception as e:
                # A failed checkpoint only costs resumability; keep uploading.
                self.logger.error("Unexpected error trying to save cache file: {}".format(e))
    else:
        # Upload not started yet: everything counted so far was skipped.
        self.bytes_written = self.bytes_skipped
    # Call the reporter, if any
    self.report_progress()
def report_progress(self):
    """Invoke the configured progress callback, if there is one."""
    if self.reporter is None:
        return
    self.reporter(self.bytes_written, self.bytes_expected)
def _write_stdin(self, filename):
    """Stream standard input into `filename` inside the local collection."""
    dest = self._local_collection.open(filename, 'wb')
    self._write(sys.stdin, dest)
    dest.close()
def _check_file(self, source, filename):
    """
    Check if this file needs to be uploaded

    Decides, based on the resume cache and the local staging collection,
    whether `source` must be uploaded in full, resumed from an offset, or
    skipped entirely. Appends (source, resume_offset, filename) to
    self._files_to_upload when an upload is needed.
    """
    # Ignore symlinks when requested
    if (not self.follow_links) and os.path.islink(source):
        return
    resume_offset = 0
    should_upload = False
    new_file_in_cache = False
    # Record file path for updating the remote collection before exiting
    self._file_paths.add(filename)

    with self._state_lock:
        # If no previous cached data on this file, store it for an eventual
        # repeated run.
        if source not in self._state['files']:
            self._state['files'][source] = {
                'mtime': os.path.getmtime(source),
                'size' : os.path.getsize(source)
            }
            new_file_in_cache = True
        cached_file_data = self._state['files'][source]

    # Check if file was already uploaded (at least partially)
    file_in_local_collection = self._local_collection.find(filename)

    # If not resuming, upload the full file.
    if not self.resume:
        should_upload = True
    # New file detected from last run, upload it.
    elif new_file_in_cache:
        should_upload = True
    # Local file didn't change from last run.
    elif cached_file_data['mtime'] == os.path.getmtime(source) and cached_file_data['size'] == os.path.getsize(source):
        if not file_in_local_collection:
            # File not uploaded yet, upload it completely
            should_upload = True
        elif file_in_local_collection.permission_expired():
            # Permission token expired, re-upload file. This will change whenever
            # we have a API for refreshing tokens.
            should_upload = True
            self._local_collection.remove(filename)
        elif cached_file_data['size'] == file_in_local_collection.size():
            # File already there, skip it.
            self.bytes_skipped += cached_file_data['size']
        elif cached_file_data['size'] > file_in_local_collection.size():
            # File partially uploaded, resume!
            resume_offset = file_in_local_collection.size()
            self.bytes_skipped += resume_offset
            should_upload = True
        else:
            # Inconsistent cache, re-upload the file
            should_upload = True
            self._local_collection.remove(filename)
            self.logger.warning("Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.".format(source))
    # Local file differs from cached data, re-upload it.
    else:
        if file_in_local_collection:
            self._local_collection.remove(filename)
        should_upload = True

    if should_upload:
        self._files_to_upload.append((source, resume_offset, filename))
def _upload_files(self):
    """Upload every file queued by _check_file, resuming from the recorded
    offset when one was detected."""
    for source, resume_offset, filename in self._files_to_upload:
        with open(source, 'rb') as source_fd:
            # Refresh the cached mtime/size now, so a later resume can tell
            # whether the file changed mid-upload.
            with self._state_lock:
                self._state['files'][source]['mtime'] = os.path.getmtime(source)
                self._state['files'][source]['size'] = os.path.getsize(source)
            if resume_offset > 0:
                # Start upload where we left off
                output = self._local_collection.open(filename, 'ab')
                source_fd.seek(resume_offset)
            else:
                # Start from scratch
                output = self._local_collection.open(filename, 'wb')
            self._write(source_fd, output)
            # flush=False: defer committing the block to Keep so small files
            # can be packed together (arvados ArvadosFile API).
            output.close(flush=False)
def _write(self, source_fd, output):
    """Copy source_fd into output in KEEP_BLOCK_SIZE-sized chunks."""
    while True:
        chunk = source_fd.read(arvados.config.KEEP_BLOCK_SIZE)
        if not chunk:
            break
        output.write(chunk)
def _my_collection(self):
return self._remote_collection if self.update else self._local_collection
def _setup_state(self, update_collection):
    """
    Create a new cache file or load a previously existing one.

    Also loads the remote collection when `update_collection` names an
    existing collection UUID, and always builds self._local_collection
    from the cached manifest (empty on a fresh run).
    """
    # Load an already existing collection for update
    if update_collection and re.match(arvados.util.collection_uuid_pattern,
                                      update_collection):
        try:
            self._remote_collection = arvados.collection.Collection(update_collection)
        except arvados.errors.ApiError as error:
            raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
        else:
            self.update = True
    elif update_collection:
        # Collection locator provided, but unknown format
        raise CollectionUpdateError("Collection locator unknown: '{}'".format(update_collection))

    if self.use_cache:
        # Set up cache file name from input paths: the same inputs (plus API
        # host and optional --filename) always map to the same cache file.
        md5 = hashlib.md5()
        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())
        realpaths = sorted(os.path.realpath(path) for path in self.paths)
        md5.update(b'\0'.join([p.encode() for p in realpaths]))
        if self.filename:
            md5.update(self.filename.encode())
        cache_filename = md5.hexdigest()
        cache_filepath = os.path.join(
            arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
            cache_filename)
        if self.resume and os.path.exists(cache_filepath):
            # 'a+' keeps existing contents so the previous state can be read.
            self.logger.info("Resuming upload from cache file {}".format(cache_filepath))
            self._cache_file = open(cache_filepath, 'a+')
        else:
            # --no-resume means start with a empty cache file.
            self.logger.info("Creating new cache file at {}".format(cache_filepath))
            self._cache_file = open(cache_filepath, 'w+')
        self._cache_filename = self._cache_file.name
        # Only one arv-put process may own this cache at a time.
        self._lock_file(self._cache_file)
        self._cache_file.seek(0)

    with self._state_lock:
        if self.use_cache:
            try:
                self._state = json.load(self._cache_file)
                if not set(['manifest', 'files']).issubset(set(self._state.keys())):
                    # Cache at least partially incomplete, set up new cache
                    self._state = copy.deepcopy(self.EMPTY_STATE)
            except ValueError:
                # Cache file empty, set up new cache
                self._state = copy.deepcopy(self.EMPTY_STATE)
        else:
            self.logger.info("No cache usage requested for this run.")
            # No cache file, set empty state
            self._state = copy.deepcopy(self.EMPTY_STATE)
    # Load the previous manifest so we can check if files were modified remotely.
    self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired, put_threads=self.put_threads)
def collection_file_paths(self, col, path_prefix='.'):
    """Return a list of file paths by recursively go through the entire collection `col`"""
    paths = []
    for name, entry in listitems(col):
        if isinstance(entry, arvados.arvfile.ArvadosFile):
            paths.append(os.path.join(path_prefix, name))
        elif isinstance(entry, arvados.collection.Subcollection):
            nested_prefix = os.path.join(path_prefix, name)
            paths.extend(self.collection_file_paths(entry, path_prefix=nested_prefix))
    return paths
def _lock_file(self, fileobj):
    """Take an exclusive, non-blocking flock on `fileobj`.

    Raises ResumeCacheConflict when another process already holds the lock
    (flock raises IOError with LOCK_NB instead of blocking).
    """
    try:
        fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        raise ResumeCacheConflict("{} locked".format(fileobj.name))
def _save_state(self):
"""
Atomically save current state into cache.
"""
with self._state_lock:
# We're not using copy.deepcopy() here because it's a lot slower
# than json.dumps(), and we're already needing JSON format to be
# saved on disk.
state = json.dumps(self._state)
try:
new_cache = tempfile.NamedTemporaryFile(
mode='w+',
dir=os.path.dirname(self._cache_filename), delete=False)
self._lock_file(new_cache)
new_cache.write(state)
new_cache.flush()
os.fsync(new_cache)
os.rename(new_cache.name, self._cache_filename)
except (IOError, OSError, ResumeCacheConflict) as error:
self.logger.error("There was a problem while saving the cache file: {}".format(error))
try:
os.unlink(new_cache_name)
except NameError: # mkstemp failed.
pass
else:
self._cache_file.close()
self._cache_file = new_cache
def collection_name(self):
    """Name of the written collection, or None when no API response is available."""
    response = self._my_collection().api_response()
    return response['name'] if response else None

def manifest_locator(self):
    """Locator (UUID) of the written collection."""
    return self._my_collection().manifest_locator()

def portable_data_hash(self):
    """API-reported portable data hash, cross-checked against a locally computed one."""
    pdh = self._my_collection().portable_data_hash()
    stripped = self._my_collection().stripped_manifest().encode()
    local_pdh = '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))
    if pdh != local_pdh:
        logger.warning("\n".join([
            "arv-put: API server provided PDH differs from local manifest.",
            " This should not happen; showing API server version."]))
    return pdh

def manifest_text(self, stream_name=".", strip=False, normalize=False):
    """Manifest text of the written collection."""
    return self._my_collection().manifest_text(stream_name, strip, normalize)
def _datablocks_on_item(self, item):
    """
    Return a list of datablock locators, recursively navigating
    through subcollections
    """
    if isinstance(item, arvados.arvfile.ArvadosFile):
        if item.size() == 0:
            # Empty file locator
            return ["d41d8cd98f00b204e9800998ecf8427e+0"]
        return [segment.locator for segment in item.segments()]
    if isinstance(item, arvados.collection.Collection):
        # Flatten the per-child locator lists into one list.
        nested = (self._datablocks_on_item(child) for child in listvalues(item))
        return [locator for sublist in nested for locator in sublist]
    return None
def data_locators(self):
    """Return every datablock locator referenced by the written collection."""
    with self._collection_lock:
        # Make sure all datablocks are flushed before getting the locators
        self._my_collection().manifest_text()
        datablocks = self._datablocks_on_item(self._my_collection())
    return datablocks
def expected_bytes_for(pathlist, follow_links=True):
    """Total byte count of the given files/directories, for percent progress.

    Returns None when any path is neither a file nor a directory, meaning
    the expected size cannot be determined.
    """
    total = 0
    for path in pathlist:
        if os.path.isdir(path):
            for root, _, filenames in os.walk(path, followlinks=follow_links):
                for name in filenames:
                    fullpath = os.path.join(root, name)
                    # Ignore symlinked files when requested
                    if not follow_links and os.path.islink(fullpath):
                        continue
                    total += os.path.getsize(fullpath)
        elif os.path.isfile(path):
            total += os.path.getsize(path)
        else:
            return None
    return total
# Template for machine-readable progress lines; baked once with the program
# name and PID so every line identifies its producer.
_machine_format = "{} {}: {{}} written {{}} total\n".format(sys.argv[0],
                                                            os.getpid())

def machine_progress(bytes_written, bytes_expected):
    """One machine-parseable progress line; -1 stands for an unknown total."""
    expected = -1 if bytes_expected is None else bytes_expected
    return _machine_format.format(bytes_written, expected)

def human_progress(bytes_written, bytes_expected):
    """Human-friendly progress line (overwrites itself via leading \\r)."""
    if not bytes_expected:
        return "\r{} ".format(bytes_written)
    return "\r{}M / {}M {:.1%} ".format(
        bytes_written >> 20, bytes_expected >> 20,
        float(bytes_written) / bytes_expected)

def progress_writer(progress_func, outfile=sys.stderr):
    """Wrap a progress formatter into a (written, expected) reporter callback."""
    def write_progress(bytes_written, bytes_expected):
        outfile.write(progress_func(bytes_written, bytes_expected))
    return write_progress

def exit_signal_handler(sigcode, frame):
    """Exit with a negative status mirroring the received signal number."""
    sys.exit(-sigcode)
def desired_project_uuid(api_client, project_uuid, num_retries):
    """Resolve the destination project UUID.

    An empty project_uuid means the current user's home project; otherwise
    the UUID must match the user or group pattern. Raises ValueError for
    anything else.
    """
    if not project_uuid:
        request = api_client.users().current()
    elif arvados.util.user_uuid_pattern.match(project_uuid):
        request = api_client.users().get(uuid=project_uuid)
    elif arvados.util.group_uuid_pattern.match(project_uuid):
        request = api_client.groups().get(uuid=project_uuid)
    else:
        raise ValueError("Not a valid project UUID: {}".format(project_uuid))
    return request.execute(num_retries=num_retries)['uuid']
def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
    """Command-line entry point: parse arguments, upload the given paths,
    and print the resulting locator / data hash / manifest text."""
    global api_client

    logger = logging.getLogger('arvados.arv_put')
    logger.setLevel(logging.INFO)
    args = parse_arguments(arguments)
    status = 0
    if api_client is None:
        api_client = arvados.api('v1')

    # Determine the name to use
    if args.name:
        if args.stream or args.raw:
            logger.error("Cannot use --name with --stream or --raw")
            sys.exit(1)
        elif args.update_collection:
            logger.error("Cannot use --name with --update-collection")
            sys.exit(1)
        collection_name = args.name
    else:
        # Default name records when, who and where the upload was made.
        collection_name = "Saved at {} by {}@{}".format(
            datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
            pwd.getpwuid(os.getuid()).pw_name,
            socket.gethostname())

    if args.project_uuid and (args.stream or args.raw):
        logger.error("Cannot use --project-uuid with --stream or --raw")
        sys.exit(1)

    # Determine the parent project
    try:
        project_uuid = desired_project_uuid(api_client, args.project_uuid,
                                            args.retries)
    except (apiclient_errors.Error, ValueError) as error:
        logger.error(error)
        sys.exit(1)

    # Pick a progress reporter: human-readable, machine-readable, or none.
    if args.progress:
        reporter = progress_writer(human_progress)
    elif args.batch_progress:
        reporter = progress_writer(machine_progress)
    else:
        reporter = None

    # If this is used by a human, and there's at least one directory to be
    # uploaded, the expected bytes calculation can take a moment.
    if args.progress and any([os.path.isdir(f) for f in args.paths]):
        logger.info("Calculating upload size, this could take some time...")
    bytes_expected = expected_bytes_for(args.paths, follow_links=args.follow_links)

    try:
        writer = ArvPutUploadJob(paths = args.paths,
                                 resume = args.resume,
                                 use_cache = args.use_cache,
                                 filename = args.filename,
                                 reporter = reporter,
                                 bytes_expected = bytes_expected,
                                 num_retries = args.retries,
                                 replication_desired = args.replication,
                                 put_threads = args.threads,
                                 name = collection_name,
                                 owner_uuid = project_uuid,
                                 ensure_unique_name = True,
                                 update_collection = args.update_collection,
                                 logger=logger,
                                 dry_run=args.dry_run,
                                 follow_links=args.follow_links)
    except ResumeCacheConflict:
        logger.error("\n".join([
            "arv-put: Another process is already uploading this data.",
            " Use --no-cache if this is really what you want."]))
        sys.exit(1)
    except CollectionUpdateError as error:
        logger.error("\n".join([
            "arv-put: %s" % str(error)]))
        sys.exit(1)
    except ArvPutUploadIsPending:
        # Dry run check successful, return proper exit code.
        sys.exit(2)
    except ArvPutUploadNotPending:
        # No files pending for upload
        sys.exit(0)

    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
    # the originals.
    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
                            for sigcode in CAUGHT_SIGNALS}

    if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
        logger.warning("\n".join([
            "arv-put: Resuming previous upload from last checkpoint.",
            " Use the --no-resume option to start over."]))

    if not args.dry_run:
        writer.report_progress()
    output = None
    try:
        writer.start(save_collection=not(args.stream or args.raw))
    except arvados.errors.ApiError as error:
        logger.error("\n".join([
            "arv-put: %s" % str(error)]))
        sys.exit(1)
    except ArvPutUploadIsPending:
        # Dry run check successful, return proper exit code.
        sys.exit(2)
    except ArvPutUploadNotPending:
        # No files pending for upload
        sys.exit(0)
    except PathDoesNotExistError as error:
        logger.error("\n".join([
            "arv-put: %s" % str(error)]))
        sys.exit(1)

    if args.progress:  # Print newline to split stderr from stdout for humans.
        logger.info("\n")

    if args.stream:
        if args.normalize:
            output = writer.manifest_text(normalize=True)
        else:
            output = writer.manifest_text()
    elif args.raw:
        output = ','.join(writer.data_locators())
    else:
        try:
            if args.update_collection:
                logger.info("Collection updated: '{}'".format(writer.collection_name()))
            else:
                logger.info("Collection saved as '{}'".format(writer.collection_name()))
            if args.portable_data_hash:
                output = writer.portable_data_hash()
            else:
                output = writer.manifest_locator()
        except apiclient_errors.Error as error:
            logger.error(
                "arv-put: Error creating Collection on project: {}.".format(
                    error))
            status = 1

    # Print the locator (uuid) of the new collection.
    if output is None:
        status = status or 1
    else:
        stdout.write(output)
        if not output.endswith('\n'):
            stdout.write('\n')

    # Restore the signal handlers we replaced above.
    for sigcode, orig_handler in listitems(orig_signal_handlers):
        signal.signal(sigcode, orig_handler)

    if status != 0:
        sys.exit(status)

    # Success!
    return output
# Allow running this module directly as the arv-put command.
if __name__ == '__main__':
    main()
|
auto_posting_thread.py | from app import app, helpers
from app.scheduledb import ScheduleDB
from app.scheduleCreator import create_schedule_text
from app import vkapi
from datetime import datetime, timedelta
import threading
from time import sleep
def auto_posting(current_time):
    """Send schedules to every user whose auto-posting time equals current_time.

    Two passes: first the users who receive *today's* schedule, then the
    users who receive *tomorrow's*. On Sunday the "today" pass already
    shifts to Monday (next week), so the second pass does not advance the
    day again.
    """
    today = datetime.now()
    week_type = (today.isocalendar()[1] + 1) % 2
    if datetime.weekday(today) == 6:
        # On Sunday post Monday's schedule of the following week.
        today += timedelta(days=1)
        week_type = (week_type + 1) % 2
    day = helpers.daysOfWeek[datetime.weekday(today)]

    # Users who get the schedule for the current day.
    with ScheduleDB(app.config) as db:
        users = db.find_users_where(auto_posting_time=current_time, is_today=True)
    if users is None:
        return None
    _send_schedules(users, day, week_type)

    # Users who get tomorrow's schedule; if today is Sunday the day was
    # already advanced to Monday above, so don't advance it twice.
    if datetime.weekday(datetime.now()) != 6:
        today += timedelta(days=1)
        day = helpers.daysOfWeek[datetime.weekday(today)]
    with ScheduleDB(app.config) as db:
        users = db.find_users_where(auto_posting_time=current_time, is_today=False)
    if users is None:
        return None
    _send_schedules(users, day, week_type)


def _send_schedules(users, day, week_type):
    """Send the schedule for `day` to each (uid, tag) user row.

    Throttles with a 1 second pause after 20 sends to respect the VK API
    rate limit. Best effort: any failure aborts the remaining sends (as in
    the original code) but never propagates to the caller.
    """
    try:
        count = 0
        for user in users:
            uid = user[0]
            tag = user[1]
            schedule = create_schedule_text(tag, day, week_type)
            vkapi.send_message(uid, app.config['TOKEN'], schedule)
            count += 1
            if count > 20:
                sleep(1)
    except Exception:
        # TODO: log the failure instead of silently dropping it.
        pass
if __name__ == "__main__":
    # Once a minute, post schedules for the current "HH:MM:00" slot.
    while True:
        # Bug fix: the original wrote threading.Thread(target=auto_posting(...)),
        # which called auto_posting synchronously on the main thread and passed
        # its return value (None) as the thread target. Pass the callable and
        # its argument separately so the work really runs on the worker thread.
        current_minute = datetime.now().time().strftime("%H:%M:00")
        threading.Thread(target=auto_posting, args=(current_minute,)).start()
        # Seconds elapsed since the start of the current minute...
        time_delta = datetime.now() - datetime.now().replace(second=0, microsecond=0)
        # ...then sleep until the next minute begins.
        sleep(60 - time_delta.seconds)
|
client_back.py | #!/usr/bin/env python3
"""
RSM Client
"""
__author__ = "Bruno Chianca Ferreira"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Bruno Chianca Ferreira"
__email__ = "brunobcf@gmail.com"
import socket, random, sys, json, traceback, zlib, fcntl, time, threading, pickle, argparse, os, statistics, datetime
from apscheduler.schedulers.background import BackgroundScheduler
from classes.network import network_sockets
class RSMClient:
    """Benchmark client for a replicated state machine.

    Spawns `clients` threads that each issue `times` READ or WRITE requests
    against the endpoint, then prints and exports latency/throughput
    statistics.

    Bug fixes over the original:
    - statistics.stdev() requires at least two samples, so the default
      ``-t 1 -c 1`` run crashed with StatisticsError; it now reports 0.0.
    - When every request failed before timing completed, mean/median/max/min
      crashed on an empty list; stats are now skipped in that case.
    - results.csv is opened with a context manager so the handle is closed.
    """

    def __init__(self, endpoint, port, command, args, times, clients):
        'Initializes the properties of the Node object'
        self.name = "RSMClient"
        self.max_packet = 65535  # max packet size to listen
        self.endpoint = endpoint
        self.port = port
        self.command = command
        self.keyvalue = args          # [key, value]
        self.read_results = [0, 0]    # [successes, failures]
        self.read_timings = []        # per-request latency, in milliseconds
        self.threads = []
        self.times = times
        self.size = 0                 # size of the last successful READ payload
        if self.command == 'READ':
            for i in range(clients):
                client_thread = threading.Thread(target=self.read_thread, args=())
                client_thread.start()
                self.threads.append(client_thread)
        if self.command == 'WRITE':
            for i in range(clients):
                write_thread = threading.Thread(target=self.write_thread, args=())
                write_thread.start()
                self.threads.append(write_thread)
        for thread in self.threads:
            thread.join()
        self.print_results()
        self.export_results()

    #### NODE ###############################################################################
    # END OF DEFAULT SETTINGS ###########################################################

    def read_thread(self):
        """Issue `times` READ requests, recording latency and success/failure."""
        for i in range(self.times):
            try:
                start = time.monotonic_ns()
                response = self._read(self.keyvalue[0])
                end = time.monotonic_ns()
                self.read_timings.append((end - start) / 1000000)
                response = pickle.loads(response)
                if response[0] == 'OK':
                    self.read_results[0] += 1
                    self.size = sys.getsizeof(response[1])
                else:
                    self.read_results[1] += 1
            except Exception:
                traceback.print_exc()
                self.read_results[1] += 1

    def write_thread(self):
        """Issue `times` WRITE requests, recording latency and success/failure."""
        for i in range(self.times):
            try:
                start = time.monotonic_ns()
                response = self._write(self.keyvalue[0], self.keyvalue[1])
                end = time.monotonic_ns()
                self.read_timings.append((end - start) / 1000000)
                response = pickle.loads(response)
                if response[0] == 'OK':
                    self.read_results[0] += 1
                else:
                    self.read_results[1] += 1
            except Exception:
                traceback.print_exc()
                self.read_results[1] += 1

    def who_is_leader(self):
        # Leader discovery is not implemented yet.
        pass

    def _stats(self):
        """Return (mean, median, stdev, max, min, throughput) for the timings.

        stdev needs at least two samples, so it is reported as 0.0 for a
        single request.
        """
        mean = statistics.mean(self.read_timings)
        median = statistics.median(self.read_timings)
        stdev = statistics.stdev(self.read_timings) if len(self.read_timings) > 1 else 0.0
        throughput = self.times / (sum(self.read_timings) / 1000)
        return mean, median, stdev, max(self.read_timings), min(self.read_timings), throughput

    def print_results(self):
        """Print aggregate benchmark statistics to stdout."""
        print("Results")
        print("####################### Reads ##################################33")
        print()
        print("Succesful: " + str(self.read_results[0]))
        print("Failed: " + str(self.read_results[1]))
        if not self.read_timings:
            # Every request failed before a latency sample was recorded.
            return
        mean, median, stdev, max_ms, min_ms, throughput = self._stats()
        print("Mean latency: " + str(mean) + " ms")
        print("Median latency: " + str(median) + " ms")
        print("Std dev latency: " + str(stdev) + " ms")
        print("Max latency: " + str(max_ms) + " ms")
        print("Min latency: " + str(min_ms) + " ms")
        print("Throughput: " + str(throughput) + " ops/s")

    def export_results(self):
        """Append one CSV row of statistics to results.csv (header first if new)."""
        if not self.read_timings:
            return
        write_header = not os.path.isfile("results.csv")
        with open("results.csv", "a") as export_file:
            if write_header:
                export_file.write('datetime;repetitions;size;mean latency;median latency;std dev latency; max latency; min latency; throughput\n')
            mean, median, stdev, max_ms, min_ms, throughput = self._stats()
            export_file.write(str(datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
            export_file.write(";" + str(self.times))
            export_file.write(";" + str(self.size))
            export_file.write(";" + str(mean))
            export_file.write(";" + str(median))
            export_file.write(";" + str(stdev))
            export_file.write(";" + str(max_ms))
            export_file.write(";" + str(min_ms))
            export_file.write(";" + str(throughput) + "\n")
            export_file.flush()

    def _create_id(self):
        """Pseudo-unique request id: CRC32 of timestamp + 'client' + random salt."""
        return zlib.crc32((str(time.time() * 1000) + str('client') + str(random.randint(0, 10000))).encode())

    ###############################################################################################

    def _write(self, key, value):
        """Send a WRITE command for (key, value); return the raw response bytes."""
        bytes_to_send = pickle.dumps(['WRITE', key, value])
        return network_sockets.TcpPersistent.send(self, self.endpoint, bytes_to_send, self._create_id())

    def _read(self, key):
        """Send a READ command for key; return the raw response bytes."""
        bytes_to_send = pickle.dumps(['READ', key])
        return network_sockets.TcpPersistent.send(self, self.endpoint, bytes_to_send, self._create_id())
# Entry point: parse CLI arguments, validate them, and run the benchmark
# client. Only runs when executed directly, not when imported as a module.
if __name__ == '__main__':
    try:
        print("Genesis v." + __version__ + " - RSM client")
        print()
        parser = argparse.ArgumentParser(description='Some arguments are obligatory and must follow the correct order as indicated')
        parser.add_argument("command", help="Command: read of write", choices=['read', 'write'])
        parser.add_argument("key", help="New application name", nargs='?')
        parser.add_argument("value", help="New application name", nargs='?')
        parser.add_argument("-e", "--endpoint", type=str, help="End-point to connect", default="localhost")
        parser.add_argument("-p", "--port", type=int, help="Communication port", default=56444)
        parser.add_argument("-t", "--times", type=int, help="Repetitions", default=1)
        parser.add_argument("-c", "--clients", type=int, help="Clients", default=1)
        args = parser.parse_args()
        # READ needs a key (value is unused and defaults to 0);
        # WRITE needs both a key and a value.
        if args.command.upper() == 'READ':
            if args.key == None:
                print('Missing key to read.')
                sys.exit(0)
            else:
                key = args.key
                value = 0
        if args.command.upper() == 'WRITE':
            if args.key == None:
                print('Missing key to write.')
                sys.exit(0)
            else:
                key = args.key
                if args.value == None:
                    print('Missing value to write')
                    sys.exit(0)
                else:
                    value = args.value
        # The constructor runs the whole benchmark (threads, stats, CSV export).
        client = RSMClient(args.endpoint, args.port, args.command.upper(), [key, value], args.times, args.clients)
    #############################################################################
    except KeyboardInterrupt:
        print("Interrupted by ctrl+c")
    except:
        traceback.print_exc()
|
test_api.py | import mock
import re
import socket
import threading
import time
import warnings
from unittest import TestCase
import pytest
from tests.test_tracer import get_dummy_tracer
from ddtrace.api import API, Response
from ddtrace.compat import iteritems, httplib, PY3
from ddtrace.internal.runtime.container import CGroupInfo
from ddtrace.vendor.six.moves import BaseHTTPServer, socketserver
class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Common base for the fake trace-agent request handlers used below."""

    # Plain-text error bodies keep the client-side assertions simple.
    error_message_format = '%(message)s\n'
    error_content_type = 'text/plain'

    @staticmethod
    def log_message(format, *args):  # noqa: A002
        # Silence per-request logging during tests.
        pass


class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    def do_PUT(self):
        # Always answer 200 so the client sees a successful flush.
        self.send_error(200, 'OK')


class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    def do_PUT(self):
        # This server sleeps longer than our timeout
        time.sleep(5)


class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    def do_PUT(self):
        # Return without writing a response: the connection gets dropped,
        # which the client observes as a reset / bad status line.
        return


# Bind address and fixed ports for the fake servers.
_HOST = '0.0.0.0'
_TIMEOUT_PORT = 8743
_RESET_PORT = _TIMEOUT_PORT + 1
class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
    """HTTP server listening on a Unix domain socket."""

    def server_bind(self):
        # Bypass UnixStreamServer's bind and let HTTPServer set up
        # server_name/server_port from the already-bound socket.
        BaseHTTPServer.HTTPServer.server_bind(self)


def _make_uds_server(path, request_handler):
    """Start a UDS HTTP server on `path` in a daemon thread; return (server, thread)."""
    server = UDSHTTPServer(path, request_handler)
    t = threading.Thread(target=server.serve_forever)
    # Set daemon just in case something fails
    t.daemon = True
    t.start()
    return server, t


@pytest.fixture
def endpoint_uds_server(tmp_path):
    # Serve on a socket file inside pytest's per-test tmp dir; always shut
    # the server down and join its thread, even when the test fails.
    server, thread = _make_uds_server(str(tmp_path / 'uds_server_socket'), _APIEndpointRequestHandlerTest)
    try:
        yield server
    finally:
        server.shutdown()
        thread.join()
def _make_server(port, request_handler):
    """Start a TCP HTTP server on (_HOST, port) in a daemon thread; return (server, thread)."""
    server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
    t = threading.Thread(target=server.serve_forever)
    # Set daemon just in case something fails
    t.daemon = True
    t.start()
    return server, t


@pytest.fixture(scope='module')
def endpoint_test_timeout_server():
    # A server whose handler sleeps longer than the client timeout.
    server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()


@pytest.fixture(scope='module')
def endpoint_test_reset_server():
    # A server whose handler drops the connection without responding.
    server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()
class ResponseMock:
def __init__(self, content, status=200):
self.status = status
self.content = content
def read(self):
return self.content
def test_api_str():
api = API('localhost', 8126)
assert str(api) == 'localhost:8126'
api = API('localhost', 8126, '/path/to/uds')
assert str(api) == '/path/to/uds'
class APITests(TestCase):
    """Unit tests for ddtrace.api.API response parsing and connection handling."""

    def setUp(self):
        # DEV: Mock here instead of in tests, before we have patched `httplib.HTTPConnection`
        self.conn = mock.MagicMock(spec=httplib.HTTPConnection)
        self.api = API('localhost', 8126)

    def tearDown(self):
        del self.api
        del self.conn

    def test_typecast_port(self):
        # A string port must be coerced to an int.
        api = API('localhost', u'8126')
        self.assertEqual(api.port, 8126)

    @mock.patch('logging.Logger.debug')
    def test_parse_response_json(self, log):
        # Map raw agent response bodies to the JSON value (and debug-log
        # pattern, when parsing fails) we expect Response.get_json to yield.
        tracer = get_dummy_tracer()
        tracer.debug_logging = True
        test_cases = {
            'OK': dict(
                js=None,
                log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
            ),
            'OK\n': dict(
                js=None,
                log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
            ),
            'error:unsupported-endpoint': dict(
                js=None,
                log='Unable to parse Datadog Agent JSON response: .*? \'error:unsupported-endpoint\'',
            ),
            42: dict(  # int as key to trigger TypeError
                js=None,
                log='Unable to parse Datadog Agent JSON response: .*? 42',
            ),
            '{}': dict(js={}),
            '[]': dict(js=[]),
            # Priority sampling "rate_by_service" response
            ('{"rate_by_service": '
             '{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'): dict(
                js=dict(
                    rate_by_service={
                        'service:,env:': 0.5,
                        'service:mcnulty,env:test': 0.9,
                        'service:postgres,env:test': 0.6,
                    },
                ),
            ),
            ' [4,2,1] ': dict(js=[4, 2, 1]),
        }

        for k, v in iteritems(test_cases):
            log.reset_mock()
            r = Response.from_http_response(ResponseMock(k))
            js = r.get_json()
            assert v['js'] == js
            if 'log' in v:
                log.assert_called_once()
                # Render the lazily-formatted log call and match the pattern.
                msg = log.call_args[0][0] % log.call_args[0][1:]
                assert re.match(v['log'], msg), msg

    @mock.patch('ddtrace.compat.httplib.HTTPConnection')
    def test_put_connection_close(self, HTTPConnection):
        """
        When calling API._put
        we close the HTTPConnection we create
        """
        HTTPConnection.return_value = self.conn
        with warnings.catch_warnings(record=True) as w:
            self.api._put('/test', '<test data>', 1)
            self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
        self.conn.request.assert_called_once()
        self.conn.close.assert_called_once()

    @mock.patch('ddtrace.compat.httplib.HTTPConnection')
    def test_put_connection_close_exception(self, HTTPConnection):
        """
        When calling API._put raises an exception
        we close the HTTPConnection we create
        """
        HTTPConnection.return_value = self.conn
        # Ensure calling `request` raises an exception
        self.conn.request.side_effect = Exception
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaises(Exception):
                self.api._put('/test', '<test data>', 1)
            self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
        self.conn.request.assert_called_once()
        self.conn.close.assert_called_once()
def test_flush_connection_timeout_connect():
    # Nothing listens on this port: _flush should *return* the connection
    # error instead of raising it.
    payload = mock.Mock()
    payload.get_payload.return_value = 'foobar'
    payload.length = 12
    api = API(_HOST, 2019)
    response = api._flush(payload)
    if PY3:
        assert isinstance(response, (OSError, ConnectionRefusedError))  # noqa: F821
    else:
        assert isinstance(response, socket.error)
    # 111 is ECONNREFUSED on Linux; 99 (EADDRNOTAVAIL) covers other setups.
    assert response.errno in (99, 111)


def test_flush_connection_timeout(endpoint_test_timeout_server):
    # The fixture server sleeps longer than the client timeout.
    payload = mock.Mock()
    payload.get_payload.return_value = 'foobar'
    payload.length = 12
    api = API(_HOST, _TIMEOUT_PORT)
    response = api._flush(payload)
    assert isinstance(response, socket.timeout)


def test_flush_connection_reset(endpoint_test_reset_server):
    # The fixture server closes the connection without any response.
    payload = mock.Mock()
    payload.get_payload.return_value = 'foobar'
    payload.length = 12
    api = API(_HOST, _RESET_PORT)
    response = api._flush(payload)
    if PY3:
        assert isinstance(response, (httplib.BadStatusLine, ConnectionResetError))  # noqa: F821
    else:
        assert isinstance(response, httplib.BadStatusLine)


def test_flush_connection_uds(endpoint_uds_server):
    # Flushing over a Unix domain socket endpoint succeeds.
    payload = mock.Mock()
    payload.get_payload.return_value = 'foobar'
    payload.length = 12
    api = API(_HOST, 2019, uds_path=endpoint_uds_server.server_address)
    response = api._flush(payload)
    assert response.status == 200


@mock.patch('ddtrace.internal.runtime.container.get_container_info')
def test_api_container_info(get_container_info):
    # When we have container information
    # DEV: `get_container_info` will return a `CGroupInfo` with a `container_id` or `None`
    info = CGroupInfo(container_id='test-container-id')
    get_container_info.return_value = info
    api = API(_HOST, 8126)
    assert api._container_info is info
    assert api._headers['Datadog-Container-Id'] == 'test-container-id'

    # When we do not have container information
    get_container_info.return_value = None
    api = API(_HOST, 8126)
    assert api._container_info is None
    assert 'Datadog-Container-Id' not in api._headers
|
SVM-Process.py | import multiprocessing as mp
import time
import numpy as np
from sklearn import svm
def cvkfold(X, y, tuning_params, partitions, k):
    """Score a linear SVC on cross-validation fold `k` for every tuning parameter.

    partitions[k] provides the test indices; everything else is training
    data. Returns an array of test accuracies, one per value of C.
    """
    test_idx = partitions[k]
    train_idx = np.delete(np.arange(0, X.shape[0]), test_idx)
    X_train, y_train = X[train_idx, :], y[train_idx]
    X_test, y_test = X[test_idx, :], y[test_idx]
    accuracies = np.zeros(tuning_params.shape[0])
    for i, c in enumerate(tuning_params):
        model = svm.SVC(C=c, kernel="linear")
        accuracies[i] = model.fit(X_train, y_train).score(X_test, y_test)
    return accuracies
def log_results(X, y, tuning_params, partitions, k, arr):
    """Run cvkfold for fold `k` and append its accuracy array to the shared list `arr`."""
    arr.append(cvkfold(X, y, tuning_params, partitions, k))
def process_processing(time_dict):
    """Run K-fold CV of a linear SVM across worker processes and record the wall time.

    Loads the optdigits dataset from disk, launches one process per fold, and
    prints the best fold / best tuning parameter.

    Bug fixes vs the original:
    - each worker now receives its own fold index ``k`` (the original passed the
      constant 16, an out-of-range index for a 16-way split, so every child
      process crashed with IndexError and no results were collected);
    - the manager-backed result list is snapshotted before further indexing.

    Parameters
    ----------
    time_dict : dict updated in place with the elapsed time under key "Process".

    Returns
    -------
    The same time_dict, for convenient printing by the caller.
    """
    print("Running the Process Paralleize function. ")
    with mp.Manager() as manager:
        res_vals = manager.list()  # shared across worker processes
        st = time.time()
        processes = []
        test = np.loadtxt("optdigits.txt", delimiter=",")
        X = test[:, 0:64]
        y = test[:, 64]
        K = 4  # number of CV folds actually evaluated
        tuning_params = np.logspace(-6, -1, 10)
        partitions = np.array_split(np.random.permutation([i for i in range(0, X.shape[0])]), 16)
        for k in range(0, K):
            # BUGFIX: pass the loop index k, not the constant 16.
            p = mp.Process(target=log_results, args=(X, y, tuning_params, partitions, k, res_vals))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        results = list(res_vals)  # snapshot before the manager shuts down
        time_taken = time.time() - st
        print(f'Process runs for {(time.time() - st)} seconds.')
        final_results = []
        for accs in results:
            final_results.append(np.mean(accs))
        max_acc_folds = np.argmax(final_results)
        max_acc = final_results[max_acc_folds]
        print(f'The max CV accuracy was for {max_acc_folds + 1} folds and the accuracy was {max_acc}')
        best_tuning_param = tuning_params[np.argmax(results[max_acc_folds])]
        print('Best tuning param %0.6f.' % best_tuning_param)
        time_dict["Process"] = time_taken
        print("###########################################################################################################")
    return time_dict
if __name__ == '__main__':
    # Guard the entry point so worker processes (spawn start method) can
    # safely re-import this module without re-running the benchmark.
    time_dict = {}
    print(process_processing(time_dict))
mid_personxxx.py | #-------------------------------------#
# Use a camera or a video file for detection
# For a camera, just run this script directly
# For a video file, pass its path to cv2.VideoCapture()
# Saving the output video is straightforward; search online for examples
#-------------------------------------#
import threading
import rospy
from sensor_msgs.msg import CameraInfo, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Int64
import time
import os
import sys
import cv2
import numpy as np
from PIL import Image
from yolo import YOLO
# Module-level singletons shared by the ROS callback and the detection loop.
yolo = YOLO()  # YOLO detector instance (loads the model once at import time)
bridge = CvBridge()  # converts ROS CompressedImage messages to OpenCV images
def send_right():
    """Subscribe to the mid camera's compressed image topic and block in rospy.spin()."""
    rospy.Subscriber('/mid_camera/color/image_raw/compressed', CompressedImage, ReceiveVideo_right)
    rospy.spin()
def ReceiveVideo_right(data):
    """ROS callback: decode the compressed image into the global cv_image (BGR)."""
    global cv_image
    # print(1)
    cv_image = bridge.compressed_imgmsg_to_cv2(data, 'bgr8')
    # print(cv_image.shape)
    # cv2.imshow('right', cv_image)
    # k = cv2.waitKey(10)
    # if k == 27:  # Esc key code
    #     cv2.destroyAllWindows()
    # cv2.imshow('image', cv_image)
def main():
    """Detection loop: run YOLO on the latest camera frame, display it, and
    publish a stop command when a person is near the horizontal image centre.

    Reads the global ``cv_image`` written by ReceiveVideo_right; sleeps first
    so the subscriber thread has time to deliver a frame.
    """
    global cv_image, command_vel_pub_m
    time.sleep(4)  # wait for the first camera frame to arrive
    fps = 0
    while(True):
        t1 = time.time()
        # Read the latest frame; OpenCV is BGR, YOLO expects RGB.
        frame = cv2.cvtColor(cv_image,cv2.COLOR_BGR2RGB)
        # Convert to a PIL Image for the detector.
        frame = Image.fromarray(np.uint8(frame))
        # Run detection.
        frame, bbox_list, label_list = yolo.detect_image(frame)
        frame = np.array(frame)
        # RGB -> BGR so OpenCV displays correct colours.
        frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
        # Running average of the frame rate.
        fps = ( fps + (1./(time.time()-t1)) ) / 2
        print("fps= %.2f"%(fps))
        frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        print(frame.shape)
        cv2.imshow("video",frame)
        c= cv2.waitKey(1) & 0xff
        if c==27:  # Esc quits the loop
            break
        if type(label_list) != int:  # detector returns int for bbox_list/label_list when nothing is found
            num_of_obj = len(label_list)
            #print('num_of_object:', num_of_obj)
            # Locate each tracked object relative to the image centre.
            for i in range(num_of_obj):
                if 'person' in label_list[i]:
                    # Horizontal centre of the box (mean of the two x coords).
                    object_center = (bbox_list[i][1]+bbox_list[i][3])*0.5
                    print('object_center: ',object_center)
                    # Person within +/-50 px of the 320 px image midline.
                    if(320-50<object_center<320+50):
                        print('midmidmijdmimdasdadasdasdasimafafafegfagsergerdi')
                        # NOTE(review): command_STOP_pub_m is never defined in this
                        # file (the global statement declares command_vel_pub_m) —
                        # this line will raise NameError when reached; confirm the
                        # intended publisher name.
                        command_STOP_pub_m.publish(1)
                        #motor1_move()
                # elif 'dog' or 'cat' in label_list[i]:
                    # print("wangwang miaomiao")
                    # pass
        else:
            print('yolo未识别到任何物体')
            pass
if __name__ == '__main__':
    # Run the ROS subscriber and the detection loop concurrently.
    t_send_right = threading.Thread(target = send_right)
    t_send_right.start()
    t_main = threading.Thread(target=main)
    t_main.start()
rotate_data.py | import numpy as np
from scipy.spatial import cKDTree
from multiprocessing import Process, cpu_count, Queue, freeze_support, Array # , current_process, Array
import ctypes
import os, psutil, sys
def memlog(msg):
    """Print *msg* together with this process's resident memory, in GB."""
    rss_bytes = psutil.Process(os.getpid()).memory_info()[0]
    print('%s, RAM=%.4g GB'%(msg, rss_bytes/2.**30))
def _euler_rotation_matrix(angles):
    """Build the combined rotation matrix from [alpha, beta, gamma] in degrees.

    Rotates by alpha around x, beta around y and gamma around z.
    Rotation matrix from
    http://inside.mines.edu/fs_home/gmurray/ArbitraryAxisRotation/
    """
    sa, ca = np.sin(angles[0] / 180. * np.pi), np.cos(angles[0] / 180. * np.pi)
    sb, cb = np.sin(angles[1] / 180. * np.pi), np.cos(angles[1] / 180. * np.pi)
    sg, cg = np.sin(angles[2] / 180. * np.pi), np.cos(angles[2] / 180. * np.pi)
    return np.array(
        [[cb * cg, cg * sa * sb - ca * sg, ca * cg * sb + sa * sg],
         [cb * sg, ca * cg + sa * sb * sg, ca * sb * sg - cg * sa],
         [-sb, cb * sa, ca * cb]], dtype=np.float64)


def rotate_data(pos, axis, vel=None, bvel=None):
    r""" rotate the data points (3d) to a given direction. Returns data points (2d) at line of sight.

    Parameter:
    ----------
    pos  : input data points in 3D.
    axis : can be 'x', 'y', 'z' (must be 2D), or a list or numpy array of degrees
           [alpha, beta, gamma], which will rotate the data points by $\alpha$ around
           the x-axis, $\beta$ around the y-axis, and $\gamma$ around the z-axis.
           Or a numpy array with the rotation matrix directly, must be a 3x3 matrix.
    vel  : 3D velocity of the input data points. Default: None, will return an empty list.
           Otherwise, rotate_data will also return the velocity in the axis direction.
    bvel : bulk velocity of the cluster in 3D, default None, returns 0 for the bulk velocity
           along the line of sight. If it is not None, the bulk velocity along the line of
           sight will be returned.

    Notes:
    --------
    When vel is not None, the function returns (pos, vel-in-axis-direction, bulk-vel).
    This function does not work with yt data currently.
    """
    nvel = []
    nbvel = 0
    if isinstance(axis, str):
        # Axis-aligned projections just permute coordinate columns.
        npos = np.copy(pos)
        ax = axis.lower()
        if ax == 'y':  # x-z plane
            npos[:, 1] = pos[:, 2]
            npos[:, 2] = pos[:, 1]
            if vel is not None:
                nvel = vel[:, 1]
            if bvel is not None:
                nbvel = bvel[1]
        elif ax == 'x':  # y-z plane
            npos[:, 0] = pos[:, 1]
            npos[:, 1] = pos[:, 2]
            npos[:, 2] = pos[:, 0]
            if vel is not None:
                nvel = vel[:, 0]
            if bvel is not None:
                nbvel = bvel[0]
        elif ax == 'z':  # already the line of sight; positions unchanged
            if vel is not None:
                nvel = vel[:, 2]
            if bvel is not None:
                nbvel = bvel[2]
        else:
            raise ValueError("Do not accept this value %s for projection" % axis)
    else:
        # General rotation: obtain a 3x3 matrix, then apply it uniformly.
        if isinstance(axis, list):
            if len(axis) != 3:
                raise ValueError("Do not accept this value %s for projection" % axis)
            rot = _euler_rotation_matrix(axis)
        elif isinstance(axis, np.ndarray):
            if axis.ndim == 1:
                rot = _euler_rotation_matrix(axis)
            elif axis.ndim == 2:
                if axis.shape[0] == axis.shape[1] == 3:
                    rot = axis
                else:
                    raise ValueError("Axis shape is not 3x3: ", axis.shape)
            else:
                raise ValueError("Do not accept this shape of axis %s for projection!" % axis)
        else:
            raise ValueError("Do not accept this value %s for projection!" % axis)
        npos = np.dot(pos, rot)
        if vel is not None:
            nvel = np.dot(vel, rot)[:, 2]  # line-of-sight component after rotation
        if bvel is not None:
            nbvel = np.dot(bvel, rot)[2]
    return npos, nvel, nbvel
# For SPH kernels, we always use the total weights = 1,
# thus the constant C can be ignored
def sph_kernel_cubic(x):
    """Cubic spline SPH kernel, unnormalized.

    The normalization constant C is omitted because the caller renormalizes
    each particle's weights to sum to 1.
    """
    w = np.zeros(x.size, dtype=float)
    inner = x <= 0.5
    outer = (x > 0.5) & (x < 1.0)
    w[inner] = 1. - 6. * x[inner] ** 2 * (1. - x[inner])
    w[outer] = 2. * (1. - x[outer]) ** 3
    return w
# quartic spline
def sph_kernel_quartic(x):
    """Quartic spline SPH kernel, unnormalized (constant C omitted)."""
    w = np.zeros(x.size, dtype=float)
    near = x < 0.2
    mid = (x >= 0.2) & (x < 0.6)
    far = (x >= 0.6) & (x < 1)
    w[far] = (1. - x[far]) ** 4
    w[mid] = (1. - x[mid]) ** 4 - 5 * (0.6 - x[mid]) ** 4
    w[near] = (1. - x[near]) ** 4 - 5 * (0.6 - x[near]) ** 4 + 10 * (0.2 - x[near]) ** 4
    return w
# quintic spline
def sph_kernel_quintic(x):
    """Quintic spline SPH kernel, unnormalized (constant C omitted)."""
    third = 0.3333333333333
    two_thirds = 0.6666666666666
    w = np.zeros(x.size, dtype=float)
    m = (x >= two_thirds) & (x < 1)
    w[m] = (1. - x[m]) ** 5
    m = (x >= third) & (x < two_thirds)
    w[m] = (1. - x[m]) ** 5 - 6 * (2. / 3 - x[m]) ** 5
    m = x < third
    w[m] = (1. - x[m]) ** 5 - 6 * (2. / 3 - x[m]) ** 5 + 15 * (third - x[m]) ** 5
    return w
# Wendland C2
def sph_kernel_wendland2(x):
    """Wendland C2 SPH kernel, unnormalized (constant C omitted)."""
    w = np.zeros(x.size, dtype=float)
    m = x < 1
    xm = x[m]
    w[m] = (1. - xm) ** 4 * (1 + 4 * xm)
    return w
# Wendland C4
def sph_kernel_wendland4(x):
    """Wendland C4 SPH kernel, unnormalized (constant C omitted)."""
    w = np.zeros(x.size, dtype=float)
    m = x < 1
    xm = x[m]
    w[m] = (1. - xm) ** 6 * (1 + 6 * xm + 35. / 3 * xm ** 2)
    return w
# Wendland C6
def sph_kernel_wendland6(x):
    """Wendland C6 SPH kernel, unnormalized (constant C omitted)."""
    w = np.zeros(x.size, dtype=float)
    m = x < 1
    xm = x[m]
    w[m] = (1. - xm) ** 8 * (1 + 8 * xm + 25 * xm ** 2 + 32 * xm ** 3)
    return w
def worker(input, output):
    """Pull (func, args) tasks from the *input* queue until the 'STOP' sentinel
    is received, pushing each result onto the *output* queue."""
    for func, args in iter(input.get, 'STOP'):
        result = calculate(func, args)
        output.put(result)
def calculate(func, args):
    """Apply *func* to the unpacked *args* tuple and return the result."""
    result = func(*args)
    return result
# No boundary effects are taken into account!
def cal_sph_hsml(idst, centp, hsml, posd, pxln, sphkernel, wdata):
    """Deposit particle weights onto a 2D/3D pixel grid with SPH smoothing.

    Bug fix vs the original: in the no-neighbour fallback of the 3D branch and
    of both dict branches, the pixel coordinates were built as 0-d arrays
    (np.array(np.int32(...))), so the following ``px[0]`` boundary clamp raised
    IndexError.  They are now 1-element arrays, matching the 2D array branch.

    No boundary effects are taken into account!

    Parameters
    ----------
    idst : per-particle sequences of flattened pixel indices within each hsml.
    centp : particle positions in pixel coordinates, shape (N, posd).
    hsml : per-particle smoothing lengths in pixel units, shape (N,).
    posd : spatial dimension of the grid, 2 or 3.
    sphkernel : vectorized SPH kernel taking r/hsml.
    wdata : weights; ndarray/list for one field, or a dict of arrays for
        several fields smoothed with shared geometry.

    Returns
    -------
    (ydata, [imin, jmin, kmin, imax, jmax, kmax]): grid(s) cropped to the
    touched bounding box; kmin/kmax stay pxln/0 when posd == 2.
    """
    # Track the bounding box of touched pixels so the result can be cropped.
    imin, jmin, kmin, imax, jmax, kmax = pxln, pxln, pxln, 0, 0, 0
    if isinstance(wdata, type(np.array([1]))) or isinstance(wdata, type([])):
        if posd == 2:
            ydata = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(hsml.size):
                if len(idst[i]) <= 0:
                    # No pixel inside hsml: dump the weight on the nearest
                    # pixel, clamped to the grid edge.
                    px, py = np.array([np.int32(np.round(centp[i, 0]))]), np.array([np.int32(np.round(centp[i, 1]))])
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                else:
                    # Unflatten pixel indices: idx = px * pxln + py.
                    px, py = np.int32(np.array(idst[i]) / pxln), np.array(idst[i]) % pxln
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                # Dividing by wsph.sum() normalizes so each particle deposits
                # exactly wdata[i] in total.
                if wsph.sum() > 0:
                    ydata[px, py] += wdata[i] * wsph / wsph.sum()
                else:
                    ydata[px, py] += wdata[i]
            imax += 1; jmax += 1
            ydata = ydata[imin:imax, jmin:jmax]
        elif posd == 3:
            ydata = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(hsml.size):
                if len(idst[i]) <= 0:
                    # BUGFIX: 1-element arrays (0-d arrays cannot be indexed
                    # with px[0] below).
                    px, py, pz = (np.array([np.int32(np.round(centp[i, 0]))]),
                                  np.array([np.int32(np.round(centp[i, 1]))]),
                                  np.array([np.int32(np.round(centp[i, 2]))]))
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                    if pz[0] >= pxln: pz[0] = pxln - 1
                else:
                    # idx = (px * pxln + py) * pxln + pz
                    px, py, pz = (np.int32(np.array(idst[i]) / pxln / pxln),
                                  np.int32(np.array(idst[i]) % (pxln * pxln) / pxln),
                                  np.array(idst[i]) % (pxln * pxln) % pxln)
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2 + (pz - centp[i, 2])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                if wsph.sum() > 0:
                    ydata[px, py, pz] += wdata[i] * wsph / wsph.sum()
                else:
                    ydata[px, py, pz] += wdata[i]
            imax += 1; jmax += 1; kmax += 1
            ydata = ydata[imin:imax, jmin:jmax, kmin:kmax]
    else:
        # wdata is a dict of fields sharing the same particle geometry.
        ydata = {}
        if posd == 2:
            for i in wdata.keys():
                ydata[i] = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(hsml.size):
                if len(idst[i]) <= 0:
                    # BUGFIX: 1-element arrays, as in the array branch.
                    px, py = np.array([np.int32(np.round(centp[i, 0]))]), np.array([np.int32(np.round(centp[i, 1]))])
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                else:
                    px, py = np.int32(np.array(idst[i]) / pxln), np.array(idst[i]) % pxln
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py] += wdata[j][i] * wsph / wsph.sum()
                    else:
                        ydata[j][px, py] += wdata[j][i]
            imax += 1; jmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax]
        elif posd == 3:
            for i in wdata.keys():
                # float32 keeps the (possibly huge) 3D return small; there is a
                # problem using multiprocessing with really big return objects:
                # https://bugs.python.org/issue17560
                ydata[i] = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(hsml.size):
                if len(idst[i]) <= 0:
                    # BUGFIX: 1-element arrays, as in the 2D array branch.
                    px, py, pz = (np.array([np.int32(np.round(centp[i, 0]))]),
                                  np.array([np.int32(np.round(centp[i, 1]))]),
                                  np.array([np.int32(np.round(centp[i, 2]))]))
                    # check boundary
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                    if pz[0] >= pxln: pz[0] = pxln - 1
                else:
                    px, py, pz = (np.int32(np.array(idst[i]) / pxln / pxln),
                                  np.int32(np.array(idst[i]) % (pxln * pxln) / pxln),
                                  np.array(idst[i]) % (pxln * pxln) % pxln)
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2 + (pz - centp[i, 2])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py, pz] += wdata[j][i] * wsph / wsph.sum()
                    else:
                        ydata[j][px, py, pz] += wdata[j][i]
            imax += 1; jmax += 1; kmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax, kmin:kmax]
    return ydata, [imin, jmin, kmin, imax, jmax, kmax]
def cal_sph_neib(idst, dist, posd, pxln, sphkernel, wdata):
    """Deposit particle weights onto fixed nearest-neighbour grid pixels.

    Each particle i is smoothed over the pixels listed in idst[i]; the kernel
    argument is the pixel distance normalized by the farthest neighbour.

    Bug fix vs the original: the dict branch assigned into ``ydata`` without
    ever initializing it (``ydata = {}`` was missing), raising NameError.

    Parameters
    ----------
    idst : array of per-particle flattened pixel-index lists, shape (N, k).
    dist : matching per-particle pixel distances, shape (N, k).
    posd : spatial dimension of the grid, 2 or 3.
    sphkernel : vectorized SPH kernel taking r / r_max.
    wdata : ndarray/list for one field, or a dict of arrays for several fields.

    Returns
    -------
    (ydata, [imin, jmin, kmin, imax, jmax, kmax]): grid(s) cropped to the
    touched bounding box; kmin/kmax stay pxln/0 when posd == 2.
    """
    imin, jmin, kmin, imax, jmax, kmax = pxln, pxln, pxln, 0, 0, 0
    if isinstance(wdata, type(np.array([1]))) or isinstance(wdata, type([])):
        if posd == 2:
            # need to think about reducing this return array later
            ydata = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(idst.shape[0]):
                ids = np.array(idst[i])
                wsph = sphkernel(dist[i] / dist[i].max())
                px, py = np.int32(ids / pxln), ids % pxln  # idx = px*pxln + py
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if wsph.sum() > 0:
                    ydata[px, py] += wdata[i] * wsph / wsph.sum()
            imax += 1; jmax += 1
            ydata = ydata[imin:imax, jmin:jmax]
        elif posd == 3:
            ydata = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(idst.shape[0]):
                ids = np.array(idst[i])
                wsph = sphkernel(dist[i] / dist[i].max())
                px, py, pz = np.int32(ids / pxln / pxln), np.int32(ids % (pxln * pxln) / pxln), ids % (pxln * pxln) % pxln
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                if wsph.sum() > 0:
                    ydata[px, py, pz] += wdata[i] * wsph / wsph.sum()
            imax += 1; jmax += 1; kmax += 1
            ydata = ydata[imin:imax, jmin:jmax, kmin:kmax]
    else:
        ydata = {}  # BUGFIX: was missing, causing NameError below
        if posd == 2:
            for i in wdata.keys():
                ydata[i] = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(idst.shape[0]):
                ids = np.array(idst[i])
                wsph = sphkernel(dist[i] / dist[i].max())
                px, py = np.int32(ids / pxln), ids % pxln
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py] += wdata[j][i] * wsph / wsph.sum()
            imax += 1; jmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax]
        elif posd == 3:
            for i in wdata.keys():
                ydata[i] = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(idst.shape[0]):
                ids = np.array(idst[i])
                wsph = sphkernel(dist[i] / dist[i].max())
                px, py, pz = np.int32(ids / pxln / pxln), np.int32(ids % (pxln * pxln) / pxln), ids % (pxln * pxln) % pxln
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py, pz] += wdata[j][i] * wsph / wsph.sum()
            imax += 1; jmax += 1; kmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax, kmin:kmax]
    return ydata, [imin, jmin, kmin, imax, jmax, kmax]
def cal_sph_hsml_v3(ctree, centp, hsml, posd, pxln, sphkernel, wdata):
    """Deposit particle weights onto a pixel grid, querying the pixel kd-tree
    (``ctree``) per particle instead of taking precomputed neighbour lists.

    Bug fix vs the original: in the no-neighbour fallback of the 3D branch and
    of both dict branches, the pixel coordinates were 0-d arrays, so the
    ``px[0]`` boundary clamp raised IndexError.  They are now 1-element arrays,
    matching the 2D array branch.

    Parameters
    ----------
    ctree : cKDTree built over the pixel centres, indexed so that a flattened
        pixel index decomposes as px*pxln + py (2D) or (px*pxln + py)*pxln + pz (3D).
    centp : particle positions in pixel coordinates, shape (N, posd).
    hsml : per-particle smoothing lengths in pixel units, shape (N,).
    posd : spatial dimension of the grid, 2 or 3.
    sphkernel : vectorized SPH kernel taking r/hsml.
    wdata : ndarray/list for one field, or a dict of arrays for several fields.

    Returns
    -------
    (ydata, [imin, jmin, kmin, imax, jmax, kmax]): grid(s) cropped to the
    touched bounding box; kmin/kmax stay pxln/0 when posd == 2.
    """
    imin, jmin, kmin, imax, jmax, kmax = pxln, pxln, pxln, 0, 0, 0
    if isinstance(wdata, type(np.array([1]))) or isinstance(wdata, type([])):
        if posd == 2:
            ydata = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(hsml.size):
                idst = np.array(ctree.query_ball_point(centp[i], hsml[i]))
                if len(idst) <= 0:
                    # No pixel inside hsml: use the nearest pixel, clamped.
                    px, py = np.array([np.int32(np.round(centp[i, 0]))]), np.array([np.int32(np.round(centp[i, 1]))])
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                else:
                    px, py = np.int32(idst / pxln), idst % pxln
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                # Normalize so each particle deposits exactly wdata[i].
                if wsph.sum() > 0:
                    ydata[px, py] += wdata[i] * wsph / wsph.sum()
                else:
                    ydata[px, py] += wdata[i]
            imax += 1; jmax += 1
            ydata = ydata[imin:imax, jmin:jmax]
        elif posd == 3:
            ydata = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(hsml.size):
                idst = np.array(ctree.query_ball_point(centp[i], hsml[i]))
                if len(idst) <= 0:
                    # BUGFIX: 1-element arrays (0-d arrays break px[0] below).
                    px, py, pz = (np.array([np.int32(np.round(centp[i, 0]))]),
                                  np.array([np.int32(np.round(centp[i, 1]))]),
                                  np.array([np.int32(np.round(centp[i, 2]))]))
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                    if pz[0] >= pxln: pz[0] = pxln - 1
                else:
                    px, py, pz = np.int32(idst / pxln / pxln), np.int32(idst % (pxln * pxln) / pxln), idst % (pxln * pxln) % pxln
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2 + (pz - centp[i, 2])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                if wsph.sum() > 0:
                    ydata[px, py, pz] += wdata[i] * wsph / wsph.sum()
                else:
                    ydata[px, py, pz] += wdata[i]
            imax += 1; jmax += 1; kmax += 1
            ydata = ydata[imin:imax, jmin:jmax, kmin:kmax]
    else:
        # wdata is a dict of fields sharing the same particle geometry.
        ydata = {}
        if posd == 2:
            for i in wdata.keys():
                ydata[i] = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(hsml.size):
                idst = np.array(ctree.query_ball_point(centp[i], hsml[i]))
                if len(idst) <= 0:
                    # BUGFIX: 1-element arrays, as in the array branch.
                    px, py = np.array([np.int32(np.round(centp[i, 0]))]), np.array([np.int32(np.round(centp[i, 1]))])
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                else:
                    px, py = np.int32(idst / pxln), idst % pxln
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py] += wdata[j][i] * wsph / wsph.sum()
                    else:
                        ydata[j][px, py] += wdata[j][i]
            imax += 1; jmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax]
        elif posd == 3:
            for i in wdata.keys():
                # float32 keeps the (possibly huge) 3D return small; there is a
                # problem using multiprocessing with really big return objects:
                # https://bugs.python.org/issue17560
                ydata[i] = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(hsml.size):
                idst = np.array(ctree.query_ball_point(centp[i], hsml[i]))
                if len(idst) <= 0:
                    # BUGFIX: 1-element arrays, as in the 2D array branch.
                    px, py, pz = (np.array([np.int32(np.round(centp[i, 0]))]),
                                  np.array([np.int32(np.round(centp[i, 1]))]),
                                  np.array([np.int32(np.round(centp[i, 2]))]))
                    # check boundary
                    if px[0] >= pxln: px[0] = pxln - 1
                    if py[0] >= pxln: py[0] = pxln - 1
                    if pz[0] >= pxln: pz[0] = pxln - 1
                else:
                    px, py, pz = np.int32(idst / pxln / pxln), np.int32(idst % (pxln * pxln) / pxln), idst % (pxln * pxln) % pxln
                dist = np.sqrt((px - centp[i, 0])**2 + (py - centp[i, 1])**2 + (pz - centp[i, 2])**2)
                wsph = sphkernel(dist / hsml[i])
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py, pz] += wdata[j][i] * wsph / wsph.sum()
                    else:
                        ydata[j][px, py, pz] += wdata[j][i]
            imax += 1; jmax += 1; kmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax, kmin:kmax]
    return ydata, [imin, jmin, kmin, imax, jmax, kmax]
def cal_sph_neib_v3(ctree, centp, neighbors, pxln, sphkernel, wdata):
    """Deposit particle weights onto the *neighbors* nearest grid pixels,
    querying the pixel kd-tree per particle.

    Bug fixes vs the original (which raised NameError on first use):
    - ``posd`` was an undefined name; it is now derived from centp.shape[1];
    - the body queried an undefined ``mtree``; it now uses the ``ctree`` parameter;
    - the dict branch assigned into ``ydata`` without initializing it.

    Parameters
    ----------
    ctree : cKDTree over the pixel centres, indexed so a flattened index
        decomposes as px*pxln + py (2D) or (px*pxln + py)*pxln + pz (3D).
    centp : particle positions in pixel coordinates, shape (N, posd).
    neighbors : number of nearest pixels each particle is smoothed over.
    sphkernel : vectorized SPH kernel taking r / r_max.
    wdata : ndarray/list for one field, or a dict of arrays for several fields.

    Returns
    -------
    (ydata, [imin, jmin, kmin, imax, jmax, kmax]): grid(s) cropped to the
    touched bounding box; kmin/kmax stay pxln/0 when posd == 2.
    """
    imin, jmin, kmin, imax, jmax, kmax = pxln, pxln, pxln, 0, 0, 0
    posd = centp.shape[1]  # spatial dimension (was an undefined name)
    if isinstance(wdata, type(np.array([1]))) or isinstance(wdata, type([])):
        if posd == 2:
            # need to think about reducing this return array later
            ydata = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(centp.shape[0]):
                dist, idst = ctree.query(centp[i], neighbors)
                ids = np.array(idst)
                wsph = sphkernel(np.array(dist) / dist.max())
                px, py = np.int32(ids / pxln), ids % pxln
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if wsph.sum() > 0:
                    ydata[px, py] += wdata[i] * wsph / wsph.sum()
            imax += 1; jmax += 1
            ydata = ydata[imin:imax, jmin:jmax]
        elif posd == 3:
            ydata = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(centp.shape[0]):
                dist, idst = ctree.query(centp[i], neighbors)
                ids = np.array(idst)
                wsph = sphkernel(np.array(dist) / dist.max())
                px, py, pz = np.int32(ids / pxln / pxln), np.int32(ids % (pxln * pxln) / pxln), ids % (pxln * pxln) % pxln
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                if wsph.sum() > 0:
                    ydata[px, py, pz] += wdata[i] * wsph / wsph.sum()
            imax += 1; jmax += 1; kmax += 1
            ydata = ydata[imin:imax, jmin:jmax, kmin:kmax]
    else:
        ydata = {}  # BUGFIX: was missing, causing NameError below
        if posd == 2:
            for i in wdata.keys():
                ydata[i] = np.zeros((pxln, pxln), dtype=np.float64)
            for i in range(centp.shape[0]):
                dist, idst = ctree.query(centp[i], neighbors)
                ids = np.array(idst)
                wsph = sphkernel(np.array(dist) / dist.max())
                px, py = np.int32(ids / pxln), ids % pxln
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py] += wdata[j][i] * wsph / wsph.sum()
            imax += 1; jmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax]
        elif posd == 3:
            for i in wdata.keys():
                ydata[i] = np.zeros((pxln, pxln, pxln), dtype=np.float32)
            for i in range(centp.shape[0]):
                dist, idst = ctree.query(centp[i], neighbors)
                ids = np.array(idst)
                wsph = sphkernel(np.array(dist) / dist.max())
                px, py, pz = np.int32(ids / pxln / pxln), np.int32(ids % (pxln * pxln) / pxln), ids % (pxln * pxln) % pxln
                if imin > px.min(): imin = px.min()
                if imax < px.max(): imax = px.max()
                if jmin > py.min(): jmin = py.min()
                if jmax < py.max(): jmax = py.max()
                if kmin > pz.min(): kmin = pz.min()
                if kmax < pz.max(): kmax = pz.max()
                for j in wdata.keys():
                    if wsph.sum() > 0:
                        ydata[j][px, py, pz] += wdata[j][i] * wsph / wsph.sum()
            imax += 1; jmax += 1; kmax += 1
            for i in wdata.keys():
                ydata[i] = ydata[i][imin:imax, jmin:jmax, kmin:kmax]
    return ydata, [imin, jmin, kmin, imax, jmax, kmax]
def SPH_smoothing(wdata, pos, pxls, neighbors, hsml=None, pxln=None, Memreduce=False,
                  Ncpu=None, Ntasks=None, kernel_name='cubic'):
    r"""SPH smoothing for given data.

    Parameters
    ----------
    wdata : array, list, or dict of arrays
        The data to be smoothed. A dict smooths several quantities at once
        (at most 20 entries) onto the same mesh.
    pos : array
        Positions of the SPH particles. The second dimension must be 2 or 3.
    pxls : float
        Grid pixel size of the mesh the data is smoothed onto.
        It must be in the same units as the positions.
    neighbors : int
        How many nearby mesh points each SPH particle is smoothed into.
        Note this has to be rescaled to the image pixel size!
    hsml : array or None
        Smoothing length of the SPH particles. If None, `neighbors` is used
        to do the smoothing.
    pxln : int
        Number of pixels per mesh side. Must be set.
    Memreduce : bool
        Pass the cKDTree object to the worker tasks instead of pre-querying.
        This overcomes the memory cost of the cKDTree query, but requires
        Python >= 3.8 to pickle payloads larger than 4 GB. Default: False.
    Ncpu : int or None
        Number of worker processes. Default: None -> 1.
    Ntasks : int or None
        Number of tasks the calculation is split into. Default: None -> Ncpu.
        Ideally >= Ncpu; raise it if you encounter
        'IO Error: bad message length'.
    kernel_name : str
        The SPH kernel used for the smoothing. Default: 'cubic'. Since a
        normalization is applied in the smoothing, the constants in the
        kernel are always ignored.

    Returns
    -------
    (dict of) smoothed 2D or 3D mesh data, matching the type of `wdata`.

    Example
    -------
    ydata = SPH_smoothing(wdata, pos, 10, hsml=HSML)
    """
    kernels = {'cubic': sph_kernel_cubic,
               'quartic': sph_kernel_quartic,
               'quintic': sph_kernel_quintic,
               'wendland2': sph_kernel_wendland2,
               'wendland4': sph_kernel_wendland4,
               'wendland6': sph_kernel_wendland6}
    try:
        sphkernel = kernels[kernel_name.lower()]
    except KeyError:
        raise ValueError("Do not accept this kernel name %s" % kernel_name)
    if pxln is None:
        # The docstring requires this; fail early with a clear message
        # instead of a TypeError from the arithmetic below.
        raise ValueError("pxln must be set")

    memlog('Init smoothing')
    SD = pos.shape[1]
    if SD not in (2, 3):
        # BUGFIX: the original formatted the full shape tuple with a single
        # %d, which itself raised TypeError instead of the intended ValueError.
        raise ValueError(
            "pos shape %d is not correct, the second dimension must be 2 or 3" % pos.shape[1])

    minx = -pxls * pxln / 2
    # Lex-sort the particles so each parallel task works on a spatially
    # coherent, contiguous chunk (keeps the per-task output sub-box small).
    if SD == 2:
        idls = np.lexsort((pos[:, 1], pos[:, 0]))
        pos = (pos - [minx, minx]) / pxls  # now in units of pixel size
    else:
        idls = np.lexsort((pos[:, 2], pos[:, 1], pos[:, 0]))
        pos = (pos - [minx, minx, minx]) / pxls
    # Mesh-cell centre coordinates, flattened to a (pxln**SD, SD) array.
    grids = np.meshgrid(*([np.arange(0.5, pxln, 1.0)] * SD), indexing='ij')
    indxyz = np.concatenate([g.reshape(g.size, 1) for g in grids], axis=1)

    mesh_shape = (pxln,) * SD
    if isinstance(wdata, (np.ndarray, list)):
        wdata = wdata[idls]
        ydata = np.zeros(mesh_shape, dtype=np.float64)
    else:
        if len(wdata) > 20:
            raise ValueError("Too many data to be smoothed %d" % len(wdata))
        # NOTE: dtype choice kept from the original code — 2D planes were
        # allocated as float32 and 3D cubes as float64.
        dtype = np.float32 if SD == 2 else np.float64
        ydata = {}
        for key in wdata.keys():
            wdata[key] = wdata[key][idls]
            ydata[key] = np.zeros(mesh_shape, dtype=dtype)

    # Apply the same lex sort to positions (and smoothing lengths).
    pos = pos[idls]
    if hsml is not None:
        hsml = hsml[idls] / pxls  # smoothing lengths in pixel units

    # Periodic tree over the mesh-cell centres.
    mtree = cKDTree(indxyz, boxsize=pxln, leafsize=np.int32(np.ceil(pxln / 20)))
    memlog('After cKDTree ')

    freeze_support()
    NUMBER_OF_PROCESSES = 1 if Ncpu is None else Ncpu
    if Ntasks is None:
        Ntasks = NUMBER_OF_PROCESSES
    N = np.int32(pos.shape[0] / Ntasks)
    # Task boundaries; the appended final entry closes the last chunk.
    listn = np.append(np.arange(0, pos.shape[0], N), pos.shape[0])

    task_queue = Queue()
    done_queue = Queue()

    def _chunk(i):
        """Return task i's slice of the weight data; dicts are sliced per key.

        BUGFIX: the original dict branches sliced the dict object itself
        (``wdata[listn[i]:listn[i+1]]``), which raises TypeError, instead of
        slicing each member array; one branch also misspelled ``listn`` as
        ``lintn`` (NameError).
        """
        if isinstance(wdata, (np.ndarray, list)):
            return wdata[listn[i]:listn[i + 1]]
        return {k: v[listn[i]:listn[i + 1]] for k, v in wdata.items()}

    if sys.version_info >= (3, 8) and Memreduce:
        # From Python 3.8 on, pickled task payloads may exceed 4 GB, so the
        # cKDTree itself can be shipped to the workers (only tested on 3.8).
        print("Directly pass the cKDTree to tasks with Python version", sys.version)
        if hsml is None:
            Tasks = [(cal_sph_neib_v3, (mtree, pos[listn[i]:listn[i + 1]], neighbors,
                                        SD, pxln, sphkernel, _chunk(i)))
                     for i in range(listn.size - 1)]
        else:
            Tasks = [(cal_sph_hsml_v3, (mtree, pos[listn[i]:listn[i + 1]],
                                        hsml[listn[i]:listn[i + 1]], SD, pxln,
                                        sphkernel, _chunk(i)))
                     for i in range(listn.size - 1)]
    else:
        # Query in the parent process first, since the per-task payload size
        # is limited; Ntasks can be raised to work around memory issues.
        if hsml is None:
            # Estimate the neighbors and distances once, in parallel.
            dist, idst = mtree.query(pos, neighbors, n_jobs=NUMBER_OF_PROCESSES)
            memlog('After query ')
            Tasks = [(cal_sph_neib, (idst[listn[i]:listn[i + 1]],
                                     dist[listn[i]:listn[i + 1]], SD, pxln,
                                     sphkernel, _chunk(i)))
                     for i in range(listn.size - 1)]
        else:
            idst = mtree.query_ball_point(pos, hsml, n_jobs=NUMBER_OF_PROCESSES)
            memlog('After query ')
            Tasks = [(cal_sph_hsml, (idst[listn[i]:listn[i + 1]],
                                     pos[listn[i]:listn[i + 1]],
                                     hsml[listn[i]:listn[i + 1]], SD, pxln,
                                     sphkernel, _chunk(i)))
                     for i in range(listn.size - 1)]

    # Submit tasks and start the worker processes.
    for task in Tasks:
        task_queue.put(task)
    for _ in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    # Accumulate each task's sub-box result into the full mesh.
    for _ in range(len(Tasks)):
        # BUGFIX: the original fetched from done_queue only for array input,
        # so the dict branch used undefined (td, idx).
        td, idx = done_queue.get()
        if isinstance(wdata, (np.ndarray, list)):
            if SD == 2:
                ydata[idx[0]:idx[3], idx[1]:idx[4]] += td
            else:
                ydata[idx[0]:idx[3], idx[1]:idx[4], idx[2]:idx[5]] += td
        else:
            for key in wdata.keys():
                if SD == 2:
                    ydata[key][idx[0]:idx[3], idx[1]:idx[4]] += td[key]
                else:
                    ydata[key][idx[0]:idx[3], idx[1]:idx[4], idx[2]:idx[5]] += td[key]

    # Tell child processes to stop.
    for _ in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')

    return ydata
|
cronus.py | from .db import DB
import sqlalchemy
from sqlalchemy import Integer, Text
import threading
import base64
from tldextract import extract
import urllib.parse
from bs4 import BeautifulSoup
import requests
import logging
import os
import re
logging.basicConfig(format='[%(levelname)s] : [%(asctime)s] : %(message)s', datefmt='%d-%b-%y %H:%M:%S',
level=logging.DEBUG)
# Schema for the crawl-state table, as (table_name, column_definitions),
# consumed by DB.create_table(*URL_TABLE).
# "state" tracks crawl progress: rows are inserted with state 0 (pending)
# and updated to 2 once crawled (see Cronus.add_url / Cronus.update_urls).
URL_TABLE = ("url",
             [
                 {"name" : "id", "type" : Integer, "unique":True, "nullable":False, "primary_key":True},
                 {"name" : "url", "type" : Text, "unique":True},
                 {"name" : "state","type" : Integer}
             ]
             )
# Module-wide lock; not referenced by the code visible in this module —
# presumably reserved for callers coordinating shared state. TODO confirm.
global_lock = threading.Lock()
class Cronus:
    """Threaded same-domain crawler.

    Seeds a SQLite-backed "url" table, downloads each pending url in its own
    thread, saves the HTML under ``<working_dir>/site/<domain>/`` (file name
    is the base64 of the url) and queues any same-domain links discovered on
    the page for the next round.
    """

    def __init__(self, **config):
        """
        Initiates the url database and a few configs.

        :param pass: regex strings for links to reject (out of scope)
        :param seed: links to start with
        :param working_dir: directory holding the url database and site data
        :param limit_urls: amount of urls to crawl at a time (default: 10)
        """
        logging.debug('Instantiating Cronus')
        fields = ("pass", "seed", "working_dir")
        if not all(map(lambda x: x in config, fields)):
            # BUGFIX: '%s' % (fields) spread the 3-tuple as three format
            # arguments and raised TypeError; wrap it in a 1-tuple so the
            # intended message is produced. (Also fixed the "arguements"
            # misspelling in the message.)
            raise Exception("Expected these arguments: %s" % (fields,))
        self._pass = [re.compile(r) for r in config.get("pass", [])]
        self.url = None  # bound to the "url" table object by init_db()
        self.seed = config.get("seed")
        self.working_dir = config.get("working_dir")
        self.site_data = os.path.join(self.working_dir, "site")
        self.db_path = os.path.join(self.working_dir, "url.db")
        self.limit_urls = config.get("limit_urls", 10)
        self.init_db()
        self.crawled = []    # urls finished (or given up on) this round
        self.new_links = []  # (origin, href) pairs discovered this round
        self._urls = []      # urls already queued this round, to dedupe

    def run(self):
        """
        Starts gathering links and storing downloaded site data.
        Note: does this in threads, for speed.
        """
        logging.debug('Add url tasks')
        seed_urls = self.get_urls()
        tasks = []
        for url in seed_urls:
            link = url.get("url")
            t = threading.Thread(target=self.save_url, args=(link,))
            t.start()
            tasks.append(t)
        for t in tasks:
            t.join()
        self.save_all()

    def init_db(self):
        """
        Creates the necessary table to store links to crawl and
        links that have been crawled, then inserts the seed urls.
        """
        logging.debug('Instantialazing Site Database [%s]' % (self.db_path))
        if not os.path.exists(self.working_dir):
            os.makedirs(self.working_dir)
        if not os.path.exists(self.site_data):
            os.makedirs(self.site_data)
        self.db = DB("sqlite:///%s" % (self.db_path))
        self.db.create_table(*URL_TABLE)
        self.url = self.db.table("url")
        for url in self.seed:
            self.add_url([{"url": url, "state": 0}])

    def get_urls(self):
        """
        Gathers up to ``self.limit_urls`` links from the table
        that have not been crawled yet (state == 0).

        :return: list of row dicts
        """
        limit = self.limit_urls
        logging.debug('Gathering [%s] seed urls' % (limit))
        filters = [("state", "==", 0)]
        result = (
            self.url.select()
            .where(*filters)
            .execute()
        )
        result = [dict(row.items()) for i, row in enumerate(result) if i < limit]
        return result

    def can_add(self, url):
        """Return False when the url fully matches any reject pattern."""
        for regex in self._pass:
            if regex.fullmatch(url):
                return False
        return True

    def _add_url(self, *new_urls):
        """
        Builds insertable rows for new in-scope urls.

        Relative links ("/path") are resolved against their origin, urls are
        normalised via clean_url(), restricted to the origin's domain, and
        deduplicated against urls already seen this round.

        :param new_urls: (origin, url) pairs
        :return: list of {"url": ..., "state": 0} rows
        """
        urls = []
        for origin, url in new_urls:
            url = url.strip()
            if not url.startswith("#"):  # skip in-page fragments
                if url.startswith("/"):
                    u = urllib.parse.urlparse(origin)
                    addon = u.netloc
                    if u.scheme:
                        addon = u.scheme + "://" + addon
                    url = addon + url
                url = self.clean_url(url)
                eu = extract(url)
                o_eu = extract(origin)
                # Stay on the same domain and honour the reject list.
                if eu.domain == o_eu.domain and self.can_add(url):
                    if url not in self._urls:
                        self._urls.append(url)
                        urls.append({"url": url, "state": 0})
        return urls

    def add_url(self, urls):
        """Insert rows into the url table, ignoring already-known urls."""
        for url in urls:
            try:
                self.url.insert(**url)
            except sqlalchemy.exc.IntegrityError:
                # url column is UNIQUE: duplicates are expected, skip them.
                pass

    def update_urls(self, urls, state=2):
        """
        Signifies that urls have been crawled.

        :param urls: valid links/urls that were crawled
        :param state: integer state to record (2 == crawled)
        """
        for url in urls:
            filters = [("url", "==", url)]
            (
                self.url.update()
                .where(*filters)
                .values(state=state)
                .execute()
            )

    def clean_url(self, url):
        """
        Removes fragment and query parameters from a url.
        Note: since we'd only crawl specific sites it's necessary.

        Example:
        --------
        >>> clean_url('https://www.example.com#abc?a=b')
        >>> 'https://www.example.com'

        :return: new url `string`
        """
        return urllib.parse.urljoin(
            url, urllib.parse.urlparse(url).path
        )

    def save_url(self, url):
        """
        Downloads the html data from a url and gathers all links
        on the page to add to the `not crawled` list, sort of :)
        """
        response = None
        headers = {
            'Accept': '*/*',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
        try:
            # NOTE(review): no timeout is set, so a stalled server blocks the
            # worker thread indefinitely — consider requests.get(..., timeout=...).
            response = requests.get(url, headers=headers)
        except requests.exceptions.InvalidSchema:
            self.crawled.append(url)
        except requests.exceptions.MissingSchema:
            self.crawled.append(url)
        logging.debug('[%s] Requesting' % (url))
        if response:
            if response.status_code in range(200, 300):
                logging.debug('[%s] Requesting succeeded <%s>' % (url, response.status_code))
                # BUGFIX: pin the parser; bs4 otherwise guesses (warning) and
                # results depend on which parsers happen to be installed.
                soup = BeautifulSoup(response.text, "html.parser")
                links = soup.select("a")
                more_urls = []
                for a in links:
                    href = a.attrs.get("href")
                    if href:
                        more_urls.append(href)
                self.save(url=url, html=response.text, more_urls=more_urls)
            elif response.status_code in range(500, 600):
                # Server-side error: leave state 0 so it is retried later.
                pass
            else:
                self.crawled.append(url)

    def save(self, url, html, more_urls=None):
        """
        Saves downloaded html data and records new urls to be crawled.

        :param url: crawled url
        :param html: text/html data
        :param more_urls: new urls to be crawled
        """
        # BUGFIX: the original used a mutable default argument (more_urls=[]),
        # which is shared across calls; use None as the sentinel instead.
        if more_urls is None:
            more_urls = []
        logging.debug('[%s] Saving html data' % (url))
        for u in more_urls:
            self.new_links.append((url, u))
        self.crawled.append(url)
        eu = extract(url)
        domain_dir = os.path.join(self.site_data, eu.domain)
        if not os.path.exists(domain_dir):
            os.mkdir(domain_dir)
        # File name is the base64 of the url, so it is reversible and safe.
        fn = base64.b64encode(url.encode("UTF-8")).decode("UTF-8")
        filename = os.path.join(domain_dir, fn)
        with open(filename, "w", encoding="UTF-8", errors="ignore") as fh:
            fh.write(html)

    def save_all(self):
        """Flush this round: mark crawled urls and insert discovered links."""
        print("------------------------------------------------------------")
        logging.debug('Saving all!!!!')
        self.update_urls(self.crawled)
        urls = self._add_url(*self.new_links)
        self.add_url(urls)
        self.db.session.commit()
        logging.debug("Crawled: [%s]" % (len(self.crawled)))
        logging.debug("New links: [%s]" % (len(self.new_links)))
|
PythonExecutor.py | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import ambari_simplejson as json
import logging
import os
from ambari_commons import subprocess32
import pprint
import threading
import platform
from threading import Thread
import time
from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
from resource_management.libraries.functions.log_process_information import log_process_information
from ambari_commons.os_check import OSConst, OSCheck
from Grep import Grep
import sys
from ambari_commons import shell
from ambari_commons.shell import shellRunner
logger = logging.getLogger()
class PythonExecutor(object):
  """
  Performs functionality for executing python scripts.
  Warning: class maintains internal state. As a result, instances should not be
  used as a singleton for a concurrent execution of python scripts
  """
  NO_ERROR = "none"

  def __init__(self, tmpDir, config):
    self.grep = Grep()
    # Signals the watchdog thread that the subprocess finished on its own.
    self.event = threading.Event()
    self.python_process_has_been_killed = False
    self.tmpDir = tmpDir
    self.config = config
    self.log_max_symbols_size = self.config.log_max_symbols_size

  def open_subprocess32_files(self, tmpoutfile, tmperrfile, override_output_files, backup_log_files = True):
    """Open the stdout/stderr capture files for a command run.

    When override_output_files is True the files are recreated (existing
    files are first backed up if backup_log_files is True); otherwise the
    new output is appended to the existing files.
    Returns the (tmpout, tmperr) open file objects.
    """
    if override_output_files: # Recreate files, existing files are backed up if backup_log_files is True
      if backup_log_files:
        self.back_up_log_file_if_exists(tmpoutfile)
        self.back_up_log_file_if_exists(tmperrfile)
      tmpout = open(tmpoutfile, 'w')
      tmperr = open(tmperrfile, 'w')
    else: # Append to files
      tmpout = open(tmpoutfile, 'a')
      tmperr = open(tmperrfile, 'a')
    return tmpout, tmperr

  def back_up_log_file_if_exists(self, file_path):
    """Rename file_path to file_path.<N>, picking the first unused N.

    This saves logs from multiple command retries instead of overwriting.
    """
    if os.path.isfile(file_path):
      counter = 0
      while True:
        # Find backup name that is not used yet (saves logs
        # from multiple command retries)
        backup_name = file_path + "." + str(counter)
        if not os.path.isfile(backup_name):
          break
        counter += 1
      os.rename(file_path, backup_name)

  def run_file(self, script, script_params, tmpoutfile, tmperrfile,
               timeout, tmpstructedoutfile, callback, task_id,
               override_output_files = True, backup_log_files = True, handle = None,
               log_info_on_failure = True):
    """
    Executes the specified python file in a separate subprocess32.
    Method returns only when the subprocess32 is finished.
    Params arg is a list of script parameters
    Timeout meaning: how many seconds should pass before script execution
    is forcibly terminated
    override_output_files option defines whether stdout/stderr files will be
    recreated or appended.
    The structured out file, however, is preserved during multiple invocations that use the same file.

    When `handle` is given, the command is instead launched on a background
    thread and {"exitcode": 777} is returned immediately.
    """
    pythonCommand = self.python_command(script, script_params)
    if logger.isEnabledFor(logging.DEBUG):
      logger.debug("Running command %s", pprint.pformat(pythonCommand))
    if handle is None:
      tmpout, tmperr = self.open_subprocess32_files(tmpoutfile, tmperrfile, override_output_files, backup_log_files)
      process = self.launch_python_subprocess32(pythonCommand, tmpout, tmperr)
      # map task_id to pid
      callback(task_id, process.pid)
      logger.debug("Launching watchdog thread")
      self.event.clear()
      self.python_process_has_been_killed = False
      thread = Thread(target = self.python_watchdog_func, args = (process, timeout))
      thread.start()
      # Waiting for the process to be either finished or killed
      process.communicate()
      self.event.set()
      thread.join()
      result = self.prepare_process_result(process.returncode, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=timeout)
      if log_info_on_failure and result['exitcode']:
        self.on_failure(pythonCommand, result)
      return result
    else:
      holder = Holder(pythonCommand, tmpoutfile, tmperrfile, tmpstructedoutfile, handle)
      background = BackgroundThread(holder, self)
      background.start()
      return {"exitcode": 777}

  def on_failure(self, pythonCommand, result):
    """
    Log some useful information after task failure.
    """
    pass
    #logger.info("Command %s failed with exitcode=%s", pprint.pformat(pythonCommand), result['exitcode'])
    #log_process_information(logger)

  def prepare_process_result(self, returncode, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=None):
    """Build the condensed result dict for a finished (or killed) command."""
    out, error, structured_out = self.read_result_from_files(tmpoutfile, tmperrfile, tmpstructedoutfile)
    if self.python_process_has_been_killed:
      error = str(error) + "\n Python script has been killed due to timeout" + \
              (" after waiting %s secs" % str(timeout) if timeout else "")
      # 999 marks a watchdog kill, distinguishable from script exit codes.
      returncode = 999
    result = self.condenseOutput(out, error, returncode, structured_out)
    logger.debug("Result: %s", result)
    return result

  def read_result_from_files(self, out_path, err_path, structured_out_path):
    """Read captured stdout, stderr and the structured JSON output.

    Returns (out, error, structured_out); structured_out is {} when the
    structured output file does not exist, or a diagnostic dict when it
    exists but cannot be parsed.
    """
    # BUGFIX: close the out/err files deterministically with context
    # managers instead of leaking the handles until garbage collection.
    with open(out_path, 'r') as out_file:
      out = out_file.read()
    with open(err_path, 'r') as err_file:
      error = err_file.read()
    try:
      with open(structured_out_path, 'r') as fp:
        structured_out = json.load(fp)
    except Exception:
      if os.path.exists(structured_out_path):
        errMsg = 'Unable to read structured output from ' + structured_out_path
        structured_out = {
          'msg' : errMsg
        }
        logger.warn(structured_out)
      else:
        structured_out = {}
    return out, error, structured_out

  def preexec_fn(self):
    # Put the child in its own process group so the whole tree can be killed.
    os.setpgid(0, 0)

  def launch_python_subprocess32(self, command, tmpout, tmperr):
    """
    Creates subprocess32 with given parameters. This functionality was moved to separate method
    to make possible unit testing
    """
    close_fds = None if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY else True
    command_env = dict(os.environ)
    if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
      command_env["PYTHONPATH"] = os.pathsep.join(sys.path)
      # Python 2 (this agent code targets py2): stringify all env values.
      for k, v in command_env.iteritems():
        command_env[k] = str(v)
    return subprocess32.Popen(command,
      stdout=tmpout,
      stderr=tmperr, close_fds=close_fds, env=command_env, preexec_fn=self.preexec_fn)

  def isSuccessfull(self, returncode):
    """True when the script exited 0 and was not killed by the watchdog."""
    return not self.python_process_has_been_killed and returncode == 0

  def python_command(self, script, script_params):
    """Build the argv list for running `script` with `script_params`."""
    #we need manually pass python executable on windows because sys.executable will return service wrapper
    python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
    python_command = [python_binary, script] + script_params
    return python_command

  def condenseOutput(self, stdout, stderr, retcode, structured_out):
    """Trim stdout/stderr to log_max_symbols_size and pack the result dict."""
    result = {
      "exitcode": retcode,
      "stdout": self.grep.tail_by_symbols(stdout, self.log_max_symbols_size) if self.log_max_symbols_size else stdout,
      "stderr": self.grep.tail_by_symbols(stderr, self.log_max_symbols_size) if self.log_max_symbols_size else stderr,
      "structuredOut" : structured_out
    }
    return result

  def python_watchdog_func(self, python, timeout):
    """Kill the subprocess (and its children) if it outlives `timeout`."""
    self.event.wait(timeout)
    if python.returncode is None:
      logger.error("subprocess32 timed out and will be killed")
      shell.kill_process_with_children(python.pid)
      self.python_process_has_been_killed = True
class Holder:
  """Value object grouping a background command with its output/log file
  paths and the execution handle used for status callbacks."""

  # Attribute names, in the order the constructor receives them.
  _FIELDS = ("command", "out_file", "err_file", "structured_out_file", "handle")

  def __init__(self, command, out_file, err_file, structured_out_file, handle):
    values = (command, out_file, err_file, structured_out_file, handle)
    for attr_name, value in zip(Holder._FIELDS, values):
      setattr(self, attr_name, value)
class BackgroundThread(threading.Thread):
  """Runs a Holder's python command in a subprocess on a background thread,
  reporting start/completion to the holder's execution handle via callbacks."""
  def __init__(self, holder, pythonExecutor):
    threading.Thread.__init__(self)
    # holder: Holder with the command, output file paths and the handle.
    self.holder = holder
    # pythonExecutor: PythonExecutor used to launch and post-process the run.
    self.pythonExecutor = pythonExecutor
  def run(self):
    # Always recreate/back up the output files (override_output_files=True).
    process_out, process_err = self.pythonExecutor.open_subprocess32_files(self.holder.out_file, self.holder.err_file, True)
    logger.debug("Starting process command %s", self.holder.command)
    process = self.pythonExecutor.launch_python_subprocess32(self.holder.command, process_out, process_err)
    logger.debug("Process has been started. Pid = %s", process.pid)
    # Publish pid/status on the handle before notifying the start callback.
    self.holder.handle.pid = process.pid
    self.holder.handle.status = BackgroundCommandExecutionHandle.RUNNING_STATUS
    self.holder.handle.on_background_command_started(self.holder.handle.command['taskId'], process.pid)
    # Block until the subprocess exits; output goes to the opened files.
    process.communicate()
    self.holder.handle.exitCode = process.returncode
    process_condensed_result = self.pythonExecutor.prepare_process_result(process.returncode, self.holder.out_file, self.holder.err_file, self.holder.structured_out_file)
    logger.debug("Calling callback with args %s", process_condensed_result)
    self.holder.handle.on_background_command_complete_callback(process_condensed_result, self.holder.handle)
    logger.debug("Exiting from thread for holder pid %s", self.holder.handle.pid)
|
_image_feature_extractor.py | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from . import _mxnet_utils
def _create_feature_extractor(model_name):
    """Instantiate the feature extractor backing `model_name`.

    On platforms without Core ML (non-macOS, or macOS older than 10.13) an
    MXNet-backed extractor is returned. Otherwise the C++ Core ML extension
    is configured; for 'resnet-50' the Core ML model file is exported to the
    model cache on first use and reused afterwards.
    """
    import os
    from platform import system
    from ._internal_utils import _mac_ver
    from ._pre_trained_models import MODELS, _get_model_cache_dir
    from turicreate.config import get_runtime_config
    from turicreate import extensions

    # If we don't have Core ML, use an MxNet model.
    has_coreml = (system() == 'Darwin') and (_mac_ver() >= (10, 13))
    if not has_coreml:
        return MXFeatureExtractor(MODELS[model_name]())

    cache_dir = _get_model_cache_dir()
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    if model_name == 'resnet-50':
        resnet_mlmodel_path = cache_dir + "/Resnet50.mlmodel"
        if not os.path.exists(resnet_mlmodel_path):
            # One-time export: convert the pre-trained MXNet ResNet to Core ML.
            from turicreate.toolkits import _pre_trained_models
            pretrained = _pre_trained_models.ResNetImageClassifier()
            converted = MXFeatureExtractor(pretrained).get_coreml_model()
            converted.save(resnet_mlmodel_path)

    extractor = extensions.__dict__["image_deep_feature_extractor"]()
    extractor.init_options({'model_name': model_name, 'download_path': cache_dir})
    return extractor
class ImageFeatureExtractor(object):
    """Abstract interface for image feature extractors.

    Concrete implementations (e.g. MXFeatureExtractor below) wrap a
    pre-trained model and turn images into feature vectors.
    """
    def __init__(self, ptModel):
        """
        Parameters
        ----------
        ptModel: ImageClassifierPreTrainedModel
            An instance of a pre-trained model.
        """
        pass
    def extract_features(self, dataset):
        """
        Extract features from images. To be overridden by subclasses.

        Parameters
        ----------
        dataset: SFrame
            SFrame with data to extract features from
        """
        pass
    def get_coreml_model(self):
        """
        To be overridden by subclasses.

        Returns
        -------
        model:
            Return the underlying model in Core ML format
        """
        pass
class MXFeatureExtractor(ImageFeatureExtractor):
def __init__(self, ptModel):
"""
Parameters
----------
ptModel: ImageClassifierPreTrainedModel
An instance of a pre-trained model.
"""
self.ptModel = ptModel
self.data_layer = ptModel.data_layer
self.feature_layer = ptModel.feature_layer
self.image_shape = ptModel.input_image_shape
self.context = _mxnet_utils.get_mxnet_context()
@staticmethod
def _get_mx_module(mxmodel, data_layer, feature_layer, context,
image_shape, batch_size = 1, label_layer = None):
import mxnet as _mx
sym, arg_params, aux_params = mxmodel
all_layers = sym.get_internals()
feature_layer_sym = all_layers[feature_layer]
# Add a feature label layer (for exporting)
if (label_layer is not None) and (label_layer not in arg_params):
arg_params[label_layer] = _mx.nd.array([0])
# Make sure we do not use a number of contexts that could leave empty workloads
context = context[:batch_size]
model = _mx.mod.Module(symbol=feature_layer_sym, label_names=None, context=context)
model.bind(for_training=False, data_shapes=[(data_layer, (batch_size, ) + image_shape)])
model.set_params(arg_params, aux_params)
return model
def extract_features(self, dataset, feature, batch_size=64, verbose=False):
"""
Parameters
----------
dataset: SFrame
SFrame of images
"""
from ..mx import SFrameImageIter as _SFrameImageIter
from six.moves.queue import Queue as _Queue
from threading import Thread as _Thread
import turicreate as _tc
import array
if len(dataset) == 0:
return _tc.SArray([], array.array)
batch_size = min(len(dataset), batch_size)
# Make a data iterator
dataIter = _SFrameImageIter(sframe=dataset, data_field=[feature], batch_size=batch_size, image_shape=self.image_shape)
# Setup the MXNet model
model = MXFeatureExtractor._get_mx_module(self.ptModel.mxmodel,
self.data_layer, self.feature_layer, self.context, self.image_shape, batch_size)
out = _tc.SArrayBuilder(dtype = array.array)
progress = { 'num_processed' : 0, 'total' : len(dataset) }
if verbose:
print("Performing feature extraction on resized images...")
# Encapsulates the work done by the MXNet model for a single batch
def handle_request(batch):
model.forward(batch)
mx_out = [array.array('d',m) for m in model.get_outputs()[0].asnumpy()]
if batch.pad != 0:
# If batch size is not evenly divisible by the length, it will loop back around.
# We don't want that.
mx_out = mx_out[:-batch.pad]
return mx_out
# Copies the output from MXNet into the SArrayBuilder and emits progress
def consume_response(mx_out):
out.append_multiple(mx_out)
progress['num_processed'] += len(mx_out)
if verbose:
print('Completed {num_processed:{width}d}/{total:{width}d}'.format(
width = len(str(progress['total'])), **progress))
# Create a dedicated thread for performing MXNet work, using two FIFO
# queues for communication back and forth with this thread, with the
# goal of keeping MXNet busy throughout.
request_queue = _Queue()
response_queue = _Queue()
def mx_worker():
while True:
batch = request_queue.get() # Consume request
if batch is None:
# No more work remains. Allow the thread to finish.
return
response_queue.put(handle_request(batch)) # Produce response
mx_worker_thread = _Thread(target=mx_worker)
mx_worker_thread.start()
try:
# Attempt to have two requests in progress at any one time (double
# buffering), so that the iterator is creating one batch while MXNet
# performs inference on the other.
if dataIter.has_next:
request_queue.put(next(dataIter)) # Produce request
while dataIter.has_next:
request_queue.put(next(dataIter)) # Produce request
consume_response(response_queue.get())
consume_response(response_queue.get())
finally:
# Tell the worker thread to shut down.
request_queue.put(None)
return out.close()
def get_coreml_model(self, mode = 'classifier'):
    """
    Convert the underlying MXNet model to a Core ML model.

    Parameters
    ----------
    mode: str ('classifier', 'regressor' or None)
        Mode of the converted coreml model.
        When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed.
        When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed.

    Returns
    -------
    model: MLModel
        Return the underlying model.
    """
    from ._mxnet_to_coreml import _mxnet_converter
    import mxnet as _mx

    (sym, arg_params, aux_params) = self.ptModel.mxmodel
    fe_mxmodel = self.ptModel.mxmodel

    if self.ptModel.is_feature_layer_final:
        # The feature layer is the network's final layer, so graft a dummy
        # fully-connected + softmax head on top of it: the converter expects
        # a classifier-shaped output layer.
        feature_layer_size = self.ptModel.feature_layer_size
        num_dummy_classes = 10
        feature_layer_sym = sym.get_children()[0]
        fc_symbol = _mx.symbol.FullyConnected(feature_layer_sym, num_hidden=num_dummy_classes)
        prob = _mx.symbol.SoftmaxOutput(fc_symbol, name = sym.name, attr=sym.attr_dict()[sym.name])
        arg_params['%s_weight' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes, feature_layer_size))
        arg_params['%s_bias' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes))
        fe_mxmodel = (prob, arg_params, aux_params)

    model = MXFeatureExtractor._get_mx_module(fe_mxmodel,
        self.data_layer, self.ptModel.output_layer, _mxnet_utils.get_mxnet_context(max_devices=1),
        self.image_shape, label_layer = self.ptModel.label_layer)

    preprocessor_args = {'image_input_names': [self.data_layer]}
    # BUG FIX: honor the caller-supplied `mode` argument; it was previously
    # ignored in favor of a hard-coded 'classifier'. The default keeps the
    # old behavior for existing callers.
    return _mxnet_converter.convert(model, mode = mode,
        input_shape=[(self.data_layer, (1, ) + self.image_shape)],
        class_labels = list(map(str, range(self.ptModel.num_classes))),
        preprocessor_args = preprocessor_args, verbose = False)
|
test.py | Skip to content
Features
Business
Explore
Marketplace
Pricing
Search
Sign in or Sign up
1 0 0 ryrizo/AIND-Isolation
Code Issues 0 Pull requests 0 Projects 0 Insights
Join GitHub today
GitHub is home to over 28 million developers working together to host and review code, manage projects, and build software together.
AIND-Isolation/agent_test.py
2031e28 on Feb 20, 2017
@ryrizo ryrizo Add minimax, alpha-beta pruning, and heuristics for evaluation
@cgearhart @ryrizo @danainschool
541 lines (440 sloc) 21.7 KB
"""
This file contains test cases to verify the correct implementation of the
functions required for this project including minimax, alphabeta, and iterative
deepening. The heuristic function is tested for conformance to the expected
interface, but cannot be automatically assessed for correctness.
STUDENTS SHOULD NOT NEED TO MODIFY THIS CODE. IT WOULD BE BEST TO TREAT THIS
FILE AS A BLACK BOX FOR TESTING.
"""
import random
import unittest
import timeit
import sys
import isolation
import game_agent
from collections import Counter
from copy import deepcopy
from copy import copy
from functools import wraps
from queue import Queue
from threading import Thread
from multiprocessing import TimeoutError
from queue import Empty as QueueEmptyError
from importlib import reload
WRONG_MOVE = """
The {} function failed because it returned a non-optimal move at search depth {}.
Valid choices: {}
Your selection: {}
"""
WRONG_NUM_EXPLORED = """
Your {} search visited the wrong nodes at search depth {}. If the number
of visits is too large, make sure that iterative deepening is only
running when the `iterative` flag is set in the agent constructor.
Max explored size: {}
Number you explored: {}
"""
UNEXPECTED_VISIT = """
Your {} search did not visit the number of expected unique nodes at search
depth {}.
Max explored size: {}
Number you explored: {}
"""
ID_FAIL = """
Your agent explored the wrong number of nodes using Iterative Deepening and
minimax. Remember that ID + MM should check every node in each layer of the
game tree before moving on to the next layer.
"""
INVALID_MOVE = """
Your agent returned an invalid move. Make sure that your function returns
a selection when the search times out during iterative deepening.
Valid choices: {!s}
Your choice: {}
"""
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
    """Return the current clock time in milliseconds (float)."""
    seconds = timeit.default_timer()
    return seconds * 1000
def handler(obj, testcase, queue):
    """Handler to pass information between threads; used in the timeout
    function to abort long-running (i.e., probably hung) test cases.

    Puts a tuple onto `queue`: (None, result) if `testcase(obj)` succeeds,
    or (sys.exc_info(), None) if it raises.
    """
    try:
        queue.put((None, testcase(obj)))
    except BaseException:
        # Catch absolutely everything (including SystemExit/KeyboardInterrupt
        # raised inside the worker thread) so the parent thread can re-raise
        # it with the original traceback. A bare `except:` did the same but
        # hid the intent.
        queue.put((sys.exc_info(), None))
def timeout(time_limit):
    """Function decorator for unittest test cases to specify test case timeout.

    The timer mechanism works by spawning a new thread for the test to run in
    and using the timeout handler for the thread-safe queue class to abort and
    kill the child thread if it doesn't return within the timeout.

    It is not safe to access system resources (e.g., files) within test cases
    wrapped by this timer.
    """
    def decorate(testcase):
        @wraps(testcase)
        def wrapped(self):
            result_queue = Queue()
            try:
                worker = Thread(target=handler, args=(self, testcase, result_queue))
                worker.daemon = True
                worker.start()
                # Blocks until the handler posts a result, or raises
                # queue.Empty after time_limit seconds.
                err, res = result_queue.get(timeout=time_limit)
                worker.join()
                if err:
                    # Re-raise the test's exception with its original traceback.
                    raise err[0](err[1]).with_traceback(err[2])
                return res
            except QueueEmptyError:
                raise TimeoutError("Test aborted due to timeout. Test was " +
                    "expected to finish in less than {} second(s).".format(time_limit))
        return wrapped
    return decorate
def makeEvalTable(table):
    """Build a heuristic that reads its score from a fixed lookup table keyed
    by the player's board location. Used to steer minimax/alphabeta toward
    known cells while testing the search functions.

    THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
    IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
    """
    def score(game, player):
        location = game.get_player_location(player)
        return table[location[0]][location[1]]
    return score
def makeEvalStop(limit, timer, value=None):
    """Build a heuristic that zeroes the timer's limit once the board has
    recorded `limit` total node expansions, forcing the search timer to
    expire at a deterministic point regardless of expansion order.

    THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
    IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
    """
    def score(game, player):
        if timer.time_left() < 0:
            raise TimeoutError("Timer expired during search. You must " +
                "return an answer before the timer reaches 0.")
        total_expansions = game.counts[0]
        if total_expansions == limit:
            # Trip the timer so the next time_left() check fails.
            timer.time_limit = 0
        return 0
    return score
def makeBranchEval(first_branch):
    """Build a heuristic that scores 1.0 only for boards belonging to the
    first branch explored (recorded in the shared `first_branch` list on the
    first call), and 0.0 elsewhere. Forces alpha-beta to prune the remaining
    branches during testing.

    THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
    IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
    """
    def score(game, player):
        if not first_branch:
            # Remember the first branch root ever evaluated.
            first_branch.append(game.root)
        return 1. if game.root in first_branch else 0.
    return score
class CounterBoard(isolation.Board):
    """Subclass of the isolation board that maintains counters for the number
    of unique nodes and total nodes visited during depth first search.

    Some functions from the base class must be overridden to maintain the
    counters during search.
    """
    def __init__(self, *args, **kwargs):
        super(CounterBoard, self).__init__(*args, **kwargs)
        # Total number of times each move was expanded via forecast_move.
        self.counter = Counter()
        # Set of unique moves expanded at least once.
        self.visited = set()
        # First move of the branch this board belongs to; None until the
        # first forecast_move below the root board.
        self.root = None

    def copy(self):
        # Reproduces the base class's copy field-by-field so the counters and
        # root tag survive into the new board. counter and visited are
        # intentionally SHARED (not copied) so statistics accumulate globally
        # across the entire search tree.
        new_board = CounterBoard(self.__player_1__, self.__player_2__,
                                 width=self.width, height=self.height)
        new_board.move_count = self.move_count
        new_board.__active_player__ = self.__active_player__
        new_board.__inactive_player__ = self.__inactive_player__
        new_board.__last_player_move__ = copy(self.__last_player_move__)
        new_board.__player_symbols__ = copy(self.__player_symbols__)
        new_board.__board_state__ = deepcopy(self.__board_state__)
        new_board.counter = self.counter
        new_board.visited = self.visited
        new_board.root = self.root
        return new_board

    def forecast_move(self, move):
        # Record the expansion, then apply the move to a copy as usual.
        self.counter[move] += 1
        self.visited.add(move)
        new_board = self.copy()
        new_board.apply_move(move)
        if new_board.root is None:
            # Tag every descendant with the branch's first move.
            new_board.root = move
        return new_board

    @property
    def counts(self):
        """ Return counts of (total, unique) nodes visited """
        return sum(self.counter.values()), len(self.visited)
class Project1Test(unittest.TestCase):
    """Tests for the CustomPlayer agent: interface conformance for the
    heuristic, minimax, alphabeta and get_move, plus node-count tests that
    verify fixed-depth search and iterative deepening expand the expected
    portions of the game tree."""

    def initAUT(self, depth, eval_fn, iterative=False,
                method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7):
        """Generate and initialize player and board objects to be used for
        testing.
        """
        reload(game_agent)
        agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method)
        board = CounterBoard(agentUT, 'null_agent', w, h)
        board.apply_move(loc1)
        board.apply_move(loc2)
        return agentUT, board

    @timeout(5)
    # @unittest.skip("Skip eval function test.")  # Uncomment this line to skip test
    def test_heuristic(self):
        """ Test output interface of heuristic score function interface."""
        player1 = "Player1"
        player2 = "Player2"
        p1_location = (0, 0)  # top left corner
        p2_location = (1, 1)
        game = isolation.Board(player1, player2)
        game.apply_move(p1_location)
        game.apply_move(p2_location)

        self.assertIsInstance(game_agent.custom_score(game, player1), float,
                              "The heuristic function should return a floating point")

    # BUG FIX: this decorator was previously a bare `timeout(5)` statement
    # (missing the `@`), so the timeout was never applied to this test.
    @timeout(5)
    # @unittest.skip("Skip simple minimax test.")  # Uncomment this line to skip test
    def test_minimax_interface(self):
        """ Test CustomPlayer.minimax interface with simple input """
        h, w = 7, 7  # board size
        test_depth = 1
        starting_location = (5, 3)
        adversary_location = (0, 0)  # top left corner
        iterative_search = False
        search_method = "minimax"
        heuristic = lambda g, p: 0.  # return 0 everywhere

        # create a player agent & a game board
        agentUT = game_agent.CustomPlayer(
            test_depth, heuristic, iterative_search, search_method)
        agentUT.time_left = lambda: 99  # ignore timeout for fixed-depth search
        board = isolation.Board(agentUT, 'null_agent', w, h)

        # place two "players" on the board at arbitrary (but fixed) locations
        board.apply_move(starting_location)
        board.apply_move(adversary_location)

        for move in board.get_legal_moves():
            next_state = board.forecast_move(move)
            v, _ = agentUT.minimax(next_state, test_depth)

            self.assertTrue(type(v) == float,
                            ("Minimax function should return a floating " +
                             "point value approximating the score for the " +
                             "branch being searched."))

    # BUG FIX: decorator was previously missing the `@` here as well.
    @timeout(5)
    # @unittest.skip("Skip alphabeta test.")  # Uncomment this line to skip test
    def test_alphabeta_interface(self):
        """ Test CustomPlayer.alphabeta interface with simple input """
        h, w = 9, 9  # board size
        test_depth = 1
        starting_location = (2, 7)
        adversary_location = (0, 0)  # top left corner
        iterative_search = False
        search_method = "alphabeta"
        heuristic = lambda g, p: 0.  # return 0 everywhere

        # create a player agent & a game board
        agentUT = game_agent.CustomPlayer(
            test_depth, heuristic, iterative_search, search_method)
        agentUT.time_left = lambda: 99  # ignore timeout for fixed-depth search
        board = isolation.Board(agentUT, 'null_agent', w, h)

        # place two "players" on the board at arbitrary (but fixed) locations
        board.apply_move(starting_location)
        board.apply_move(adversary_location)

        for move in board.get_legal_moves():
            next_state = board.forecast_move(move)
            v, _ = agentUT.alphabeta(next_state, test_depth)

            self.assertTrue(type(v) == float,
                            ("Alpha Beta function should return a floating " +
                             "point value approximating the score for the " +
                             "branch being searched."))

    @timeout(5)
    # @unittest.skip("Skip get_move test.")  # Uncomment this line to skip test
    def test_get_move_interface(self):
        """ Test CustomPlayer.get_move interface with simple input """
        h, w = 9, 9  # board size
        test_depth = 1
        starting_location = (2, 7)
        adversary_location = (0, 0)  # top left corner
        iterative_search = False
        search_method = "minimax"
        heuristic = lambda g, p: 0.  # return 0 everywhere

        # create a player agent & a game board
        agentUT = game_agent.CustomPlayer(
            test_depth, heuristic, iterative_search, search_method)

        # Test that get_move returns a legal choice on an empty game board
        board = isolation.Board(agentUT, 'null_agent', w, h)
        legal_moves = board.get_legal_moves()
        move = agentUT.get_move(board, legal_moves, lambda: 99)
        self.assertIn(move, legal_moves,
                      ("The get_move() function failed as player 1 on an " +
                       "empty board. It should return coordinates on the " +
                       "game board for the location of the agent's next " +
                       "move. The move must be one of the legal moves on " +
                       "the current game board."))

        # Test that get_move returns a legal choice for first move as player 2
        board = isolation.Board('null_agent', agentUT, w, h)
        board.apply_move(starting_location)
        legal_moves = board.get_legal_moves()
        move = agentUT.get_move(board, legal_moves, lambda: 99)
        self.assertIn(move, legal_moves,
                      ("The get_move() function failed making the first " +
                       "move as player 2 on a new board. It should return " +
                       "coordinates on the game board for the location " +
                       "of the agent's next move. The move must be one " +
                       "of the legal moves on the current game board."))

        # Test that get_move returns a legal choice after first move
        board = isolation.Board(agentUT, 'null_agent', w, h)
        board.apply_move(starting_location)
        board.apply_move(adversary_location)
        legal_moves = board.get_legal_moves()
        move = agentUT.get_move(board, legal_moves, lambda: 99)
        self.assertIn(move, legal_moves,
                      ("The get_move() function failed as player 1 on a " +
                       "game in progress. It should return coordinates on" +
                       "the game board for the location of the agent's " +
                       "next move. The move must be one of the legal moves " +
                       "on the current game board."))

    @timeout(5)
    # @unittest.skip("Skip minimax test.")  # Uncomment this line to skip test
    def test_minimax(self):
        """ Test CustomPlayer.minimax

        This test uses a scoring function that returns a constant value based
        on the location of the search agent on the board to force minimax to
        choose a branch that visits those cells at a specific fixed-depth.
        If minimax is working properly, it will visit a constant number of
        nodes during the search and return one of the acceptable legal moves.
        """
        h, w = 7, 7  # board size
        starting_location = (2, 3)
        adversary_location = (0, 0)  # top left corner
        iterative_search = False
        method = "minimax"

        # The agent under test starts at position (2, 3) on the board, which
        # gives eight (8) possible legal moves [(0, 2), (0, 4), (1, 1), (1, 5),
        # (3, 1), (3, 5), (4, 2), (4, 4)]. The search function will pick one of
        # those moves based on the estimated score for each branch. The value
        # only changes on odd depths because even depths end on when the
        # adversary has initiative.
        value_table = [[0] * w for _ in range(h)]
        value_table[1][5] = 1  # depth 1 & 2
        value_table[4][3] = 2  # depth 3 & 4
        value_table[6][6] = 3  # depth 5
        heuristic = makeEvalTable(value_table)

        # These moves are the branches that will lead to the cells in the value
        # table for the search depths.
        expected_moves = [set([(1, 5)]),
                          set([(3, 1), (3, 5)]),
                          set([(3, 5), (4, 2)])]

        # Expected number of node expansions during search
        counts = [(8, 8), (24, 10), (92, 27), (418, 32), (1650, 43)]

        # Test fixed-depth search; note that odd depths mean that the searching
        # player (student agent) has the last move, while even depths mean that
        # the adversary has the last move before calling the heuristic
        # evaluation function.
        for idx in range(5):
            test_depth = idx + 1
            agentUT, board = self.initAUT(test_depth, heuristic,
                                          iterative_search, method,
                                          loc1=starting_location,
                                          loc2=adversary_location)

            # disable search timeout by returning a constant value
            agentUT.time_left = lambda: 1e3
            _, move = agentUT.minimax(board, test_depth)

            num_explored_valid = board.counts[0] == counts[idx][0]
            num_unique_valid = board.counts[1] == counts[idx][1]

            self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
                method, test_depth, counts[idx][0], board.counts[0]))

            self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
                method, test_depth, counts[idx][1], board.counts[1]))

            self.assertIn(move, expected_moves[idx // 2], WRONG_MOVE.format(
                method, test_depth, expected_moves[idx // 2], move))

    @timeout(20)
    # @unittest.skip("Skip alpha-beta test.")  # Uncomment this line to skip test
    def test_alphabeta(self):
        """ Test CustomPlayer.alphabeta

        This test uses a scoring function that returns a constant value based
        on the branch being searched by alphabeta in the user agent, and forces
        the search to prune on every other branch it visits. By using a huge
        board where the players are too far apart to interact and every branch
        has the same growth factor, the expansion and pruning must result in
        an exact number of expanded nodes.
        """
        h, w = 101, 101  # board size
        starting_location = (50, 50)
        adversary_location = (0, 0)  # top left corner
        iterative_search = False
        method = "alphabeta"

        # The agent under test starts in the middle of a huge board so that
        # every branch has the same number of possible moves, so pruning any
        # branch has the same effect during testing

        # These are the expected number of node expansions for alphabeta search
        # to explore the game tree to fixed depth. The custom eval function
        # used for this test ensures that some branches must be pruned, while
        # the search should still return an optimal move.
        counts = [(8, 8), (17, 10), (74, 42), (139, 51), (540, 119)]

        for idx in range(len(counts)):
            test_depth = idx + 1  # pruning guarantee requires min depth of 3
            first_branch = []
            heuristic = makeBranchEval(first_branch)
            agentUT, board = self.initAUT(test_depth, heuristic,
                                          iterative_search, method,
                                          loc1=starting_location,
                                          loc2=adversary_location,
                                          w=w, h=h)

            # disable search timeout by returning a constant value
            agentUT.time_left = lambda: 1e3
            _, move = agentUT.alphabeta(board, test_depth)

            num_explored_valid = board.counts[0] == counts[idx][0]
            num_unique_valid = board.counts[1] == counts[idx][1]

            self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
                method, test_depth, counts[idx][0], board.counts[0]))

            self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
                method, test_depth, counts[idx][1], board.counts[1]))

            self.assertIn(move, first_branch, WRONG_MOVE.format(
                method, test_depth, first_branch, move))

    @timeout(20)
    # @unittest.skip("Skip iterative deepening test.")  # Uncomment this line to skip test
    def test_get_move(self):
        """ Test iterative deepening in CustomPlayer.get_move by placing an
        agent on the game board and performing ID minimax search, which
        should visit a specific number of unique nodes while expanding. By
        forcing the search to timeout when a predetermined number of nodes
        have been expanded, we can then verify that the expected number of
        unique nodes have been visited.
        """

        class DynamicTimer():
            """Dynamic Timer allows the time limit to be changed after the
            timer is initialized so that the search timeout can be triggered
            before the timer actually expires. This allows the timer to expire
            when an event occurs, regardless of the clock time required until
            the event happens.
            """
            def __init__(self, time_limit):
                self.time_limit = time_limit
                self.start_time = curr_time_millis()

            def time_left(self):
                return self.time_limit - (curr_time_millis() - self.start_time)

        w, h = 11, 11  # board size
        adversary_location = (0, 0)
        method = "minimax"

        # The agent under test starts at the positions indicated below, and
        # performs an iterative deepening minimax search (minimax is easier to
        # test because it always visits all nodes in the game tree at every
        # level).
        origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]
        exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]

        for idx in range(len(origins)):
            # set the initial timer high enough that the search will not
            # timeout before triggering the dynamic timer to halt by visiting
            # the expected number of nodes
            time_limit = 1e4
            timer = DynamicTimer(time_limit)
            eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)
            agentUT, board = self.initAUT(-1, eval_fn, True, method,
                                          origins[idx], adversary_location,
                                          w, h)
            legal_moves = board.get_legal_moves()
            chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)

            diff_total = abs(board.counts[0] - exact_counts[idx][0])
            diff_unique = abs(board.counts[1] - exact_counts[idx][1])

            self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)

            self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(
                legal_moves, chosen_move))
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
© 2018 GitHub, Inc.
Terms
Privacy
Security
Status
Help
Contact GitHub
API
Training
Shop
Blog
About
Press h to open a hovercard with more details. |
fileStore.py | # Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import range
from builtins import object
from abc import abstractmethod, ABCMeta
from toil.lib.objects import abstractclassmethod
import base64
from collections import namedtuple, defaultdict
import dill
import errno
import logging
import os
import shutil
import stat
import tempfile
import time
import uuid
from contextlib import contextmanager
from fcntl import flock, LOCK_EX, LOCK_UN
from functools import partial
from hashlib import sha1
from threading import Thread, Semaphore, Event
# Python 3 compatibility imports
from six.moves.queue import Empty, Queue
from six.moves import xrange
from toil.lib.humanize import bytes2human
from toil.common import cacheDirName, getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.resource import ModuleDescriptor
from future.utils import with_metaclass
logger = logging.getLogger(__name__)
class DeferredFunction(namedtuple('DeferredFunction', 'function args kwargs name module')):
    """
    A dill-pickled (function, args, kwargs) triple plus the metadata needed to
    load and run it later.

    >>> df = DeferredFunction.create(defaultdict, None, {'x':1}, y=2)
    >>> df
    DeferredFunction(defaultdict, ...)
    >>> df.invoke() == defaultdict(None, x=1, y=2)
    True
    """
    @classmethod
    def create(cls, function, *args, **kwargs):
        """
        Capture the given callable and arguments as an instance of this class.

        :param callable function: The deferred action to take in the form of a function
        :param tuple args: Non-keyword arguments to the function
        :param dict kwargs: Keyword arguments to the function
        """
        # Serialize as early as possible, deserialize as late as possible:
        # invoke() unpickles right before the call, which avoids redundantly
        # deserializing deferred functions for concurrently running jobs when
        # the cache state is loaded from disk.
        pickled = [dill.dumps(part) for part in (function, args, kwargs)]
        return cls(*pickled,
                   name=function.__name__,
                   module=ModuleDescriptor.forModule(function.__module__).globalize())

    def invoke(self):
        """
        Invoke the captured function with the captured arguments.
        """
        logger.debug('Running deferred function %s.', self)
        self.module.makeLoadable()
        function, args, kwargs = [dill.loads(blob) for blob in (self.function, self.args, self.kwargs)]
        return function(*args, **kwargs)

    def __str__(self):
        return '%s(%s, ...)' % (self.__class__.__name__, self.name)

    __repr__ = __str__
class FileStore(with_metaclass(ABCMeta, object)):
"""
An abstract base class to represent the interface between a worker and the job store. Concrete
subclasses will be used to manage temporary files, read and write files from the job store and
log messages, passed as argument to the :meth:`toil.job.Job.run` method.
"""
# Variables used for syncing reads/writes.
# These are class-level, so they are shared by every FileStore instance in
# this process.
_pendingFileWritesLock = Semaphore()
_pendingFileWrites = set()
_terminateEvent = Event()  # Used to signify crashes in threads
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
    """
    :param jobStore: the job store this file store reads and writes files through.
    :param jobGraph: the job graph node this file store belongs to.
    :param str localTempDir: the job's local temporary directory.
    :param inputBlockFn: callable used to block on the predecessor's job store updates.
    """
    self.jobStore = jobStore
    self.jobGraph = jobGraph
    self.localTempDir = os.path.abspath(localTempDir)
    # The workflow directory is the parent of the job's local temp dir.
    self.workFlowDir = os.path.dirname(self.localTempDir)
    # NOTE(review): assumes jobGraph.command has the form '<exe> <jobName> ...';
    # confirm against how job commands are constructed elsewhere in toil.
    self.jobName = self.jobGraph.command.split()[1]
    self.inputBlockFn = inputBlockFn
    # Messages queued for shipment to the leader (see logToMaster).
    self.loggingMessages = []
    # Files/jobs scheduled for deletion after the job's run method completes.
    self.filesToDelete = set()
    self.jobsToDelete = set()
@staticmethod
def createFileStore(jobStore, jobGraph, localTempDir, inputBlockFn, caching):
    """Factory: build a caching or non-caching file store (both concrete
    subclasses are defined later in this module) based on the `caching` flag."""
    fileStoreCls = CachingFileStore if caching else NonCachingFileStore
    return fileStoreCls(jobStore, jobGraph, localTempDir, inputBlockFn)
@abstractmethod
@contextmanager
def open(self, job):
    """
    The context manager used to conduct tasks prior-to, and after a job has been run.

    Concrete subclasses perform their setup on entry and cleanup/commit on exit.
    :param toil.job.Job job: The job instance of the toil job to run.
    """
    raise NotImplementedError()
# Functions related to temp files and directories
def getLocalTempDir(self):
    """
    Get a new local temporary directory in which to write files that persist for the duration of
    the job.

    :return: The absolute path to a new local temporary directory. This directory will exist
             for the duration of the job only, and is guaranteed to be deleted once the job
             terminates, removing all files it contains recursively.
    :rtype: str
    """
    newDir = tempfile.mkdtemp(prefix="t", dir=self.localTempDir)
    return os.path.abspath(newDir)
def getLocalTempFile(self):
    """
    Get a new local temporary file that will persist for the duration of the job.

    :return: The absolute path to a local temporary file. This file will exist for the
             duration of the job only, and is guaranteed to be deleted once the job terminates.
    :rtype: str
    """
    fd, tmpPath = tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self.localTempDir)
    # mkstemp returns an open OS-level handle; close it since only the path is needed.
    os.close(fd)
    return os.path.abspath(tmpPath)
def getLocalTempFileName(self):
    """
    Get a valid name for a new local file. Don't actually create a file at the path.

    :return: Path to valid file
    :rtype: str
    """
    # Creating and then deleting a real temp file guarantees a unique, unused
    # name. There is a very, very, very small chance another job creates the
    # same name between the deletion here and the caller's use of the path.
    tempFile = self.getLocalTempFile()
    os.remove(tempFile)
    return tempFile
# Functions related to reading, writing and removing files to/from the job store
@abstractmethod
def writeGlobalFile(self, localFileName, cleanup=False):
    """
    Takes a file (as a path) and uploads it to the job store.

    :param string localFileName: The path to the local file to upload.
    :param bool cleanup: if True then the copy of the global file will be deleted once the
           job and all its successors have completed running. If not the global file must be
           deleted manually.
    :return: an ID that can be used to retrieve the file.
    :rtype: toil.fileStore.FileID
    """
    raise NotImplementedError()
def writeGlobalFileStream(self, cleanup=False):
    """
    Similar to writeGlobalFile, but allows the writing of a stream to the job store.
    The yielded file handle does not need to and should not be closed explicitly.

    :param bool cleanup: is as in :func:`toil.fileStore.FileStore.writeGlobalFile`.
    :return: A context manager yielding a tuple of
              1) a file handle which can be written to and
              2) the ID of the resulting file in the job store.
    """
    # TODO: Make this work with FileID
    jobStoreID = self.jobGraph.jobStoreID if cleanup else None
    return self.jobStore.writeFileStream(jobStoreID)
@abstractmethod
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
    """
    Makes the file associated with fileStoreID available locally. If mutable is True,
    then a copy of the file will be created locally so that the original is not modified
    and does not change the file for other jobs. If mutable is False, then a link can
    be created to the file, saving disk resources.

    If a user path is specified, it is used as the destination. If a user path isn't
    specified, the file is stored in the local temp directory with an encoded name.

    :param toil.fileStore.FileID fileStoreID: job store id for the file
    :param string userPath: a path to the name of file to which the global file will be copied
           or hard-linked (see below).
    :param bool cache: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
    :param bool mutable: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
    :param bool symlink: whether a symlink (rather than a copy or hard link) may be used
           to make the file available locally.
    :return: An absolute path to a local, temporary copy of the file keyed by fileStoreID.
    :rtype: str
    """
    raise NotImplementedError()
@abstractmethod
def readGlobalFileStream(self, fileStoreID):
    """
    Similar to readGlobalFile, but allows a stream to be read from the job store. The yielded
    file handle does not need to and should not be closed explicitly.

    :param fileStoreID: job store id of the file to stream.
    :return: a context manager yielding a file handle which can be read from.
    """
    raise NotImplementedError()
@abstractmethod
def deleteLocalFile(self, fileStoreID):
    """
    Deletes local copies of files associated with the provided job store ID.

    :param str fileStoreID: File Store ID of the file to be deleted.
    """
    raise NotImplementedError()
@abstractmethod
def deleteGlobalFile(self, fileStoreID):
    """
    Deletes local files with the provided job store ID and then permanently deletes them from
    the job store. To ensure that the job can be restarted if necessary, the delete will not
    happen until after the job's run method has completed.

    :param fileStoreID: the job store ID of the file to be deleted.
    """
    raise NotImplementedError()
# Functions used to read and write files directly between a source url and the job store.
def importFile(self, srcUrl, sharedFileName=None):
    """Import a file from a URL directly into the job store (delegates to the job store)."""
    return self.jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
def exportFile(self, jobStoreFileID, dstUrl):
    """Export a job-store file to a destination URL. Not implemented at this
    level; concrete subclasses must override."""
    raise NotImplementedError()
# A utility method for accessing filenames
def _resolveAbsoluteLocalPath(self, filePath):
    """
    Return the absolute path to filePath. This is a wrapper for os.path.abspath because mac OS
    symlinks /tmp and /var (the most common places for a default tempdir) to /private/tmp and
    /private/var respectively.

    :param str filePath: The absolute or relative path to the file. If relative, it must be
                         relative to the local temp working dir
    :return: Absolute path to key
    :rtype: str
    """
    if not os.path.isabs(filePath):
        # Relative paths are interpreted relative to the job's local temp dir.
        return os.path.join(self.localTempDir, filePath)
    return os.path.abspath(filePath)
class _StateFile(object):
    """
    Utility class to read and write dill-ed state dictionaries from/to a file into a namespace.
    """
    def __init__(self, stateDict):
        assert isinstance(stateDict, dict)
        # Expose the dict's keys as attributes of this instance (namespace-style access).
        self.__dict__.update(stateDict)

    @abstractclassmethod
    @contextmanager
    def open(cls, outer=None):
        """
        This is a context manager that opens the state file and reads it into an object
        that is returned to the user in the yield.

        :param outer: Instance of the calling class (to use outer methods).
        """
        raise NotImplementedError()

    @classmethod
    def _load(cls, fileName):
        """
        Load the state of the cache from the state file

        :param str fileName: Path to the cache state file.
        :return: An instance of the state as a namespace.
        :rtype: _StateFile
        """
        # Read the value from the cache state file then initialize an instance of
        # _CacheState with it.
        with open(fileName, 'rb') as fH:
            infoDict = dill.load(fH)
        return cls(infoDict)

    def write(self, fileName):
        """
        Write the current state into a temporary file then atomically rename it to the main
        state file.

        :param str fileName: Path to the state file.
        """
        with open(fileName + '.tmp', 'wb') as fH:
            # Based on answer by user "Mark" at:
            # http://stackoverflow.com/questions/2709800/how-to-pickle-yourself
            # We can't pickle nested classes. So we have to pickle the variables of the class
            # If we ever change this, we need to ensure it doesn't break FileID
            dill.dump(self.__dict__, fH)
        # os.rename is atomic on POSIX, so readers never observe a half-written state file.
        os.rename(fileName + '.tmp', fileName)
# Methods related to the deferred function logic
@abstractclassmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
    """
    This function looks at the state of all jobs registered on the node and will handle them
    (clean up their presence on the node, and run any registered defer functions)

    :param nodeInfo: Information regarding the node required for identifying dead jobs.
    :param bool batchSystemShutdown: Is the batch system in the process of shutting down?
    """
    raise NotImplementedError()
@abstractmethod
def _registerDeferredFunction(self, deferredFunction):
"""
Register the given deferred function with this job.
:param DeferredFunction deferredFunction: the function to register
"""
raise NotImplementedError()
@staticmethod
def _runDeferredFunctions(deferredFunctions):
"""
Invoke the specified deferred functions and return a list of names of functions that
raised an exception while being invoked.
:param list[DeferredFunction] deferredFunctions: the DeferredFunctions to run
:rtype: list[str]
"""
failures = []
for deferredFunction in deferredFunctions:
try:
deferredFunction.invoke()
except:
failures.append(deferredFunction.name)
logger.exception('%s failed.', deferredFunction)
return failures
# Functions related to logging
def logToMaster(self, text, level=logging.INFO):
    """
    Send a logging message to the leader. The message is also logged by the worker
    itself at the same level.

    :param text: The string to log.
    :param int level: The logging level.
    """
    message = "LOG-TO-MASTER: " + text
    logger.log(level=level, msg=message)
    # Queue the message so it also travels back to the leader with the job's results.
    self.loggingMessages.append(dict(text=text, level=level))
# Functions run after the completion of the job.
@abstractmethod
def _updateJobWhenDone(self):
"""
Update the status of the job on the disk.
"""
raise NotImplementedError()
@abstractmethod
def _blockFn(self):
"""
Blocks while _updateJobWhenDone is running. This function is called by this job's
successor to ensure that it does not begin modifying the job store until after this job has
finished doing so.
"""
raise NotImplementedError()
# Utility function used to identify if a pid is still running on the node.
@staticmethod
def _pidExists(pid):
"""
This will return True if the process associated with pid is still running on the machine.
This is based on stackoverflow question 568271.
:param int pid: ID of the process to check for
:return: True/False
:rtype: bool
"""
assert pid > 0
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
else:
raise
else:
return True
# @abstractclassmethod is deprecated since Python 3.3 — use @classmethod + @abstractmethod.
@classmethod
@abstractmethod
def shutdown(cls, dir_):
    """
    Shutdown the filestore on this node.

    This is intended to be called on batch system shutdown.

    :param dir_: The keystone directory containing the required information for fixing the
           state of failed workers on the node before cleaning up.
    """
    raise NotImplementedError()
class CachingFileStore(FileStore):
    """
    A cache-enabled file store that attempts to use hard-links and asynchronous job store writes to
    reduce I/O between, and during jobs.
    """
    def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
        """
        Set up caching state for one job: asynchronous write worker threads, the
        node-level cache directory paths, and per-job bookkeeping.

        :param jobStore: The job store this file store reads from and writes to.
        :param jobGraph: The job this file store instance serves.
        :param str localTempDir: This worker's temp directory (the cache dir lives one
               level above it).
        :param inputBlockFn: Blocking function forwarded to the parent class —
               presumably used to wait on a predecessor's writes; TODO confirm in
               FileStore.__init__.
        """
        super(CachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
        # Variables related to asynchronous writes.
        self.workerNumber = 2
        self.queue = Queue()
        self.updateSemaphore = Semaphore()
        # Background threads that run self.asyncWrite to drain self.queue.
        self.workers = [Thread(target=self.asyncWrite) for i in range(self.workerNumber)]
        for worker in self.workers:
            worker.start()
        # Variables related to caching
        # cacheDir has to be 1 levels above local worker tempdir, at the same level as the
        # worker dirs. At this point, localTempDir is the worker directory, not the job
        # directory.
        self.localCacheDir = os.path.join(os.path.dirname(localTempDir),
                                          cacheDirName(self.jobStore.config.workflowID))
        # Hidden ('.'-prefixed) lock file and the state file holding the cache accounting.
        self.cacheLockFile = os.path.join(self.localCacheDir, '.cacheLock')
        self.cacheStateFile = os.path.join(self.localCacheDir, '_cacheState')
        # Since each worker has it's own unique CachingFileStore instance, and only one Job can run
        # at a time on a worker, we can bookkeep the job's file store operated files in a
        # dictionary.
        self.jobSpecificFiles = {}
        self.jobName = str(self.jobGraph)
        # Stable per-job identifier derived from the job name; used as the key into the
        # cache state file's jobState mapping.
        self.jobID = sha1(self.jobName.encode('utf-8')).hexdigest()
        logger.info('Starting job (%s) with ID (%s).', self.jobName, self.jobID)
        # A variable to describe how many hard links an unused file in the cache will have.
        self.nlinkThreshold = None
        self.workflowAttemptNumber = self.jobStore.config.workflowAttemptNumber
        # This is a flag to better resolve cache equation imbalances at cleanup time.
        self.cleanupInProgress = False
        # Now that we've setup all the required variables, setup the cache directory for the
        # job if required.
        self._setupCache()
@contextmanager
def open(self, job):
    """
    Context manager wrapped around the execution of one job in worker.py, so that
    cache-specific operations can be conducted before and after the job body runs.

    Before yielding: creates the job's working directory, recovers cache state from jobs
    that died on this node, and cleans the cache to make room for this job's disk request.
    After the job body finishes (or raises): logs disk usage, returns the job's disk
    requirement, runs registered deferred functions, and drops this job from the cache
    state file.

    :param job: The job about to run; its `disk` attribute gives its disk requirement.
    """
    # Create a working directory for the job
    startingDir = os.getcwd()
    self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
    # Check the status of all jobs on this node. If there are jobs that started and died before
    # cleaning up their presence from the cache state file, restore the cache file to a state
    # where the jobs don't exist.
    with self._CacheState.open(self) as cacheInfo:
        self.findAndHandleDeadJobs(cacheInfo)
        # While we have a lock on the cache file, run a naive check to see if jobs on this node
        # have greatly gone over their requested limits.
        if cacheInfo.sigmaJob < 0:
            # NOTE(review): there is a missing space between 'more' and 'information' in
            # this message as written.
            logger.warning('Detecting that one or more jobs on this node have used more '
                           'resources than requested. Turn on debug logs to see more'
                           'information on cache usage.')
        # Get the requirements for the job and clean the cache if necessary. cleanCache will
        # ensure that the requirements for this job are stored in the state file.
        jobReqs = job.disk
        # Cleanup the cache to free up enough space for this job (if needed)
        self.cleanCache(jobReqs)
    try:
        os.chdir(self.localTempDir)
        yield
    finally:
        # Runs whether the job body succeeded or raised.
        diskUsed = getDirSizeRecursively(self.localTempDir)
        logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
                     "{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
                     "its run.".format(jobName=self.jobName,
                                       percent=(float(diskUsed) / jobReqs * 100 if
                                                jobReqs > 0 else 0.0),
                                       humanDisk=bytes2human(diskUsed),
                                       disk=diskUsed,
                                       humanRequestedDisk=bytes2human(jobReqs),
                                       requestedDisk=jobReqs))
        self.logToMaster(logString, level=logging.DEBUG)
        if diskUsed > jobReqs:
            self.logToMaster("Job used more disk than requested. Please reconsider modifying "
                             "the user script to avoid the chance of failure due to "
                             "incorrectly requested resources. " + logString,
                             level=logging.WARNING)
        os.chdir(startingDir)
        # Flag cleanup mode so that subsequent deletions skip cache-eviction bookkeeping.
        self.cleanupInProgress = True
        # Delete all the job specific files and return sizes to jobReqs
        self.returnJobReqs(jobReqs)
        with self._CacheState.open(self) as cacheInfo:
            # Carry out any user-defined cleanup actions
            deferredFunctions = cacheInfo.jobState[self.jobID]['deferredFunctions']
            failures = self._runDeferredFunctions(deferredFunctions)
            for failure in failures:
                self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
            # Finally delete the job from the cache state file
            cacheInfo.jobState.pop(self.jobID)
# Functions related to reading, writing and removing files to/from the job store
def writeGlobalFile(self, localFileName, cleanup=False):
    """
    Takes a file (as a path) and uploads it to the job store. Depending on the jobstore
    used, carry out the appropriate cache functions.

    :param str localFileName: Absolute path to the file, or path relative to the local
           temp dir.
    :param bool cleanup: If True, associate the uploaded file with this job's jobStoreID
           so the job store can remove it when the job is cleaned up.
    :return: The job store ID of the uploaded file.
    :rtype: FileID
    """
    absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
    # cleanup=True ties the new file's lifetime to this job's jobStoreID.
    cleanupID = None if not cleanup else self.jobGraph.jobStoreID
    # If the file is from the scope of local temp dir
    if absLocalFileName.startswith(self.localTempDir):
        # If the job store is of type FileJobStore and the job store and the local temp dir
        # are on the same file system, then we want to hard link the files instead of copying
        # barring the case where the file being written was one that was previously read
        # from the file store. In that case, you want to copy to the file store so that
        # the two have distinct nlink counts.
        # Can read without a lock because we're only reading job-specific info.
        jobSpecificFiles = list(self._CacheState._load(self.cacheStateFile).jobState[
            self.jobID]['filesToFSIDs'].keys())
        # Saying nlink is 2 implicitly means we are using the job file store, and it is on
        # the same device as the work dir.
        if self.nlinkThreshold == 2 and absLocalFileName not in jobSpecificFiles:
            jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
            # getEmptyFileStoreID creates the file in the scope of the job store hence we
            # need to delete it before linking.
            os.remove(self.jobStore._getAbsPath(jobStoreFileID))
            os.link(absLocalFileName, self.jobStore._getAbsPath(jobStoreFileID))
        # If they're not on the file system, or if the file is already linked with an
        # existing file, we need to copy to the job store.
        # Check if the user allows asynchronous file writes
        elif self.jobStore.config.useAsync:
            jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
            # Before we can start the async process, we should also create a dummy harbinger
            # file in the cache such that any subsequent jobs asking for this file will not
            # attempt to download it from the job store till the write is complete. We do
            # this now instead of in the writing thread because there is an edge case where
            # readGlobalFile in a subsequent job is called before the writing thread has
            # received the message to write the file and has created the dummy harbinger
            # (and the file was unable to be cached/was evicted from the cache).
            harbingerFile = self.HarbingerFile(self, fileStoreID=jobStoreFileID)
            harbingerFile.write()
            # NOTE(review): ownership of this handle passes to the async write thread via
            # the queue — presumably asyncWrite closes it; TODO confirm.
            fileHandle = open(absLocalFileName, 'rb')
            with self._pendingFileWritesLock:
                self._pendingFileWrites.add(jobStoreFileID)
            # A file handle added to the queue allows the asyncWrite threads to remove their
            # jobID from _pendingFileWrites. Therefore, a file should only be added after
            # its fileID is added to _pendingFileWrites
            self.queue.put((fileHandle, jobStoreFileID))
        # Else write directly to the job store.
        else:
            jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
        # Local files are cached by default, unless they were written from previously read
        # files.
        if absLocalFileName not in jobSpecificFiles:
            self.addToCache(absLocalFileName, jobStoreFileID, 'write')
        else:
            self._JobState.updateJobSpecificFiles(self, jobStoreFileID, absLocalFileName,
                                                  0.0, False)
    # Else write directly to the job store.
    else:
        jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
        # Non local files are NOT cached by default, but they are tracked as local files.
        self._JobState.updateJobSpecificFiles(self, jobStoreFileID, None,
                                              0.0, False)
    return FileID.forPath(jobStoreFileID, absLocalFileName)
def writeGlobalFileStream(self, cleanup=False):
    """
    Open a write stream to a new job store file. Streamed writes bypass the cache and
    defer entirely to the parent class implementation.

    :param bool cleanup: Passed through to the parent implementation.
    """
    # TODO: Make this work with caching
    return super(CachingFileStore, self).writeGlobalFileStream(cleanup)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
    """
    Downloads a file described by fileStoreID from the file store to the local directory.
    The function first looks for the file in the cache and if found, it hardlinks to the
    cached copy instead of downloading.

    The cache parameter will be used only if the file isn't already in the cache, and
    provided user path (if specified) is in the scope of local temp dir.

    :param fileStoreID: Job store ID of the file to fetch.
    :param str userPath: Optional path to place the file at (absolute, or relative to the
           local temp dir). Must not already exist.
    :param bool cache: If True, a copy of the file will be saved into a cache that can be
           used by other workers. caching supports multiple concurrent workers requesting the
           same file by allowing only one to download the file while the others wait for it to
           complete.
    :param bool mutable: If True, the file path returned points to a file that is
           modifiable by the user. Using False is recommended as it saves disk by making
           multiple workers share a file via hard links. The default is False.
    :param bool symlink: NOTE(review): accepted but never used in this implementation, and
           not forwarded by the recursive call below — confirm whether this is intended.
    :return: Absolute path to the local copy of the file.
    :rtype: str
    """
    # Check that the file hasn't been deleted by the user
    if fileStoreID in self.filesToDelete:
        raise RuntimeError('Trying to access a file in the jobStore you\'ve deleted: ' + \
                           '%s' % fileStoreID)
    # Get the name of the file as it would be in the cache
    cachedFileName = self.encodedFileID(fileStoreID)
    # setup the harbinger variable for the file. This is an identifier that the file is
    # currently being downloaded by another job and will be in the cache shortly. It is used
    # to prevent multiple jobs from simultaneously downloading the same file from the file
    # store.
    harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
    # setup the output filename. If a name is provided, use it - This makes it a Named
    # Local File. If a name isn't provided, use the base64 encoded name such that we can
    # easily identify the files later on.
    if userPath is not None:
        localFilePath = self._resolveAbsoluteLocalPath(userPath)
        if os.path.exists(localFilePath):
            # yes, this is illegal now.
            raise RuntimeError(' File %s ' % localFilePath + ' exists. Cannot Overwrite.')
        fileIsLocal = True if localFilePath.startswith(self.localTempDir) else False
    else:
        localFilePath = self.getLocalTempFileName()
        fileIsLocal = True
    # First check whether the file is in cache. If it is, then hardlink the file to
    # userPath. Cache operations can only occur on local files.
    with self.cacheLock() as lockFileHandle:
        if fileIsLocal and self._fileIsCached(fileStoreID):
            logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
            assert not os.path.exists(localFilePath)
            if mutable:
                # Mutable copies must not share an inode with the cache, so copy.
                shutil.copyfile(cachedFileName, localFilePath)
                cacheInfo = self._CacheState._load(self.cacheStateFile)
                jobState = self._JobState(cacheInfo.jobState[self.jobID])
                jobState.addToJobSpecFiles(fileStoreID, localFilePath, -1, None)
                cacheInfo.jobState[self.jobID] = jobState.__dict__
                cacheInfo.write(self.cacheStateFile)
            else:
                # Immutable copies can share the cached inode via a hard link.
                os.link(cachedFileName, localFilePath)
                self.returnFileSize(fileStoreID, localFilePath, lockFileHandle,
                                    fileAlreadyCached=True)
        # If the file is not in cache, check whether the .harbinger file for the given
        # FileStoreID exists. If it does, the wait and periodically check for the removal
        # of the file and the addition of the completed download into cache of the file by
        # the other job. Then we link to it.
        elif fileIsLocal and harbingerFile.exists():
            harbingerFile.waitOnDownload(lockFileHandle)
            # If the code reaches here, the harbinger file has been removed. This means
            # either the file was successfully downloaded and added to cache, or something
            # failed. To prevent code duplication, we recursively call readGlobalFile.
            flock(lockFileHandle, LOCK_UN)
            return self.readGlobalFile(fileStoreID, userPath=userPath, cache=cache,
                                       mutable=mutable)
        # If the file is not in cache, then download it to the userPath and then add to
        # cache if specified.
        else:
            logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
            if fileIsLocal and cache:
                # If caching of the downloaded file is desired, First create the harbinger
                # file so other jobs know not to redundantly download the same file. Write
                # the PID of this process into the file so other jobs know who is carrying
                # out the download.
                harbingerFile.write()
                # Now release the file lock while the file is downloaded as download could
                # take a while.
                flock(lockFileHandle, LOCK_UN)
                # Use try:finally: so that the .harbinger file is removed whether the
                # download succeeds or not.
                # '/.'.join(os.path.split(...)) produces a hidden '.'-prefixed partial
                # download path next to the final cache path.
                try:
                    self.jobStore.readFile(fileStoreID,
                                           '/.'.join(os.path.split(cachedFileName)))
                except:
                    if os.path.exists('/.'.join(os.path.split(cachedFileName))):
                        os.remove('/.'.join(os.path.split(cachedFileName)))
                    raise
                else:
                    # If the download succeded, officially add the file to cache (by
                    # recording it in the cache lock file) if possible.
                    if os.path.exists('/.'.join(os.path.split(cachedFileName))):
                        os.rename('/.'.join(os.path.split(cachedFileName)), cachedFileName)
                        self.addToCache(localFilePath, fileStoreID, 'read', mutable)
                        # We don't need to return the file size here because addToCache
                        # already does it for us
                finally:
                    # In any case, delete the harbinger file.
                    harbingerFile.delete()
            else:
                # Release the cache lock since the remaining stuff is not cache related.
                flock(lockFileHandle, LOCK_UN)
                self.jobStore.readFile(fileStoreID, localFilePath)
                os.chmod(localFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
                # Now that we have the file, we have 2 options. It's modifiable or not.
                # Either way, we need to account for FileJobStore making links instead of
                # copies.
                if mutable:
                    if self.nlinkThreshold == 2:
                        # nlinkThreshold can only be 1 or 2 and it can only be 2 iff the
                        # job store is FilejobStore, and the job store and local temp dir
                        # are on the same device. An atomic rename removes the nlink on the
                        # file handle linked from the job store.
                        shutil.copyfile(localFilePath, localFilePath + '.tmp')
                        os.rename(localFilePath + '.tmp', localFilePath)
                    self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
                                                          -1, False)
                # If it was immutable
                else:
                    if self.nlinkThreshold == 2:
                        self._accountForNlinkEquals2(localFilePath)
                    self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
                                                          0.0, False)
    return localFilePath
def exportFile(self, jobStoreFileID, dstUrl):
    """
    Export a job store file to the given destination URL, first waiting for any pending
    asynchronous write of that file to finish.

    :param jobStoreFileID: ID of the job store file to export.
    :param str dstUrl: URL to export the file to.
    """
    while jobStoreFileID in self._pendingFileWrites:
        # The file is still being written to the job store - wait for this process to
        # finish prior to exporting it
        time.sleep(1)
    self.jobStore.exportFile(jobStoreFileID, dstUrl)
def readGlobalFileStream(self, fileStoreID):
    """
    Return a readable stream for the file identified by fileStoreID, serving it from
    the local cache when a cached copy exists and from the job store otherwise.

    :param fileStoreID: Job store ID of the file to stream.
    :raises RuntimeError: If the file was already deleted by the user.
    """
    if fileStoreID in self.filesToDelete:
        raise RuntimeError(
            "Trying to access a file in the jobStore you've deleted: %s" % fileStoreID)
    # Prefer the cached copy when one is present.
    if not self._fileIsCached(fileStoreID):
        logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
        return self.jobStore.readFileStream(fileStoreID)
    logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
    return open(self.encodedFileID(fileStoreID), 'rb')
def deleteLocalFile(self, fileStoreID):
    """
    Delete all local copies of the file with the given ID that belong to this job, doing
    the cache bookkeeping required for copies that were cached.

    :param fileStoreID: ID of the job store file whose local copies should be deleted.
    :raises OSError: With errno.ENOENT if the file is not a local file of this job.
    """
    # The local file may or may not have been cached. If it was, we need to do some
    # bookkeeping. If it wasn't, we just delete the file and continue; we might still need
    # some bookkeeping if the file store and cache live on the same filesystem. We can know
    # if a file was cached or not based on the value held in the third tuple value for the
    # dict item having key = fileStoreID. If it was cached, it holds the value True else
    # False.
    with self._CacheState.open(self) as cacheInfo:
        jobState = self._JobState(cacheInfo.jobState[self.jobID])
        if fileStoreID not in list(jobState.jobSpecificFiles.keys()):
            # ENOENT indicates that the file did not exist
            raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
        # filesToDelete is a dictionary of file: fileSize
        filesToDelete = jobState.jobSpecificFiles[fileStoreID]
        allOwnedFiles = jobState.filesToFSIDs
        for (fileToDelete, fileSize) in list(filesToDelete.items()):
            # Handle the case where a file not in the local temp dir was written to
            # filestore
            if fileToDelete is None:
                filesToDelete.pop(fileToDelete)
                allOwnedFiles[fileToDelete].remove(fileStoreID)
                cacheInfo.jobState[self.jobID] = jobState.__dict__
                cacheInfo.write(self.cacheStateFile)
                continue
            # If the file size is zero (copied into the local temp dir) or -1 (mutable), we
            # can safely delete without any bookkeeping
            if fileSize in (0, -1):
                # Only remove the file if there is only one FSID associated with it.
                if len(allOwnedFiles[fileToDelete]) == 1:
                    try:
                        os.remove(fileToDelete)
                    except OSError as err:
                        if err.errno == errno.ENOENT and fileSize == -1:
                            logger.debug('%s was read mutably and deleted by the user',
                                         fileToDelete)
                        else:
                            raise IllegalDeletionCacheError(fileToDelete)
                allOwnedFiles[fileToDelete].remove(fileStoreID)
                filesToDelete.pop(fileToDelete)
                cacheInfo.jobState[self.jobID] = jobState.__dict__
                cacheInfo.write(self.cacheStateFile)
                continue
            # If not, we need to do bookkeeping
            # Get the size of the file to be deleted, and the number of jobs using the file
            # at the moment.
            if not os.path.exists(fileToDelete):
                raise IllegalDeletionCacheError(fileToDelete)
            fileStats = os.stat(fileToDelete)
            if fileSize != fileStats.st_size:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning("the size on record differed from the real size by " +
                               "%s bytes" % str(fileSize - fileStats.st_size))
            # Remove the file and return file size to the job
            if len(allOwnedFiles[fileToDelete]) == 1:
                os.remove(fileToDelete)
                cacheInfo.sigmaJob += fileSize
            filesToDelete.pop(fileToDelete)
            allOwnedFiles[fileToDelete].remove(fileStoreID)
            jobState.updateJobReqs(fileSize, 'remove')
            cacheInfo.jobState[self.jobID] = jobState.__dict__
            # If the job is not in the process of cleaning up, then we may need to remove the
            # cached copy of the file as well.
            if not self.cleanupInProgress:
                # If the file is cached and if other jobs are using the cached copy of the file,
                # or if retaining the file in the cache doesn't affect the cache equation, then
                # don't remove it from cache.
                if self._fileIsCached(fileStoreID):
                    cachedFile = self.encodedFileID(fileStoreID)
                    jobsUsingFile = os.stat(cachedFile).st_nlink
                    if not cacheInfo.isBalanced() and jobsUsingFile == self.nlinkThreshold:
                        os.remove(cachedFile)
                        cacheInfo.cached -= fileSize
                        self.logToMaster('Successfully deleted cached copy of file with ID '
                                         '\'%s\'.' % fileStoreID, level=logging.DEBUG)
            self.logToMaster('Successfully deleted local copies of file with ID '
                             '\'%s\'.' % fileStoreID, level=logging.DEBUG)
def deleteGlobalFile(self, fileStoreID):
    """
    Schedule the file with the given ID for deletion from the job store, first removing
    any local copies owned by this job and any now-unneeded cached copy.

    :param fileStoreID: ID of the job store file to delete globally.
    """
    jobStateIsPopulated = False
    with self._CacheState.open(self) as cacheInfo:
        if self.jobID in cacheInfo.jobState:
            jobState = self._JobState(cacheInfo.jobState[self.jobID])
            jobStateIsPopulated = True
    if jobStateIsPopulated and fileStoreID in list(jobState.jobSpecificFiles.keys()):
        # Use deleteLocalFile in the backend to delete the local copy of the file.
        self.deleteLocalFile(fileStoreID)
        # At this point, the local file has been deleted, and possibly the cached copy. If
        # the cached copy exists, it is either because another job is using the file, or
        # because retaining the file in cache doesn't unbalance the caching equation. The
        # first case is unacceptable for deleteGlobalFile and the second requires explicit
        # deletion of the cached copy.
        # Check if the fileStoreID is in the cache. If it is, ensure only the current job is
        # using it.
        cachedFile = self.encodedFileID(fileStoreID)
        if os.path.exists(cachedFile):
            self.removeSingleCachedFile(fileStoreID)
    # Add the file to the list of files to be deleted once the run method completes.
    self.filesToDelete.add(fileStoreID)
    self.logToMaster('Added file with ID \'%s\' to the list of files to be' % fileStoreID +
                     ' globally deleted.', level=logging.DEBUG)
# Cache related methods
@contextmanager
def cacheLock(self):
    """
    Context manager that acquires an exclusive flock on the cache lock file, preventing
    concurrent cache operations between workers on this node.

    :yields: File handle for the cache lock file, opened in 'w' mode
    """
    lockHandle = open(self.cacheLockFile, 'w')
    try:
        flock(lockHandle, LOCK_EX)
        logger.debug("CACHE: Obtained lock on file %s" % self.cacheLockFile)
        yield lockHandle
    except IOError:
        logger.critical('CACHE: Unable to acquire lock on %s' % self.cacheLockFile)
        raise
    finally:
        # Closing the handle also drops the flock.
        lockHandle.close()
        logger.debug("CACHE: Released lock")
def _setupCache(self):
    """
    Setup the cache based on the provided values for localCacheDir.

    Creates the node-level cache directory atomically (racing workers lose gracefully),
    then reconciles the cache state with the current workflow attempt number and records
    the node's nlink threshold on this instance.
    """
    # we first check whether the cache directory exists. If it doesn't, create it.
    if not os.path.exists(self.localCacheDir):
        # Create a temporary directory as this worker's private cache. If all goes well, it
        # will be renamed into the cache for this node.
        personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
                                    str(uuid.uuid4())])
        os.mkdir(personalCacheDir, 0o755)
        self._createCacheLockFile(personalCacheDir)
        try:
            os.rename(personalCacheDir, self.localCacheDir)
        except OSError as err:
            # The only acceptable FAIL case is that the destination is a non-empty directory
            # directory. Assuming (it's ambiguous) atomic renaming of directories, if the
            # dst is non-empty, it only means that another worker has beaten this one to the
            # rename.
            if err.errno == errno.ENOTEMPTY:
                # Cleanup your own mess. It's only polite.
                shutil.rmtree(personalCacheDir)
            else:
                raise
    # You can't reach here unless a local cache directory has been created successfully
    with self._CacheState.open(self) as cacheInfo:
        # Ensure this cache is from the correct attempt at the workflow! If it isn't, we
        # need to reset the cache lock file
        if cacheInfo.attemptNumber != self.workflowAttemptNumber:
            if cacheInfo.nlink == 2:
                cacheInfo.cached = 0  # cached file sizes are accounted for by job store
            else:
                # Recompute the cache total from the non-hidden files actually on disk.
                allCachedFiles = [os.path.join(self.localCacheDir, x)
                                  for x in os.listdir(self.localCacheDir)
                                  if not self._isHidden(x)]
                cacheInfo.cached = sum([os.stat(cachedFile).st_size
                                        for cachedFile in allCachedFiles])
                # TODO: Delete the working directories
            cacheInfo.sigmaJob = 0
            cacheInfo.attemptNumber = self.workflowAttemptNumber
        self.nlinkThreshold = cacheInfo.nlink
def _createCacheLockFile(self, tempCacheDir):
    """
    Create the cache lock file and seed the cache state file that records the state of
    the cache on this node.

    :param str tempCacheDir: Temporary directory to use for setting up a cache lock file
           the first time.
    """
    # The nlink threshold is setup along with the first instance of the cache class on
    # the node, so determine it before anything is written.
    self.setNlinkThreshold()
    # Get the free space on the device holding the cache.
    freeSpace, _ = getFileSystemSize(tempCacheDir)
    # Create an empty lock file.
    open(os.path.join(tempCacheDir, os.path.basename(self.cacheLockFile)), 'w').close()
    # Seed the cache state file with its initial accounting values.
    initialState = {
        'nlink': self.nlinkThreshold,
        'attemptNumber': self.workflowAttemptNumber,
        'total': freeSpace,
        'cached': 0,
        'sigmaJob': 0,
        'cacheDir': self.localCacheDir,
        'jobState': {}}
    personalCacheStateFile = os.path.join(tempCacheDir,
                                          os.path.basename(self.cacheStateFile))
    self._CacheState(initialState).write(personalCacheStateFile)
def encodedFileID(self, jobStoreFileID):
    """
    Map a job store file ID to the unique file name used for it inside the cache folder.
    Job store IDs are essentially urls/paths to files and thus cannot be used as is, so a
    url-safe base64 encoding is applied — chosen because it is reversible.

    :param jobStoreFileID: string representing a job store file ID
    :return: A path to the encoded file name in localCacheDir
    :rtype: str
    """
    encodedName = base64.urlsafe_b64encode(jobStoreFileID.encode('utf-8')).decode('utf-8')
    return os.path.join(self.localCacheDir, encodedName)
def _fileIsCached(self, jobStoreFileID):
"""
Is the file identified by jobStoreFileID in cache or not.
"""
return os.path.exists(self.encodedFileID(jobStoreFileID))
def decodedFileID(self, cachedFilePath):
    """
    Decode a cached file name back to the job store file ID it was derived from.

    :param str cachedFilePath: Path to the cached file
    :return: The jobstore file ID associated with the file
    :rtype: str
    """
    cacheDir, encodedName = os.path.split(cachedFilePath)
    assert cacheDir == self.localCacheDir, 'Can\'t decode uncached file names'
    # base64 operates on bytes, so round-trip through an encode/decode pair.
    # Its probably worth, later, converting all file name variables to bytes and not text.
    return base64.urlsafe_b64decode(encodedName.encode('utf-8')).decode('utf-8')
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
    """
    Used to process the caching of a file. This depends on whether a file is being written
    to file store, or read from it.

    WRITING
    The file is in localTempDir. It needs to be linked into cache if possible.

    READING
    The file is already in the cache dir. Depending on whether it is modifiable or not, does
    it need to be linked to the required location, or copied. If it is copied, can the file
    still be retained in cache?

    :param str localFilePath: Path to the Source file
    :param jobStoreFileID: jobStoreID for the file
    :param str callingFunc: Who called this function, 'write' or 'read'
    :param bool mutable: See modifiable in readGlobalFile
    """
    assert callingFunc in ('read', 'write')
    with self.cacheLock() as lockFileHandle:
        cachedFile = self.encodedFileID(jobStoreFileID)
        # The file to be cached MUST originate in the environment of the TOIL temp directory
        if (os.stat(self.localCacheDir).st_dev !=
                os.stat(os.path.dirname(localFilePath)).st_dev):
            raise InvalidSourceCacheError('Attempting to cache a file across file systems '
                                          'cachedir = %s, file = %s.' % (self.localCacheDir,
                                                                         localFilePath))
        if not localFilePath.startswith(self.localTempDir):
            raise InvalidSourceCacheError('Attempting a cache operation on a non-local file '
                                          '%s.' % localFilePath)
        if callingFunc == 'read' and mutable:
            # A mutable read must hand the user a private copy, not a hard link.
            shutil.copyfile(cachedFile, localFilePath)
            fileSize = os.stat(cachedFile).st_size
            cacheInfo = self._CacheState._load(self.cacheStateFile)
            cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
            if not cacheInfo.isBalanced():
                # Retaining the cached copy would unbalance the cache equation, so evict it
                # and keep only the user's mutable copy.
                os.remove(cachedFile)
                cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
                # NOTE(review): this log message reads oddly ("Could not download both
                # download ...") — looks garbled; confirm intended wording.
                logger.debug('Could not download both download ' +
                             '%s as mutable and add to ' % os.path.basename(localFilePath) +
                             'cache. Hence only mutable copy retained.')
            else:
                logger.info('CACHE: Added file with ID \'%s\' to the cache.' %
                            jobStoreFileID)
            jobState = self._JobState(cacheInfo.jobState[self.jobID])
            jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
            cacheInfo.jobState[self.jobID] = jobState.__dict__
            cacheInfo.write(self.cacheStateFile)
        else:
            # There are two possibilities, read and immutable, and write. both cases do
            # almost the same thing except for the direction of the os.link hence we're
            # writing them together.
            if callingFunc == 'read':  # and mutable is inherently False
                src = cachedFile
                dest = localFilePath
                # To mirror behaviour of shutil.copyfile
                if os.path.exists(dest):
                    os.remove(dest)
            else:  # write
                src = localFilePath
                dest = cachedFile
            try:
                os.link(src, dest)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
                # If we get the EEXIST error, it can only be from write since in read we are
                # explicitly deleting the file. This shouldn't happen with the .partial
                # logic hence we raise a cache error.
                raise CacheError('Attempting to recache a file %s.' % src)
            else:
                # Chmod the cached file. Cached files can never be modified.
                os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
                # Return the filesize of cachedFile to the job and increase the cached size
                # The values passed here don't matter since rFS looks at the file only for
                # the stat
                self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle,
                                    fileAlreadyCached=False)
            if callingFunc == 'read':
                logger.debug('CACHE: Read file with ID \'%s\' from the cache.' %
                             jobStoreFileID)
            else:
                logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
                             jobStoreFileID)
def returnFileSize(self, fileStoreID, cachedFileSource, lockFileHandle,
                   fileAlreadyCached=False):
    """
    Return the size of the file described by fileStoreID to the job requirements pool
    after it was added to, or read from, the cache (a job that reads n bytes from cache
    doesn't really use those n bytes as a part of its job disk, since the cache is
    already accounting for that disk space).

    :param fileStoreID: fileStore ID of the file being added to cache
    :param str cachedFileSource: File being added to cache
    :param file lockFileHandle: Open file handle to the cache lock file
    :param bool fileAlreadyCached: True if the file was already cached, in which case its
           size must not be added to the cache total a second time.
    """
    returnedSize = os.stat(cachedFileSource).st_size
    state = self._CacheState._load(self.cacheStateFile)
    # Only grow the cache total for newly cached files, and only when the nlink
    # threshold is 1 — at threshold 2 the file store copy already accounts for the size.
    if not fileAlreadyCached and self.nlinkThreshold == 1:
        state.cached += returnedSize
    state.sigmaJob -= returnedSize
    if not state.isBalanced():
        self.logToMaster('CACHE: The cache was not balanced on returning file size',
                         logging.WARN)
    # Record the file against this job, then persist the updated state.
    perJobState = self._JobState(state.jobState[self.jobID])
    perJobState.addToJobSpecFiles(fileStoreID, cachedFileSource, returnedSize, True)
    state.jobState[self.jobID] = perJobState.__dict__
    state.write(self.cacheStateFile)
@staticmethod
def _isHidden(filePath):
"""
This is a function that checks whether filePath is hidden
:param str filePath: Path to the file under consideration
:return: A boolean indicating whether the file is hidden or not.
:rtype: bool
"""
assert isinstance(filePath, (str, bytes))
# I can safely assume i will never see an empty string because this is always called on
# the results of an os.listdir()
return filePath[0] in ('.', '_')
def cleanCache(self, newJobReqs):
    """
    Cleanup all files in the cache directory to ensure that at least newJobReqs bytes are
    available for use.

    :param float newJobReqs: the total number of bytes of files allowed in the cache.
    """
    with self._CacheState.open(self) as cacheInfo:
        # Add the new job's disk requirements to the sigmaJobDisk variable
        cacheInfo.sigmaJob += newJobReqs
        # Initialize the job state here. we use a partial in the jobSpecificFiles call so
        # that this entire thing is pickleable. Based on answer by user Nathaniel Gentile at
        # http://stackoverflow.com/questions/2600790
        assert self.jobID not in cacheInfo.jobState
        cacheInfo.jobState[self.jobID] = {
            'jobName': self.jobName,
            'jobReqs': newJobReqs,
            'jobDir': self.localTempDir,
            'jobSpecificFiles': defaultdict(partial(defaultdict, int)),
            'filesToFSIDs': defaultdict(set),
            'pid': os.getpid(),
            'deferredFunctions': []}
        # If the caching equation is balanced, do nothing.
        if cacheInfo.isBalanced():
            return None
        # List of deletable cached files. A deletable cache file is one
        # that is not in use by any other worker (identified by the number of symlinks to
        # the file)
        allCacheFiles = [os.path.join(self.localCacheDir, x)
                         for x in os.listdir(self.localCacheDir)
                         if not self._isHidden(x)]
        allCacheFiles = [(path, os.stat(path)) for path in allCacheFiles]
        # TODO mtime vs ctime
        # A file whose link count equals the threshold has no hard links held by a
        # running job, so evicting it is safe.
        deletableCacheFiles = {(path, inode.st_mtime, inode.st_size)
                               for path, inode in allCacheFiles
                               if inode.st_nlink == self.nlinkThreshold}
        # Sort in descending order of mtime so the first items to be popped from the list
        # are the least recently created.
        deletableCacheFiles = sorted(deletableCacheFiles, key=lambda x: (-x[1], -x[2]))
        logger.debug('CACHE: Need %s bytes for new job. Detecting an estimated %s (out of a '
                     'total %s) bytes available for running the new job. The size of the cache '
                     'is %s bytes.', newJobReqs,
                     (cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)),
                     cacheInfo.total, cacheInfo.cached)
        logger.debug('CACHE: Evicting files to make room for the new job.')
        # Now do the actual file removal
        totalEvicted = 0
        while not cacheInfo.isBalanced() and len(deletableCacheFiles) > 0:
            # pop() takes from the end of the sorted list, i.e. the oldest file.
            cachedFile, fileCreateTime, cachedFileSize = deletableCacheFiles.pop()
            os.remove(cachedFile)
            # With nlinkThreshold == 2 the file store copy accounts for the bytes,
            # so the cache total is only decremented in the threshold-1 case.
            cacheInfo.cached -= cachedFileSize if self.nlinkThreshold != 2 else 0
            totalEvicted += cachedFileSize
            assert cacheInfo.cached >= 0
            logger.debug('CACHE: Evicted file with ID \'%s\' (%s bytes)' %
                         (self.decodedFileID(cachedFile), cachedFileSize))
        logger.debug('CACHE: Evicted a total of %s bytes. Available space is now %s bytes.',
                     totalEvicted,
                     (cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)))
        if not cacheInfo.isBalanced():
            raise CacheUnbalancedError()
def removeSingleCachedFile(self, fileStoreID):
    """
    Removes a single file described by the fileStoreID from the cache forcibly.

    :param fileStoreID: job store id of the cached file to remove.
    """
    with self._CacheState.open(self) as cacheInfo:
        cachedFile = self.encodedFileID(fileStoreID)
        cachedFileStats = os.stat(cachedFile)
        # We know the file exists because this function was called in the if block. So we
        # have to ensure nothing has changed since then.
        assert cachedFileStats.st_nlink == self.nlinkThreshold, 'Attempting to delete ' + \
            'a global file that is in use by another job.'
        # Remove the file size from the cached file size if the jobstore is not fileJobStore
        # and then delete the file
        os.remove(cachedFile)
        if self.nlinkThreshold != 2:
            # With threshold 2 the file store copy accounts for the bytes, so the
            # cache total only tracked this file in the threshold-1 case.
            cacheInfo.cached -= cachedFileStats.st_size
        if not cacheInfo.isBalanced():
            self.logToMaster('CACHE: The cache was not balanced on removing single file',
                             logging.WARN)
        self.logToMaster('CACHE: Successfully removed file with ID \'%s\'.' % fileStoreID)
    return None
def setNlinkThreshold(self):
    """
    Decide how many hard links a cached file has when it is not in use by any job.

    When the job store is a FileJobStore living on the same device as the cache
    directory, cached files share a hard link with the file store copy, so the
    baseline link count is 2; otherwise it is 1.
    """
    # FIXME Can't do this at the top because of loopy (circular) import errors
    from toil.jobStores.fileJobStore import FileJobStore
    self.nlinkThreshold = 1
    if isinstance(self.jobStore, FileJobStore):
        cacheDevice = os.stat(os.path.dirname(self.localCacheDir)).st_dev
        jobStoreDevice = os.stat(self.jobStore.jobStoreDir).st_dev
        if cacheDevice == jobStoreDevice:
            self.nlinkThreshold = 2
def _accountForNlinkEquals2(self, localFilePath):
    """
    This is a utility function that accounts for the fact that if nlinkThreshold == 2, the
    size of the file is accounted for by the file store copy of the file and thus the file
    size shouldn't be added to the cached file sizes.

    :param str localFilePath: Path to the local file that was linked to the file store copy.
    """
    fileStats = os.stat(localFilePath)
    assert fileStats.st_nlink >= self.nlinkThreshold
    with self._CacheState.open(self) as cacheInfo:
        # The bytes are "paid for" by the file store copy, so return them to the
        # job disk requirement pool and record the reduction in the job's state.
        cacheInfo.sigmaJob -= fileStats.st_size
        jobState = self._JobState(cacheInfo.jobState[self.jobID])
        jobState.updateJobReqs(fileStats.st_size, 'remove')
def returnJobReqs(self, jobReqs):
    """
    This function returns the effective job requirements back to the pool after the job
    completes. It also deletes the local copies of files with the cache lock held.

    :param float jobReqs: Original size requirement of the job
    """
    # Since we are only reading this job's specific values from the state file, we don't
    # need a lock
    jobState = self._JobState(self._CacheState._load(self.cacheStateFile
                                                     ).jobState[self.jobID])
    # Iterate over a copied key list because deleteLocalFile mutates the state.
    for x in list(jobState.jobSpecificFiles.keys()):
        self.deleteLocalFile(x)
    with self._CacheState.open(self) as cacheInfo:
        cacheInfo.sigmaJob -= jobReqs
        # assert cacheInfo.isBalanced() # commenting this out for now. God speed
class _CacheState(FileStore._StateFile):
    """
    Utility class to read and write the cache lock file. Also for checking whether the
    caching equation is balanced or not. It extends the _StateFile class to add other cache
    related functions.
    """

    @classmethod
    @contextmanager
    def open(cls, outer=None):
        """
        This is a context manager that opens the cache state file and reads it into an object
        that is returned to the user in the yield; any mutations made by the caller are
        written back to disk on exit, all under the cache lock.

        :param outer: the CachingFileStore instance owning the lock and state file.
            Required despite the default of None (asserted below).
        """
        assert outer is not None
        with outer.cacheLock():
            cacheInfo = cls._load(outer.cacheStateFile)
            yield cacheInfo
            # NOTE(review): if the with-body raises, this write is skipped and the
            # caller's mutations are discarded -- presumably intentional.
            cacheInfo.write(outer.cacheStateFile)

    def isBalanced(self):
        """
        Checks for the inequality of the caching equation, i.e.
        cachedSpace + sigmaJobDisk <= totalFreeSpace

        Essentially, the sum of all cached file + disk requirements of all running jobs
        should always be less than the available space on the system

        :return: Boolean for equation is balanced (T) or not (F)
        :rtype: bool
        """
        return self.cached + self.sigmaJob <= self.total

    def purgeRequired(self, jobReqs):
        """
        Similar to isBalanced, however it looks at the actual state of the system and
        decides whether an eviction is required.

        :return: Is a purge required(T) or no(F)
        :rtype: bool
        """
        # Currently just the caching equation; the commented-out statvfs-based
        # implementation below is the intended "actual state of the system" check.
        return not self.isBalanced()
        # totalStats = os.statvfs(self.cacheDir)
        # totalFree = totalStats.f_bavail * totalStats.f_frsize
        # return totalFree < jobReqs
# Methods related to the deferred function logic
@classmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
    """
    Detect jobs recorded in the cache state whose process has died, and clean up after
    them: return their disk to the pool, remove their work directory, run their deferred
    functions, and drop them from the state.

    :param toil.fileStore.CachingFileStore._CacheState nodeInfo: The state of the node cache
        as a _CacheState object
    :param bool batchSystemShutdown: True when called during batch system shutdown, in
        which case work-directory removal and disk bookkeeping are left to the batch
        system cleanup code.
    """
    # A list of tuples of (hashed job id, pid or process running job)
    registeredJobs = [(jid, state['pid']) for jid, state in list(nodeInfo.jobState.items())]
    for jobID, jobPID in registeredJobs:
        if not cls._pidExists(jobPID):
            jobState = CachingFileStore._JobState(nodeInfo.jobState[jobID])
            logger.warning('Detected that job (%s) prematurely terminated. Fixing the state '
                           'of the cache.', jobState.jobName)
            if not batchSystemShutdown:
                logger.debug("Returning dead job's used disk to cache.")
                # Delete the old work directory if it still exists, to remove unwanted nlinks.
                # Do this only during the life of the program and dont' do it during the
                # batch system cleanup. Leave that to the batch system cleanup code.
                if os.path.exists(jobState.jobDir):
                    shutil.rmtree(jobState.jobDir)
                nodeInfo.sigmaJob -= jobState.jobReqs
            logger.debug('Running user-defined deferred functions.')
            cls._runDeferredFunctions(jobState.deferredFunctions)
            # Remove job from the cache state file
            nodeInfo.jobState.pop(jobID)
def _registerDeferredFunction(self, deferredFunction):
    """
    Record a deferred function in this job's entry of the cache state file so that it can
    still be run (e.g. by findAndHandleDeadJobs) if the job dies prematurely.

    :param deferredFunction: the deferred function to register; presumably serializable,
        since the cache state file is persisted -- TODO confirm.
    """
    with self._CacheState.open(self) as cacheInfo:
        cacheInfo.jobState[self.jobID]['deferredFunctions'].append(deferredFunction)
        logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)
class _JobState(object):
    """
    This is a utility class to handle the state of a job in terms of it's current disk
    requirements, working directory, and job specific files.
    """

    def __init__(self, dictObj):
        # Hydrate the object directly from the dict stored in the cache state file.
        assert isinstance(dictObj, dict)
        self.__dict__.update(dictObj)

    @classmethod
    def updateJobSpecificFiles(cls, outer, jobStoreFileID, filePath, fileSize, cached):
        """
        This method will update the job specific files in the job state object. It deals with
        opening a cache lock file, etc.

        :param toil.fileStore.CachingFileStore outer: An instance of CachingFileStore
        :param str jobStoreFileID: job store Identifier for the file
        :param str filePath: The path to the file
        :param float fileSize: The size of the file (may be deprecated soon)
        :param bool cached: T : F : None :: cached : not cached : mutably read
        """
        with outer._CacheState.open(outer) as cacheInfo:
            jobState = cls(cacheInfo.jobState[outer.jobID])
            jobState.addToJobSpecFiles(jobStoreFileID, filePath, fileSize, cached)
            # Write the mutated state back so _CacheState.open persists it on exit.
            cacheInfo.jobState[outer.jobID] = jobState.__dict__

    def addToJobSpecFiles(self, jobStoreFileID, filePath, fileSize, cached):
        """
        This is the real method that actually does the updates.

        :param jobStoreFileID: job store Identifier for the file
        :param filePath: The path to the file
        :param fileSize: The size of the file (may be deprecated soon)
        :param cached: T : F : None :: cached : not cached : mutably read
        """
        # If there is no entry for the jsfID, make one. self.jobSpecificFiles is a default
        # dict of default dicts and the absence of a key will return an empty dict
        # (equivalent to a None for the if)
        if not self.jobSpecificFiles[jobStoreFileID]:
            self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
        else:
            # If there's no entry for the filepath, create one
            if not self.jobSpecificFiles[jobStoreFileID][filePath]:
                self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
            # This should never happen
            else:
                raise RuntimeError()
        # Now add the file to the reverse mapper. This will speed up cleanup and local file
        # deletion.
        self.filesToFSIDs[filePath].add(jobStoreFileID)
        if cached:
            self.updateJobReqs(fileSize, 'add')

    def updateJobReqs(self, fileSize, actions):
        """
        This method will update the current state of the disk required by the job after the
        most recent cache operation.

        :param fileSize: Size of the last file added/removed from the cache
        :param actions: 'add' or 'remove'
        """
        assert actions in ('add', 'remove')
        multiplier = 1 if actions == 'add' else -1
        # If the file was added to the cache, the value is subtracted from the requirements,
        # and it is added if the file was removed form the cache.
        self.jobReqs -= (fileSize * multiplier)

    def isPopulated(self):
        # True once __init__ has hydrated at least one attribute from the state dict.
        return self.__dict__ != {}
class HarbingerFile(object):
    """
    Represents the placeholder file that harbinges the arrival of a local copy of a file in
    the job store.

    The harbinger's presence in the cache directory tells other workers that some process
    (whose PID is the file's contents) is currently downloading the real file.
    """

    def __init__(self, fileStore, fileStoreID=None, cachedFileName=None):
        """
        Returns the harbinger file name for a cached file, or for a job store ID

        :param class fileStore: The 'self' object of the fileStore class
        :param str fileStoreID: The file store ID for an input file
        :param str cachedFileName: The cache file name corresponding to a given file
        """
        # We need either a file store ID, or a cached file name, but not both (XOR).
        assert (fileStoreID is None) != (cachedFileName is None)
        if fileStoreID is not None:
            self.fileStoreID = fileStoreID
            cachedFileName = fileStore.encodedFileID(fileStoreID)
        else:
            self.fileStoreID = fileStore.decodedFileID(cachedFileName)
        self.fileStore = fileStore
        # Prefix the basename with '.' (making the harbinger hidden) and append
        # the '.harbinger' suffix.
        self.harbingerFileName = '/.'.join(os.path.split(cachedFileName)) + '.harbinger'

    def write(self):
        """Atomically create the harbinger file containing this process's PID."""
        self.fileStore.logToMaster('CACHE: Creating a harbinger file for (%s). '
                                   % self.fileStoreID, logging.DEBUG)
        with open(self.harbingerFileName + '.tmp', 'w') as harbingerFile:
            harbingerFile.write(str(os.getpid()))
        # Make this File read only to prevent overwrites
        os.chmod(self.harbingerFileName + '.tmp', 0o444)
        # Atomic rename publishes the harbinger to other workers.
        os.rename(self.harbingerFileName + '.tmp', self.harbingerFileName)

    def waitOnDownload(self, lockFileHandle):
        """
        This method is called when a readGlobalFile process is waiting on another process to
        write a file to the cache.

        :param lockFileHandle: The open handle to the cache lock file (held locked on
            entry; also held on return).
        """
        while self.exists():
            logger.info('CACHE: Waiting for another worker to download file with ID %s.'
                        % self.fileStoreID)
            # Ensure that the process downloading the file is still alive. The PID will
            # be in the harbinger file.
            pid = self.read()
            if FileStore._pidExists(pid):
                # Release the file lock and then wait for a bit before repeating.
                flock(lockFileHandle, LOCK_UN)
                time.sleep(20)
                # Grab the file lock before repeating.
                flock(lockFileHandle, LOCK_EX)
            else:
                # The process that was supposed to download the file has died so we need
                # to remove the harbinger.
                self._delete()

    def read(self):
        """Return the PID recorded in the harbinger file, as an int."""
        return int(open(self.harbingerFileName).read())

    def exists(self):
        """True if the harbinger file currently exists on disk."""
        return os.path.exists(self.harbingerFileName)

    def delete(self):
        """
        Acquires the cache lock then attempts to delete the harbinger file.
        """
        with self.fileStore.cacheLock():
            self._delete()

    def _delete(self):
        """
        This function assumes you already have the cache lock!
        """
        assert self.exists()
        self.fileStore.logToMaster('CACHE: Deleting the harbinger file for (%s)' %
                                   self.fileStoreID, logging.DEBUG)
        os.remove(self.harbingerFileName)
# Functions related to async updates
def asyncWrite(self):
    """
    A function to write files asynchronously to the job store such that subsequent jobs are
    not delayed by a long write operation.

    Runs as the body of each writer thread in self.workers: pulls
    (inputFileHandle, jobStoreFileID) pairs off self.queue until a None sentinel
    arrives, streams each file into the job store, then clears the pending-write
    and harbinger bookkeeping for that file.
    """
    try:
        while True:
            try:
                # Block for up to two seconds waiting for a file
                args = self.queue.get(timeout=2)
            except Empty:
                # Check if termination event is signaled
                # (set in the event of an exception in the worker)
                if self._terminateEvent.isSet():
                    raise RuntimeError("The termination flag is set, exiting")
                continue
            # Normal termination condition is getting None from queue
            if args is None:
                break
            inputFileHandle, jobStoreFileID = args
            cachedFileName = self.encodedFileID(jobStoreFileID)
            # Ensure that the harbinger exists in the cache directory and that the PID
            # matches that of this writing thread.
            # If asyncWrite is ported to subprocesses instead of threads in the future,
            # insert logic here to securely overwrite the harbinger file.
            harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
            assert harbingerFile.exists()
            assert harbingerFile.read() == int(os.getpid())
            # We pass in a fileHandle, rather than the file-name, in case
            # the file itself is deleted. The fileHandle itself should persist
            # while we maintain the open file handle
            with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
                shutil.copyfileobj(inputFileHandle, outputFileHandle)
            inputFileHandle.close()
            # Remove the file from the lock files
            with self._pendingFileWritesLock:
                self._pendingFileWrites.remove(jobStoreFileID)
            # Remove the harbinger file
            harbingerFile.delete()
    except:
        # Propagate any failure to the main thread via the shared termination event.
        self._terminateEvent.set()
        raise
def _updateJobWhenDone(self):
    """
    Asynchronously update the status of the job on the disk, first waiting
    until the writing threads have finished and the input blockFn has stopped
    blocking.
    """
    def asyncUpdate():
        # Runs in a helper thread; updateSemaphore is held for its whole duration
        # (acquired by the outer method, released in the finally below).
        try:
            # Wait till all file writes have completed
            for i in range(len(self.workers)):
                self.queue.put(None)
            for thread in self.workers:
                thread.join()
            # Wait till input block-fn returns - in the event of an exception
            # this will eventually terminate
            self.inputBlockFn()
            # Check the terminate event, if set we can not guarantee
            # that the workers ended correctly, therefore we exit without
            # completing the update
            if self._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set, exiting before update")
            # Indicate any files that should be deleted once the update of
            # the job wrapper is completed.
            self.jobGraph.filesToDelete = list(self.filesToDelete)
            # Complete the job
            self.jobStore.update(self.jobGraph)
            # Delete any remnant jobs
            list(map(self.jobStore.delete, self.jobsToDelete))
            # Delete any remnant files
            list(map(self.jobStore.deleteFile, self.filesToDelete))
            # Remove the files to delete list, having successfully removed the files
            if len(self.filesToDelete) > 0:
                self.jobGraph.filesToDelete = []
                # Update, removing emptying files to delete
                self.jobStore.update(self.jobGraph)
        except:
            self._terminateEvent.set()
            raise
        finally:
            # Indicate that _blockFn can return
            # This code will always run
            self.updateSemaphore.release()
    # The update semaphore is held while the job is written to the job store
    try:
        self.updateSemaphore.acquire()
        t = Thread(target=asyncUpdate)
        t.start()
    except:
        # This is to ensure that the semaphore is released in a crash to stop a deadlock
        # scenario
        self.updateSemaphore.release()
        raise
def _blockFn(self):
self.updateSemaphore.acquire()
self.updateSemaphore.release() # Release so that the block function can be recalled
# This works, because once acquired the semaphore will not be acquired
# by _updateJobWhenDone again.
return
@classmethod
def shutdown(cls, dir_):
    """
    Final cleanup of the caching file store at batch system shutdown: handle any dead
    jobs recorded in the cache state, then remove the whole cache directory.

    :param dir_: The directory that will contain the cache state file.
    """
    cacheInfo = cls._CacheState._load(os.path.join(dir_, '_cacheState'))
    cls.findAndHandleDeadJobs(cacheInfo, batchSystemShutdown=True)
    shutil.rmtree(dir_)
def __del__(self):
"""
Cleanup function that is run when destroying the class instance that ensures that all the
file writing threads exit.
"""
self.updateSemaphore.acquire()
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
self.updateSemaphore.release()
class NonCachingFileStore(FileStore):
    """
    A file store variant with no local caching: reads always pull from the job store
    and writes go straight to it, with per-job state tracked in a dill-pickled
    '.jobState' file inside the job's temp directory.
    """

    def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
        self.jobStore = jobStore
        self.jobGraph = jobGraph
        self.jobName = str(self.jobGraph)
        self.localTempDir = os.path.abspath(localTempDir)
        self.inputBlockFn = inputBlockFn
        self.jobsToDelete = set()
        self.loggingMessages = []
        self.filesToDelete = set()
        super(NonCachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
        # This will be defined in the `open` method.
        self.jobStateFile = None
        # Maps each file store ID to every local path it was written from / read to.
        self.localFileMap = defaultdict(list)

    @contextmanager
    def open(self, job):
        """
        Context manager wrapping the execution of `job`: creates a per-job temp dir and
        job state file, chdirs into the dir for the yield, and afterwards logs disk
        usage, runs deferred functions, and deletes the job state file.
        """
        jobReqs = job.disk
        startingDir = os.getcwd()
        self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
        # NOTE(review): self.workFlowDir is presumably set by the FileStore base
        # class -- confirm against the superclass __init__.
        self.findAndHandleDeadJobs(self.workFlowDir)
        self.jobStateFile = self._createJobStateFile()
        freeSpace, diskSize = getFileSystemSize(self.localTempDir)
        if freeSpace <= 0.1 * diskSize:
            logger.warning('Starting job %s with less than 10%% of disk space remaining.',
                           self.jobName)
        try:
            os.chdir(self.localTempDir)
            yield
        finally:
            diskUsed = getDirSizeRecursively(self.localTempDir)
            logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
                         "{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
                         "its run.".format(jobName=self.jobName,
                                           percent=(float(diskUsed) / jobReqs * 100 if
                                                    jobReqs > 0 else 0.0),
                                           humanDisk=bytes2human(diskUsed),
                                           disk=diskUsed,
                                           humanRequestedDisk=bytes2human(jobReqs),
                                           requestedDisk=jobReqs))
            self.logToMaster(logString, level=logging.DEBUG)
            if diskUsed > jobReqs:
                self.logToMaster("Job used more disk than requested. Consider modifying the user "
                                 "script to avoid the chance of failure due to incorrectly "
                                 "requested resources. " + logString, level=logging.WARNING)
            os.chdir(startingDir)
            jobState = self._readJobState(self.jobStateFile)
            deferredFunctions = jobState['deferredFunctions']
            failures = self._runDeferredFunctions(deferredFunctions)
            for failure in failures:
                self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
            # Finally delete the job from the worker
            os.remove(self.jobStateFile)

    def writeGlobalFile(self, localFileName, cleanup=False):
        """Upload a local file to the job store and return a size-annotated FileID."""
        absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
        # A cleanup ID ties the file's lifetime to this job's job store entry.
        cleanupID = None if not cleanup else self.jobGraph.jobStoreID
        fileStoreID = self.jobStore.writeFile(absLocalFileName, cleanupID)
        self.localFileMap[fileStoreID].append(absLocalFileName)
        return FileID.forPath(fileStoreID, absLocalFileName)

    def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
        """
        Download a job store file to `userPath` (or a fresh temp file name) and return
        the local path.  `cache` and `mutable` are accepted for interface compatibility
        with the caching file store but are not used here.
        """
        if userPath is not None:
            localFilePath = self._resolveAbsoluteLocalPath(userPath)
            if os.path.exists(localFilePath):
                raise RuntimeError(' File %s ' % localFilePath + ' exists. Cannot Overwrite.')
        else:
            localFilePath = self.getLocalTempFileName()
        self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
        self.localFileMap[fileStoreID].append(localFilePath)
        return localFilePath

    @contextmanager
    def readGlobalFileStream(self, fileStoreID):
        """Stream a job store file without materializing it on local disk."""
        with self.jobStore.readFileStream(fileStoreID) as f:
            yield f

    def exportFile(self, jobStoreFileID, dstUrl):
        """Copy a job store file out to an external URL via the job store."""
        self.jobStore.exportFile(jobStoreFileID, dstUrl)

    def deleteLocalFile(self, fileStoreID):
        """
        Delete every recorded local copy of `fileStoreID`.

        :raises OSError: with errno ENOENT if no local copy of the file is known.
        """
        try:
            localFilePaths = self.localFileMap.pop(fileStoreID)
        except KeyError:
            raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
        else:
            for localFilePath in localFilePaths:
                os.remove(localFilePath)

    def deleteGlobalFile(self, fileStoreID):
        """Remove local copies now and schedule the job store copy for deletion."""
        try:
            self.deleteLocalFile(fileStoreID)
        except OSError as e:
            if e.errno == errno.ENOENT:
                # the file does not exist locally, so no local deletion necessary
                pass
            else:
                raise
        # The actual job store deletion happens in _updateJobWhenDone.
        self.filesToDelete.add(fileStoreID)

    def _blockFn(self):
        # there is no asynchronicity in this file store so no need to block at all
        return True

    def _updateJobWhenDone(self):
        """Synchronously complete the job's update in the job store (no writer threads)."""
        try:
            # Indicate any files that should be deleted once the update of
            # the job wrapper is completed.
            self.jobGraph.filesToDelete = list(self.filesToDelete)
            # Complete the job
            self.jobStore.update(self.jobGraph)
            # Delete any remnant jobs
            list(map(self.jobStore.delete, self.jobsToDelete))
            # Delete any remnant files
            list(map(self.jobStore.deleteFile, self.filesToDelete))
            # Remove the files to delete list, having successfully removed the files
            if len(self.filesToDelete) > 0:
                self.jobGraph.filesToDelete = []
                # Update, removing emptying files to delete
                self.jobStore.update(self.jobGraph)
        except:
            self._terminateEvent.set()
            raise

    def __del__(self):
        """
        Cleanup function that is run when destroying the class instance. Nothing to do since there
        are no async write events.
        """
        pass

    # Functions related to the deferred function logic
    @classmethod
    def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
        """
        Look at the state of all jobs registered in the individual job state files, and handle them
        (clean up the disk, and run any registered defer functions)

        :param str nodeInfo: The location of the workflow directory on the node.
        :param bool batchSystemShutdown: Is the batch system in the process of shutting down?
        :return:
        """
        # A list of tuples of (job name, pid or process running job, registered defer functions)
        for jobState in cls._getAllJobStates(nodeInfo):
            if not cls._pidExists(jobState['jobPID']):
                # using same logic to prevent races as CachingFileStore._setupCache
                myPID = str(os.getpid())
                cleanupFile = os.path.join(jobState['jobDir'], '.cleanup')
                with open(os.path.join(jobState['jobDir'], '.' + myPID), 'w') as f:
                    f.write(myPID)
                while True:
                    try:
                        # Claim the cleanup by renaming our PID file onto the shared name.
                        os.rename(f.name, cleanupFile)
                    except OSError as err:
                        if err.errno == errno.ENOTEMPTY:
                            # Another process already claimed the cleanup; see if it lives.
                            # NOTE(review): this `f` shadows the outer file handle, so the
                            # os.remove(f.name) below removes cleanupFile rather than our
                            # own '.<pid>' claim file -- verify this is the intent.
                            with open(cleanupFile, 'r') as f:
                                cleanupPID = f.read()
                            if cls._pidExists(int(cleanupPID)):
                                # Cleanup your own mess. It's only polite.
                                os.remove(f.name)
                                break
                            else:
                                # The claimant died too; drop the stale claim and retry.
                                os.remove(cleanupFile)
                                continue
                        else:
                            raise
                    else:
                        logger.warning('Detected that job (%s) prematurely terminated. Fixing the '
                                       'state of the job on disk.', jobState['jobName'])
                        if not batchSystemShutdown:
                            logger.debug("Deleting the stale working directory.")
                            # Delete the old work directory if it still exists. Do this only during
                            # the life of the program and dont' do it during the batch system
                            # cleanup. Leave that to the batch system cleanup code.
                            shutil.rmtree(jobState['jobDir'])
                        # Run any deferred functions associated with the job
                        logger.debug('Running user-defined deferred functions.')
                        cls._runDeferredFunctions(jobState['deferredFunctions'])
                        break

    @staticmethod
    def _getAllJobStates(workflowDir):
        """
        Generator function that deserializes and yields the job state for every job on the node,
        one at a time.

        :param str workflowDir: The location of the workflow directory on the node.
        :return: dict with keys (jobName, jobPID, jobDir, deferredFunctions)
        :rtype: dict
        """
        jobStateFiles = []
        for root, dirs, files in os.walk(workflowDir):
            for filename in files:
                if filename == '.jobState':
                    jobStateFiles.append(os.path.join(root, filename))
        for filename in jobStateFiles:
            try:
                yield NonCachingFileStore._readJobState(filename)
            except IOError as e:
                if e.errno == 2:
                    # job finished & deleted its jobState file since the jobState files were
                    # discovered
                    continue
                else:
                    raise

    @staticmethod
    def _readJobState(jobStateFileName):
        # Job state files are dill-pickled dicts; see _createJobStateFile.
        with open(jobStateFileName, 'rb') as fH:
            state = dill.load(fH)
        return state

    def _registerDeferredFunction(self, deferredFunction):
        """Append a deferred function to this job's state file, atomically via tmp+rename."""
        with open(self.jobStateFile, 'rb') as fH:
            jobState = dill.load(fH)
        jobState['deferredFunctions'].append(deferredFunction)
        with open(self.jobStateFile + '.tmp', 'wb') as fH:
            dill.dump(jobState, fH)
        os.rename(self.jobStateFile + '.tmp', self.jobStateFile)
        logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)

    def _createJobStateFile(self):
        """
        Create the job state file for the current job and fill in the required
        values.

        :return: Path to the job state file
        :rtype: str
        """
        jobStateFile = os.path.join(self.localTempDir, '.jobState')
        jobState = {'jobPID': os.getpid(),
                    'jobName': self.jobName,
                    'jobDir': self.localTempDir,
                    'deferredFunctions': []}
        # Write to a tmp name then rename so readers never observe a partial file.
        with open(jobStateFile + '.tmp', 'wb') as fH:
            dill.dump(jobState, fH)
        os.rename(jobStateFile + '.tmp', jobStateFile)
        return jobStateFile

    @classmethod
    def shutdown(cls, dir_):
        """
        Final cleanup at batch system shutdown: run deferred functions of dead jobs.

        :param dir_: The workflow directory that will contain all the individual worker
            directories.
        """
        cls.findAndHandleDeadJobs(dir_, batchSystemShutdown=True)
class FileID(str):
    """
    A class to wrap the job store file id returned by writeGlobalFile and any attributes
    we may want to add to it (currently just the file's size in bytes).
    """

    def __new__(cls, fileStoreID, *args):
        # Only the ID itself participates in str construction; any extra
        # positional arguments (e.g. size) are consumed by __init__.
        return super(FileID, cls).__new__(cls, fileStoreID)

    def __init__(self, fileStoreID, size):
        # Deliberately forward no arguments: in Python 3, object.__init__
        # (reached via str) chokes on any extra arguments.
        super(FileID, self).__init__()
        self.size = size

    @classmethod
    def forPath(cls, fileStoreID, filePath):
        """Build a FileID for *fileStoreID* whose size is taken from the file at *filePath*."""
        return cls(fileStoreID, os.path.getsize(filePath))
def shutdownFileStore(workflowDir, workflowID):
    """
    Run the deferred functions from any prematurely terminated jobs still lingering on the
    system and carry out any necessary filestore-specific cleanup.

    This is a destructive operation and it is important to ensure that there are no other
    running processes on the system that are modifying or using the file store for this
    workflow.

    This is intended to be the last call to the file store in a Toil run, called by the
    batch system cleanup function upon batch system shutdown.

    :param str workflowDir: The path to the cache directory
    :param str workflowID: The workflow ID for this invocation of the workflow
    """
    cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
    # A cache directory on disk means the run used the caching file store; its
    # absence means the non-caching store was in use.  No cache lock is needed
    # here since this is the final cleanup and no other process should be
    # touching the cache any more.
    if not os.path.exists(cacheDir):
        NonCachingFileStore.shutdown(workflowDir)
    else:
        CachingFileStore.shutdown(cacheDir)
class CacheError(Exception):
    """
    Base class for cache-related errors; raised e.g. when the user attempts to add a
    non-local file to cache.
    """
    def __init__(self, message):
        super(CacheError, self).__init__(message)
class CacheUnbalancedError(CacheError):
    """
    Raised if file store can't free enough space for caching
    """
    # Fixed the duplicated word ("Unable unable") in this user-facing message.
    message = 'Unable to free enough space for caching. This error frequently arises due ' \
              'to jobs using more disk than they have requested. Turn on debug logging to see ' \
              'more information leading up to this error through cache usage logs.'

    def __init__(self):
        super(CacheUnbalancedError, self).__init__(self.message)
class IllegalDeletionCacheError(CacheError):
    """
    Error Raised if the Toil detects the user deletes a cached file
    """
    def __init__(self, deletedFile):
        # deletedFile: path of the cache-tracked file that went missing.
        message = 'Cache tracked file (%s) deleted explicitly by user. Use deleteLocalFile to ' \
                  'delete such files.' % deletedFile
        super(IllegalDeletionCacheError, self).__init__(message)
class InvalidSourceCacheError(CacheError):
    """
    Error Raised if the user attempts to add a non-local file to cache
    """
    def __init__(self, message):
        super(InvalidSourceCacheError, self).__init__(message)
|
request.py | import base64
import enum
import json
import threading
import time
import urllib.parse as parse
from dataclasses import dataclass
from typing import List, Tuple, Union
from urllib.parse import urlparse
import requests
from author.models import Author
from backend.settings import NETLOC
from comment.models import Comment
from django.core.cache import cache
from django.http import HttpRequest
from nodes.models import Node
from posts.models import Post
from requests.exceptions import RequestException
from rest_framework.request import Request
from rest_framework.response import Response
class ClassType(enum.Enum):
    """Kind of object an id/URL refers to; selects which path segment getId extracts."""
    AUTHOR = 0
    POST = 1
    COMMENT = 2
class ParsedRequest(Request):
    """
    A DRF Request augmented with the parsed object id and a flag indicating whether
    that id refers to an object hosted locally or on a remote node.
    """
    def __init__(self, request: Request, islocal: bool, id: str):
        # Adopt the wrapped request's __dict__ wholesale so every method and field of
        # the original Request instance keeps working on this wrapper.
        self.__dict__ = request.__dict__  # inherit all methods and fields of a instance of Request
        # id: a plain local id, a full remote URL, or None when the remote domain is
        # not registered (see parseIncomingRequest).
        self.id = id
        self.islocal = islocal
def getId(link: str, type: ClassType):
    """
    Extract the object id that follows the type's marker segment in a '/'-separated path.

    e.g. for ``.../author/<id>/...`` with ``ClassType.AUTHOR`` this returns ``<id>``.

    :param link: the URL (or URL-like id) to parse
    :param type: which kind of id to pull out of the path
    :raises ValueError: if ``type`` is not a known ClassType; note that a missing
        marker segment also surfaces as ValueError, from ``list.index``.
    """
    items = link.split("/")
    if type == ClassType.AUTHOR:
        target = items[items.index("author") + 1]
    elif type == ClassType.POST:
        target = items[items.index("posts") + 1]
    elif type == ClassType.COMMENT:
        target = items[items.index("comments") + 1]
    else:
        # ValueError is a subclass of Exception, so callers catching the old
        # bare Exception still work.
        raise ValueError("unknown class type at getid, util.request")
    return target
def parseIncomingRequest(methodToCheck: List[str] = None, type: ClassType = ClassType.AUTHOR):
    """
    Decorator factory for views whose URL embeds an object id that may be either local
    or a full (escaped) remote URL: replaces the incoming request with a ParsedRequest
    carrying the resolved id and an islocal flag.

    :param methodToCheck: HTTP methods that need parsing; other methods pass through.
        NOTE(review): the None default combined with the `in` test below raises
        TypeError if no list is supplied -- presumably callers always pass one.
    :param type: which ClassType's id to extract from the URL path.
    """
    def decorateFunc(func):
        def inner(request: Union[Request, HttpRequest], *args, **kwargs):
            if request.method not in methodToCheck:
                # this method dont need parsing
                return func(request, *args, **kwargs)
            url = request.get_full_path()
            # Ids have '/' replaced with '~' to make them url safe; undo that here.
            parsedId = getId(url, type).replace("~", "/")
            target = f"https://{parsedId}"
            parsed = parse.urlparse(target)
            if parsed.netloc == "" or parsed.path == "":
                parsedRequest = ParsedRequest(
                    request, islocal=True, id=parsedId
                )  # the id provided dont seem like a valid url, treat as normal id instead. good luck.
                return func(parsedRequest, *args, **kwargs)
            if parsed.netloc == NETLOC:
                # the netloc found is our domain, parse for real id.
                print("parsed id", parsedId)
                parsedId = getId(parsedId, type)
                parsedRequest = ParsedRequest(request, islocal=True, id=parsedId)  # is local is true and id is author id provided by frontend
                return func(parsedRequest, *args, **kwargs)
            # now we know the netloc is neither blank nor our domain, lets check if we have
            # the domain in our nodes db
            if not Node.objects.filter(netloc=parsed.netloc).exists():
                # the domain requested is not registered; hand through with id=None
                return func(ParsedRequest(request, islocal=False, id=None), *args, **kwargs)
            # registered foreign node: pass the full remote URL (trailing slash normalized)
            if target[-1] != "/":
                target += "/"
            parsedRequest = ParsedRequest(request, islocal=False, id=target)
            return func(parsedRequest, *args, **kwargs)
        return inner
    return decorateFunc
@dataclass
class QueryResponse:
    """Lightweight stand-in for an HTTP response, returned by makeRequest."""
    # Response body: makeRequest stores the remote response's raw ``.content``
    # (bytes) on success, but plain ``str`` error messages on failure.
    content: Union[str, bytes]
    # HTTP-style status code; 400 is also used for local validation failures.
    status_code: int
def makeRequest(method: str, url: str, data: Union[dict, None] = None, queryParams = None) -> QueryResponse:
    """Perform an authenticated HTTP request against a registered remote node.

    The target node is looked up by netloc; its stored base url and basic-auth
    credentials are used.  Successful (2xx) responses are cached under
    ``method_url`` and served from the cache on later calls.  All failures are
    reported as a QueryResponse with status 400 rather than raised.

    NOTE(review): the cache has no visible expiry here — cached entries may be
    served stale; confirm the cache backend applies a TTL.
    """
    cacheKey = f"{method}_{url}"
    if cacheKey in cache:  # if the request has recently been gotten, just return the cached version
        return cache.get(cacheKey)
    parsed = urlparse(url)
    print("!!!!!", parsed)
    if not parsed.scheme or (parsed.scheme != "http" and parsed.scheme != "https"):
        return QueryResponse("error, invalid url", 400)
    # . TODO varifying netloc is pointless :?
    # if not Node.objects.filter(netloc = parsed.netloc).exists():
    # return Response({"error": "requested domain is not registered"}, status=400)
    # node the request is refering to definitely exists
    try:
        node: Node = Node.objects.get(netloc=parsed.netloc)
    except Node.DoesNotExist:
        return QueryResponse("node doesnt exist", 400)
    if not node.allowOutgoing:
        return QueryResponse("error, outgoing request to this node is blocked by admin", 400)
    # Rebuild the url against the node's registered base, keeping everything
    # from the "author" path segment onward, and force a trailing slash.
    fixedurl = f"{node.url}{url[url.find('author'):]}"
    if fixedurl[-1] != "/":
        fixedurl += "/"
    try:
        # HTTP basic auth using the credentials configured for this node.
        s = f"{node.outgoingName}:{node.outgoingPassword}".encode("utf-8")
        print(s)
        result = requests.request(
            method,
            fixedurl,
            data=json.dumps(data) if type(data) is dict else data,
            params=queryParams if queryParams is not None else {},
            headers=({"Authorization": f"Basic {base64.b64encode(s).decode('utf-8')}", "Accept": "*/*", "Content-Type": "application/json"}),
        )
    except RequestException as e:
        print("execption occured in utils.request", str(e))
        return QueryResponse(f"error {str(e)}", 400)
    response = QueryResponse(result.content, result.status_code)
    # Only cache successful responses.
    if 200<=result.status_code < 300:
        cache.set(cacheKey, response)
    return response
@dataclass
class IsLocalResponse:
    """Result of checkIsLocal: whether the referenced object is stored on this
    server, its class type, and both the short and full forms of its id.
    """
    isLocal: bool
    type: ClassType
    id: str  # just <postid>
    long: str  # http://domain.com/authors/<id>/posts/<postid>
def checkIsLocal(fullId: str, type: ClassType = None) -> IsLocalResponse:
    """
    fullid can be either a short or long id, but if using short id, type must be specified.
    Returns an IsLocalResponse, or None when the id cannot be classified
    (unrecognised path shape, or a short id given without a type).
    """
    shortId = None
    items = fullId.split("/")
    if len(items) > 1:
        try:
            # Long id: infer the type from the path segment preceding the id.
            if "comments" in items:
                shortId = items[items.index("comments") + 1]
                type = ClassType.COMMENT
            elif "posts" in items:
                shortId = items[items.index("posts") + 1]
                type = ClassType.POST
            elif "author" in items:
                shortId = items[items.index("author") + 1]
                type = ClassType.AUTHOR
            else:
                return None
        except Exception as e:
            print("error occured converting types in uti.request.checkislocal", e)
            return None
    elif type is None:
        print("type is None and only short id is provided in utils.request.checkIsLocal")
        return None
    # if has isolate id of the item, and know the type of the item, then just lookup in the respective table to chech for existance.
    # NOTE(review): the lookup uses fullId as pk even when a shortId was parsed
    # from a long id — confirm the models store full urls as primary keys.
    isLocal = {ClassType.AUTHOR: Author, ClassType.POST: Post, ClassType.COMMENT: Comment}[type].objects.filter(pk=(fullId)).exists()
    return IsLocalResponse(isLocal or len(items) < 2, type, shortId if len(items) > 1 else fullId, fullId)
def returnGETRequest(url: str, params : dict = None) -> Response:
    """Proxy a GET through makeRequest and wrap the result in a DRF Response.

    Returns 404 when *url* is None (address not registered); otherwise the
    remote payload is returned as parsed JSON when possible, raw otherwise,
    preserving the remote status code.
    """
    if url is None:
        return Response("The requested address is not registered with this server yet.", status=404)
    result = makeRequest("GET", url, queryParams=params)
    print("get,", result.status_code)
    try:
        return Response(json.loads(result.content), status=result.status_code)
    except (ValueError, TypeError):
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; only JSON-decode failures should fall through here.
        return Response(result.content, status=result.status_code)
def returnPOSTRequest(url: str, data: Union[str, dict]) -> Response:
    """Proxy a POST through makeRequest and wrap the result in a DRF Response.

    *data* may be a pre-serialized JSON string or a dict (serialized here).
    Returns 404 when *url* is None; otherwise mirrors the remote payload and
    status code, parsing JSON when possible.
    """
    if url is None:
        return Response("The requested address is not registered with this server yet.", status=404)
    result = makeRequest("POST", url, data if type(data) is str else json.dumps(data))
    print("post,", result.status_code, url)
    print(json.dumps(data))
    try:
        return Response(json.loads(result.content), status=result.status_code)
    except (ValueError, TypeError):
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; only JSON-decode failures should fall through here.
        return Response(result.content, status=result.status_code)
def makeMultipleGETs(
    urls: List[str],
) -> List[Tuple[str, QueryResponse]]:
    """Fetch all *urls* concurrently via makeRequest("GET", ...).

    Returns (url, QueryResponse) pairs in completion order.  list.append is
    atomic in CPython, so the shared ``output`` list is safe to append to
    from the worker threads.
    """
    output = []
    threads = []
    t0 = time.time()
    for url in urls:
        t = threading.Thread(target=lambda link: output.append((link, makeRequest("GET", link))), args=(url,))
        threads.append(t)
        t.start()
    for thread in threads:
        thread.join()
    # BUG FIX: time.time() deltas are in seconds, but the message said "ms".
    print(f"makeMultipleGETs fetched {len(output)} urls, taking {time.time() - t0} seconds")
    return output
|
acamar.py | #!/usr/bin/env python
import sys, requests, json, os, threading
from bs4 import BeautifulSoup as bs
def enterRes(e):
    """Record the hostname part of *e* (text before any '/') in the global
    ``result`` list, skipping duplicates."""
    host = e.split('/', 1)[0]
    if host not in result:
        result.append(host)
def enumHackertarget():
    """Query the hackertarget.com hostsearch API for subdomains of the target."""
    print('[!] Enumerating hackertarget.com')
    body = requests.get('https://api.hackertarget.com/hostsearch/?q=' + domain).text
    print('\t - proceeding JSON output')
    # Response is CSV-ish: "hostname,ip" per line; keep the hostname column.
    for line in body.split('\n'):
        enterRes(line.split(',')[0])
def enumPtrarchive():
    """Scrape ptrarchive.com's search page for subdomains of the target."""
    print('[!] Enumerating ptrarchive.com')
    c = requests.Session()
    h = {
        'Referer': 'http://www.ptrarchive.com',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5'
    }
    # The site gates results behind this cookie; the hard-coded value works.
    t = {'pa_id': '1337'}
    print('\t - hooking up page via fake-cookie "pa_id: 1337"')
    r = c.get('http://www.ptrarchive.com/tools/search4.htm?label=' + domain + '&date=ALL', headers=h, cookies=t).text
    s = bs(r, 'html.parser')
    e = s.find('pre').text.split('\n')
    print('\t - proceeding HTML for filtering out the result')
    for i in e:
        # Each line looks like "[timestamp] name record ..."; take the token
        # after the closing bracket (note: e is rebound here, shadowing the outer list).
        e = i[i.find(']'):].split(' ')
        try:
            if e[1].endswith('.' + domain) and not e[1].startswith('*'):
                enterRes(e[1])
        except IndexError:
            # Lines without a second token (headers, blanks) are skipped.
            pass
def enumCertspotter():
    """Query the certspotter.com certificate API for subdomains of the target."""
    print('[!] Enumerating certspotter.com')
    certs = json.loads(requests.get('https://certspotter.com/api/v0/certs?domain=' + domain).text)
    print('\t - proceeding JSON output')
    for cert in certs:
        for name in cert['dns_names']:
            # Keep real subdomains only; drop wildcards and unrelated SANs.
            if name.endswith('.' + domain) and not name.startswith('*'):
                enterRes(name)
def enumRiddler():
    """Scrape riddler.io search results for subdomains of the target."""
    print('[!] Enumerating riddler.io')
    page = requests.get('https://riddler.io/search?q=pld:' + domain).text
    cells = bs(page, 'html.parser').findAll('td', class_='col-lg-5 col-md-5 col-sm-5')
    print('\t - proceeding HTML for filtering out the result')
    for cell in cells:
        enterRes(cell.text.strip())
def enumCrt():
    """Scrape crt.sh certificate-transparency search results for subdomains."""
    # CONSISTENCY FIX: marker was '[-]' while every other enumerator uses '[!]'.
    print('[!] Enumerating crt.sh')
    page = requests.get('https://crt.sh/?q=%25' + domain).text
    soup = bs(page, 'html.parser')
    try:
        rows = soup.findAll('table')[1].findAll('tr')
    except IndexError:
        # The results table is absent when crt.sh errors out or has no data.
        print('\t - crt.sh did not respond, continuing')  # typo "continueing" fixed
    else:
        print('\t - proceeding HTML for filtering out the result')
        for row in rows:
            cells = row.findAll('td')
            try:
                name = cells[4].text
                if name.endswith('.' + domain) and not name.startswith('*'):
                    enterRes(name)
            except IndexError:
                # Header/short rows have fewer than five cells; skip them.
                pass
def enumSecuritytrails():
    """Scrape the securitytrails.com apex-domain listing for subdomains."""
    print('[!] Enumerating securitytrails.com')
    page = requests.get('https://securitytrails.com/list/apex_domain/' + domain).text
    cells = bs(page, 'html.parser').findAll('td')
    print('\t - proceeding HTML for filtering out the result')
    for cell in cells:
        anchor = cell.find('a')
        if anchor:
            enterRes(anchor.text)
def enumThreatminer():
    """Query the threatminer.org domain API (6 s timeout) for subdomains."""
    print('[!] Enumerating threatminer.org')
    try:
        raw = requests.get('https://api.threatminer.org/v2/domain.php?q=' + domain + '&rt=5', timeout=6).text
        report = json.loads(raw)
        print('\t - proceeding JSON output')
        for entry in report['results']:
            enterRes(entry)
    except requests.exceptions.Timeout:
        print('\t - API "api.threatminer.org/v2" looks down from here!')
def enumVirustotal():
    """Walk virustotal's paginated subdomain API (40 entries per page)."""
    print('[!] Enumerating virustotal.com')
    r = requests.get('https://www.virustotal.com/ui/domains/' + domain + '/subdomains?limit=40').text
    j = json.loads(r)
    try:
        # Presence of links.next means there are more pages to fetch.
        n = str(j['links']['next'])
        c = 1
        for i in j['data']:
            enterRes(i['id'])
        # Follow the next-link chain until a page has no successor.
        while type(n) is str:
            print('\t proceeding result set: ' + str(c))
            r = requests.get(n).text
            j = json.loads(r)
            for i in j['data']:
                enterRes(i['id'])
            try:
                n = str(j['links']['next'])
                c = c + 1
            except KeyError:
                # No next link: last page reached.
                break
    except KeyError:
        # Single page of results (no links.next on the first response).
        print('\t - result-set consists of < 40 entries')
        for i in j['data']:
            enterRes(i['id'])
def enumThreatcrowd():
    """Query the threatcrowd.org domain-report API for subdomains."""
    print('[!] Enumerating threadcrowd.com')
    report = json.loads(requests.get('https://threatcrowd.org/searchApi/v2/domain/report/?domain=' + domain).text)
    print('\t - proceeding JSON output')
    if "subdomains" not in report:
        print('\t - JSON output seems to be empty!')
        return
    for name in report['subdomains']:
        enterRes(name)
def enumFindsubdomains():
    """Scrape the findsubdomains.com listing page for subdomains."""
    print('[!] Enumerating findsubdomains.com')
    page = requests.get('https://findsubdomains.com/subdomains-of/' + domain).text
    cells = bs(page, 'html.parser').findAll('td', {'data-field': 'Domain'})
    print('\t - proceeding HTML for filtering out the result')
    for cell in cells:
        hostname = cell.get('title')
        if hostname:
            enterRes(hostname)
def enumDNSDumpster():
    """Submit the dnsdumpster.com search form (CSRF-protected) and scrape the
    host-records table for subdomains."""
    print('[!] Enumerating dnsdumpster.com')
    print('\t - requesting valid session')
    c = requests.Session()
    # The initial GET only exists to obtain the csrftoken cookie for the POST.
    r = c.get('https://dnsdumpster.com').text
    h = {'Referer': 'https://dnsdumpster.com'}
    t = c.cookies.get_dict()['csrftoken']
    print('\t - got valid session: ' + t + ', proceeding output')
    r = c.post('https://dnsdumpster.com', data={'csrfmiddlewaretoken': t, 'targetip': domain}, headers=h).text
    s = bs(r, 'html.parser')
    # Host records live in the last table; hostnames are the first token of
    # each col-md-4 cell (note: t is rebound twice below, shadowing the token).
    t = s.findAll('table')[-1].findAll('td', class_='col-md-4')
    for i in t:
        t = i.text.split()[0]
        enterRes(t)
try:
    domain = sys.argv[1]
except IndexError:
    print('[X] No domain passed')
    sys.exit()
# Shared state read/written by the enum* functions above.
result = []
outpath = 'results/' + domain + '.txt'
print('[~] Acamar.py v.0.1 written by @SI9INT | Target set to: ' + domain)
functions = [
    enumDNSDumpster,
    enumFindsubdomains,
    enumThreatcrowd,
    enumThreatminer,
    enumVirustotal,
    enumSecuritytrails,
    enumHackertarget,
    enumCrt,
    enumCertspotter,
    enumRiddler,
    enumPtrarchive
]
threads = []
if __name__ == '__main__':
    # Run every enumerator concurrently; they all just append to `result`.
    for f in functions:
        t = threading.Thread(target=f)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    filtered = list(filter(None, result))
    # BUG FIX: the output file was opened unconditionally at import time and
    # leaked when the writes failed under the old try/finally; open it lazily
    # with `with`, and make sure the results directory exists first.
    os.makedirs('results', exist_ok=True)
    with open(outpath, 'w') as output:
        for i in filtered:
            output.write(i + '\n')
    print('[!] Finished, printing result:')
    # SECURITY FIX: os.system('cat results/' + domain + ...) passed the
    # attacker-controlled domain argument through a shell; read the file
    # in Python instead.
    with open(outpath) as fh:
        sys.stdout.write(fh.read())
    # BUG FIX: count the filtered (non-empty) entries actually written,
    # not the raw result list.
    print('[!] Counting ' + str(len(filtered)) + ' unique subdomains')
|
sensorReporter.py | #!/usr/bin/python
"""
Copyright 2016 Richard Koshak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Script: sensorReporter.py
Author: Rich Koshak
Date: February 10, 2016
Purpose: Uses the REST API or MQTT to report updates to the configured sensors, activates actuators based on MQTT messages
"""
print("Starting...")
import logging
import logging.handlers
import ConfigParser
import signal
import sys
import time
import traceback
from threading import *
from signalProc import *
import importlib
# Globals
logger = logging.getLogger('sensorReporter')
config = ConfigParser.ConfigParser(allow_no_value=True)
sensors = {}
actuators = {}
connections = {}
#------------------------------------------------------------------------------
# Main event loops
def on_message(client, userdata, msg):
    """Called when a message is received from a connection, send the current sensor state.
    We don't care what the message is."""
    try:
        logger.info("Received a request for current state, publishing")
        if msg is not None:
            print(msg.topic)
            logger.info("Topic: " + msg.topic + " Message: " + str(msg.payload))
        logger.info("getting states")
        for s in sensors:
            if sensors[s].poll > 0:
                sensors[s].checkState()  # checkState also publishes the state, so no explicit publishState call is needed
    except Exception:
        # BUG FIX: logger.info("Unexpected error:", arg) treated the arg as a
        # %-format value for a message with no placeholder, so the error was
        # never rendered; log it properly (and at error level).
        logger.error("Unexpected error: %s", sys.exc_info()[0])
def main():
    """Polls the sensor pins and publishes any changes"""
    if len(sys.argv) < 2:
        print "No config file specified on the command line!"
        sys.exit(1)
    loadConfig(sys.argv[1])
    # Stamp every sensor so the first poll happens one full interval from now.
    for s in sensors:
        sensors[s].lastPoll = time.time()
    logger.info("Kicking off polling threads...")
    while True:
        # Kick off a poll of the sensor in a separate thread when its interval has elapsed
        for s in sensors:
            if sensors[s].poll > 0 and (time.time() - sensors[s].lastPoll) > sensors[s].poll:
                sensors[s].lastPoll = time.time()
                Thread(target=check, args=(sensors[s],)).start()
        time.sleep(0.5) # give the processor a chance if REST is being slow
#------------------------------------------------------------------------------
# Signal Processing
# The decorators below causes the creation of a SignalHandler attached to this function for each of the
# signals we care about using the handles function above. The resultant SignalHandler is registered with
# the signal.signal so cleanup_and_exit is called when they are received.
@handles(signal.SIGTERM)
@handles(signal.SIGHUP)
@handles(signal.SIGINT)
def cleanup_and_exit():
    """ Signal handler to ensure we disconnect cleanly in the event of a SIGTERM or SIGINT. """
    logger.warn("Terminating the program")
    try:
        # Best effort: a failure releasing one resource must not block the rest.
        for key in connections:
            try:
                connections[key].disconnect()
            except AttributeError:
                # Connection object has no disconnect(); nothing to release.
                pass
        for s in sensors:
            try:
                sensors[s].cleanup()
            except AttributeError:
                # Sensor has no cleanup(); nothing to release.
                pass
    except:
        # Deliberately swallow everything — the process is exiting anyway.
        pass
    sys.exit(0)
# This decorator registers the function with the SignalHandler blocks_on so the SignalHandler knows
# when the function is running
@cleanup_and_exit.blocks_on
def check(s):
    """Gets the current state of the passed in sensor and publishes it"""
    # checkState is expected to publish the state as a side effect (see on_message).
    s.checkState()
#------------------------------------------------------------------------------
# Initialization
def configLogger(file, size, num, syslog):
    """Configure a rotating log"""
    # NOTE(review): the parameter 'file' shadows the Python 2 builtin; rename
    # when convenient.
    logger.setLevel(logging.DEBUG)
    if syslog != "YES":
        # Rotating file log: `num` backups of at most `size` bytes each.
        print "Configuring logger: file = " + file + " size = " + str(size) + " num = " + str(num)
        fh = logging.handlers.RotatingFileHandler(file, mode='a', maxBytes=size, backupCount=num)
        fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    elif syslog == "YES":
        # Route everything to the local syslog daemon instead of a file.
        print "Configuring syslogging"
        sh = logging.handlers.SysLogHandler('/dev/log', facility=logging.handlers.SysLogHandler.LOG_SYSLOG)
        sh.encodePriority(sh.LOG_SYSLOG, sh.LOG_INFO)
        slFormatter = logging.Formatter('[sensorReporter] %(levelname)s - %(message)s')
        sh.setFormatter(slFormatter)
        logger.addHandler(sh)
    logger.info("---------------Started")
def createDevice(config, section):
    """Configure a sensor or actuator from a config section.

    Dynamically imports the class named by the section's "Class" option,
    wires up any configured connections, and returns the constructed device
    (None when the class isn't importable on this platform).
    """
    try:
        module_name, class_name = config.get(section, "Class").rsplit(".", 1)
        MyDevice = getattr(importlib.import_module(module_name), class_name)
        params = lambda key: config.get(section, key)
        devConns = []
        try:
            for connStr in params("Connection").split(","):
                devConns.append(connections[connStr])
        except ConfigParser.NoOptionError:
            # No connection is valid e.g. an actuator connection target
            pass
        d = MyDevice(devConns, logger, params, sensors, actuators)
        # Poll == -1 marks an event-driven (non-polling) sensor: run it on its
        # own long-lived thread instead of the polling loop.
        if config.getfloat(section, "Poll") == -1:
            Thread(target=d.checkState).start() # don't need to use cleanup-on-exit for non-polling sensors
            logger.info("Started thread to run sensor")
        return d
    except ImportError:
        # BUG FIX: '"%s.%s" % module_name, class_name' bound % before the comma,
        # passing class_name as a stray logger arg and raising TypeError while
        # formatting; the args must be a tuple.
        logger.error("%s.%s is not supported on this platform" % (module_name, class_name))
def createConnection(config, section):
    """Instantiate the connection class named in *section* and register it in
    the global connections dict under its configured Name."""
    try:
        name = config.get(section, "Name")
        logger.info("Creating connection %s" % (name))
        module_name, class_name = config.get(section, "Class").rsplit(".", 1)
        MyConn = getattr(importlib.import_module(module_name), class_name)
        params = lambda key: config.get(section, key)
        connections[name] = MyConn(on_message, logger, params, sensors, actuators)
    except ImportError:
        # BUG FIX: the % args must be a tuple; the original passed class_name
        # as a stray second positional argument to logger.error.
        logger.error("%s.%s is not supported on this platform" % (module_name, class_name))
def loadConfig(configFile):
    """Read in the config file, set up the logger, and populate the sensors"""
    print "Loading " + configFile
    config.read(configFile)
    configLogger(config.get("Logging", "File"),
        config.getint("Logging", "MaxSize"),
        config.getint("Logging", "NumFiles"),
        config.get("Logging", "Syslog"))
    # create connections first, so devices can look them up by name
    logger.info("Creating connections...")
    for section in config.sections():
        if section.startswith("Connection"):
            createConnection(config, section)
    logger.info("Populating the sensor/actuator list...")
    for section in config.sections():
        if section.startswith("Sensor"):
            sensors[section] = createDevice(config, section)
        elif section.startswith("Actuator"):
            logger.debug("Adding actuator " + section)
            actuators[section] = createDevice(config, section)
    return sensors
if __name__ == "__main__":
main()
|
response_recorder.py | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from threading import Thread
import struct
from numpy import array, vstack
from traits.api import Array, Any, Instance, Float
from pychron.core.helpers.binpack import pack
from pychron.core.helpers.formatting import floatfmt
from pychron.loggable import Loggable
from pychron.managers.data_managers.csv_data_manager import CSVDataManager
class ResponseRecorder(Loggable):
    """Periodically samples an output device and one or two response devices,
    accumulating (time, value) series, and optionally streams each sample to a
    CSV frame via CSVDataManager.  Sampling runs on a daemon thread started by
    start() and stopped by stop().
    """
    # Sampling period in seconds.
    period = Float(2)
    response_data = Array
    output_data = Array
    setpoint_data = Array
    _alive = False
    output_device = Any
    response_device = Any
    response_device_secondary = Any
    data_manager = Instance(CSVDataManager)
    _start_time = 0
    _write_data = False
    def start(self, base_frame_name=None):
        """Reset the data arrays and launch the sampling thread.

        When *base_frame_name* is given, also open a CSV frame and write a
        header row; samples are then persisted as they are taken.
        """
        if self._alive:
            self.debug('response recorder already alive')
            return
        t = time.time()
        self._start_time = t
        # Seed each series with a (t, 0) row so vstack always has a base.
        self.response_data = array([(t, 0)])
        self.output_data = array([(t, 0)])
        self.setpoint_data = array([(t, 0)])
        self._write_data = False
        if base_frame_name:
            self._write_data = True
            self.data_manager = CSVDataManager()
            self.data_manager.new_frame(base_frame_name=base_frame_name)
            self.data_manager.write_to_frame(('#time', self.output_device.name,
                                              self.response_device.name,
                                              self.response_device_secondary.name))
        t = Thread(target=self.run)
        # NOTE(review): Thread.setDaemon is deprecated; t.daemon = True is the
        # modern spelling.
        t.setDaemon(True)
        t.start()
    def run(self):
        """Sampling loop; runs on its own thread until stop() clears _alive."""
        self.debug('start response recorder')
        self._alive = True
        st = self._start_time
        p = self.period
        rd = self.response_device
        rds = self.response_device_secondary
        od = self.output_device
        dm = self.data_manager
        wd = self._write_data
        # Accumulate into locals; the traits attributes are reassigned once at exit.
        odata = self.output_data
        rdata = self.response_data
        sdata = self.setpoint_data
        r2 = None
        while self._alive:
            to = time.time()
            t = to - st
            out = od.get_output()
            odata = vstack((odata, (t, out)))
            sp = od.get_setpoint()
            sdata = vstack((sdata, (t, sp)))
            r = rd.get_response(force=True)
            rdata = vstack((rdata, (t, r)))
            self.debug('response t={}, out={}, setpoint={}, response={}'.format(t, out, sp, r))
            if rds:
                r2 = rds.get_response(force=True)
            if wd:
                # NOTE(review): truthiness test — a secondary response of
                # exactly 0 falls into the 4-column branch; confirm intended.
                if r2:
                    datum = (t, out, sp, r, r2)
                else:
                    datum = (t, out, sp, r)
                datum = [floatfmt(x, n=3) for x in datum]
                dm.write_to_frame(datum)
            # Sleep out the remainder of the period, net of the measured work time.
            et = time.time() - to
            slt = p - et - 0.001
            if slt > 0:
                time.sleep(slt)
        self.output_data = odata
        self.response_data = rdata
        self.setpoint_data = sdata
    def check_reached_setpoint(self, v, n, tol, std=None):
        """
        return True if response is OK, i.e. average of last n points is within tol of v.
        if std is not None then standard dev must be less than std
        :param v: target setpoint value
        :param n: number of trailing samples to consider
        :param tol: allowed deviation of the mean from v
        :param std: optional upper bound on the sample standard deviation
        :return: bool
        """
        pts = self.response_data[-n:, 1]
        std_bit = True
        if std:
            std_bit = pts.std() < std
        error_bit = abs(pts.mean() - v) < tol
        return std_bit and error_bit
    def stop(self):
        """Stop the sampling thread and close any open CSV frame."""
        self.debug('stop response recorder')
        self._alive = False
        if self.data_manager:
            self.data_manager.close_file()
    def get_response_blob(self):
        """Pack the response series as little-endian float pairs (None when empty)."""
        if len(self.response_data):
            # return ''.join([struct.pack('<ff', x, y) for x, y in self.response_data])
            return pack('<ff', self.response_data)
    def get_output_blob(self):
        """Pack the output series as little-endian float pairs (None when empty)."""
        if len(self.output_data):
            return pack('<ff', self.output_data)
        # return ''.join([struct.pack('<ff', x, y) for x, y in self.output_data])
    def get_setpoint_blob(self):
        """Pack the setpoint series as little-endian float pairs (None when empty)."""
        if len(self.setpoint_data):
            return pack('<ff', self.setpoint_data)
        # return ''.join([struct.pack('<ff', x, y) for x, y in self.setpoint_data])
    @property
    def max_response(self):
        # NOTE(review): .max() here runs over the whole 2-column (time, value)
        # array, so this can return a timestamp rather than a response value;
        # confirm whether self.response_data[:, 1].max() was intended.
        if len(self.response_data):
            return self.response_data.max()
# ============= EOF =============================================
|
simple.py | #!/usr/bin/env python3
#
# Copyright (C) 2019 Jayson
#
import time
import threading
def say_hello(name):
    """Print a greeting for *name* ten times."""
    greeting = "hello {}".format(name)
    for _ in range(10):
        print(greeting)
if __name__ == '__main__':
    # FIX: guard thread start-up behind __main__ so importing this module has
    # no side effects, and join both threads so the demo waits for them.
    thread1 = threading.Thread(target=say_hello, args=('small red',))
    thread2 = threading.Thread(target=say_hello, args=('small light',))
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
|
watcher_plugin.py | import multiprocessing
import cv2
from Plugins import base
from FacialRecognition.model import FacialRecognitionModel
from utils import parallelization
class WatcherPlugin(base.Plugin):
    """Plugin that watches the default webcam, runs facial recognition on the
    frames in a worker process, and renders annotated video in a cv2 window.
    """
    def __init__(self):
        self.name = 'WatcherPlugin'
        super().__init__(name=self.name, command_hooks={}, modifiers={}) # ok for plugin to not have commands
        # Run the capture/display loop in its own process so it never blocks the host.
        multiprocessing.Process(target=self._process_video).start()
    def _process_video(self):
        """Capture frames, hand them to the recognition worker, and draw the
        results.  Runs forever in its own process.
        """
        data_processing_queue = parallelization.TwoWayProcessQueue()
        model = FacialRecognitionModel()
        process = multiprocessing.Process(target=self._process_faces,
                                          args=(data_processing_queue,
                                                model))
        process.start()
        video_capture = cv2.VideoCapture(0)
        # Recognition lags capture, so reuse the last annotations until a
        # fresh result arrives.
        last_frame_data = None
        while True:
            ret, frame = video_capture.read()
            data_processing_queue.server_put(frame)
            if data_processing_queue.client_empty():
                frame_data = last_frame_data
            else:
                frame_data = data_processing_queue.client_get()
            if frame_data is not None:
                # Draw one labelled box per detected face.
                for d in frame_data:
                    left = d['location']['left']
                    right = d['location']['right']
                    top = d['location']['top']
                    bottom = d['location']['bottom']
                    cv2.rectangle(img=frame,
                                  pt1=(left, top),
                                  pt2=(right, bottom),
                                  color=self._color_matcher(d['distance']),
                                  thickness=2)
                    # Label just below the box, scaled with the face height.
                    cv2.putText(frame,
                                d['name'],
                                (left, int(bottom + (15 * (bottom - top) / 100))),
                                cv2.FONT_HERSHEY_DUPLEX,
                                (bottom - top) / 200,
                                (255, 255, 255),
                                lineType=cv2.LINE_AA)
                last_frame_data = frame_data
            # Display the resulting frame
            cv2.imshow('Video', frame)
            cv2.waitKey(1)
            self.to_cache(frame_data) # cache the frame data so all can have access to recent frames
    @staticmethod
    def _process_faces(video_queue, model):
        """Worker loop: pull frames off the queue, run the model, push results back."""
        while True:
            if not video_queue.server_empty():
                frame = video_queue.server_get()
                frame_data = model.process(frame)
                video_queue.client_put(frame_data) # return data to be displayed
    @staticmethod
    def _color_matcher(dist):
        """Map a recognition distance to a BGR color (greener = closer match)."""
        cmap = {6: (229, 181, 0),
                5: (222, 216, 0),
                4: (183, 216, 0),
                3: (139, 210, 0),
                2: (98, 203, 0),
                1: (59, 197, 0),
                0: (22, 191, 0)}
        # Key on the first digit of the fractional part of the distance.
        dist = str(dist)
        dec = dist.split('.')[-1]
        for k, v in cmap.items():
            if int(dec[0]) == k:
                return v[::-1] # opencv works in BGR
        return 0, 0, 0 # if there is some error in the mapping just use black
|
populations.py | import torch
import torch.multiprocessing as mp
class Population:
    """A collection of swarms that are trained concurrently, one process each."""

    def __init__(self):
        self.swarms = []

    def add(self, swarm) -> None:
        """Register *swarm* for the next call to run()."""
        self.swarms.append(swarm)

    def run(self,
            data: torch.Tensor,
            episodes: int,
            ) -> None:
        """Train every registered swarm in its own process and wait for all of
        them to finish."""
        workers = [mp.Process(target=s.train, args=(data, episodes)) for s in self.swarms]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
|
xdcrbasetests.py | import sys
import time
import datetime
import unittest
import logger
import logging
import copy
import string
import random
from threading import Thread
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.cluster import Cluster
from couchbase_helper.document import View
from TestInput import TestInputSingleton
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from memcached.helper.data_helper import MemcachedClientHelper
from remote.remote_util import RemoteMachineShellConnection
from remote.remote_util import RemoteUtilHelper
from couchbase_helper.stats_tools import StatsCommon
from scripts.collect_server_info import cbcollectRunner
from tasks.future import TimeoutError
from security.rbac_base import RbacBase
from couchbase_helper.documentgenerator import BlobGenerator
from membase.api.exception import ServerUnavailableException, XDCRException
from testconstants import STANDARD_BUCKET_PORT
from lib.ep_mc_bin_client import MemcachedClient
from lib.mc_bin_client import MemcachedClient as MC_MemcachedClient
#===============================================================================
# class: XDCRConstants
# Constants used in XDCR application testing. It includes input parameters as well
# as internal constants used in the code.
# All string literals used in the XDCR code shall be defined in this class.
#===============================================================================
class XDCRConstants:
    """String constants shared by the XDCR tests: testrunner input-parameter
    names plus the topology / replication-type / seed-mode literals used
    throughout the code.
    """
    INPUT_PARAM_REPLICATION_DIRECTION = "rdirection"
    INPUT_PARAM_CLUSTER_TOPOLOGY = "ctopology"
    INPUT_PARAM_SEED_DATA = "sdata"
    INPUT_PARAM_SEED_DATA_MODE = "sdata_mode"
    INPUT_PARAM_SEED_DATA_OPERATION = "sdata_op"
    INPUT_PARAM_POLL_INTERVAL = "poll_interval" # in seconds
    INPUT_PARAM_POLL_TIMEOUT = "poll_timeout" # in seconds
    # CLUSTER_TOPOLOGY_TYPE_LINE = "line"
    CLUSTER_TOPOLOGY_TYPE_CHAIN = "chain"
    CLUSTER_TOPOLOGY_TYPE_STAR = "star"
    CLUSTER_TOPOLOGY_TYPE_RING = "ring"
    REPLICATION_TYPE_CONTINUOUS = "continuous"
    REPLICATION_DIRECTION_UNIDIRECTION = "unidirection"
    REPLICATION_DIRECTION_BIDIRECTION = "bidirection"
    SEED_DATA_MODE_SYNC = "sync"
#===============================================================================
# class: XDCRBaseTest
# This class is the base class (super class) for all the test classes written
# for XDCR. It configures the cluster as defined in the <cluster-file>.ini
# passed as a command line argument "-i" while running the test using testrunner.
# The child classes can override the following methods if required.
# - init_parameters_extended
# - setup_extended
# - teardown_extended
# The reason for providing extended functions is so that inheriting classes can take advantage of
# clean_broken _setup in case an exception comes during test setUp.
# Note:
# It assumes that the each node is physically up and running.
# In ini file , the cluster name should start with "cluster" (prefix).
#===============================================================================
class XDCRBaseTest(unittest.TestCase):
    def setUp(self):
        """Build the source/destination clusters, apply XDCR global settings,
        and start the stats-keeping threads.  On any failure the partial
        setup is torn down before the exception is re-raised.
        """
        try:
            self.log = logger.Logger.get_logger()
            self._input = TestInputSingleton.input
            # Optional per-run log level override from the test input params.
            if self._input.param("log_level", None):
                self.log.setLevel(level=0)
                for hd in self.log.handlers:
                    if str(hd.__class__).find('FileHandler') != -1:
                        hd.setLevel(level=logging.DEBUG)
                    else:
                        hd.setLevel(level=getattr(logging, self._input.param("log_level", None)))
            self._init_parameters()
            if not hasattr(self, 'cluster'):
                self.cluster = Cluster()
            # REPLICATION RATE STATS
            self._local_replication_rate = {}
            self._xdc_replication_ops = {}
            self._xdc_replication_rate = {}
            self._start_replication_time = {}
            self.log.info("============== XDCRbasetests setup is started for test #{0} {1}=============="\
                .format(self.case_number, self._testMethodName))
            if not self._input.param("skip_cleanup", False) and str(self.__class__).find('upgradeXDCR') == -1:
                self._cleanup_previous_setup()
            self._init_clusters(self._disabled_consistent_view)
            # XDCR global settings
            # Set this by default to 1 for all tests
            self.set_xdcr_param('xdcrFailureRestartInterval', 1)
            if self._checkpoint_interval:
                self.set_xdcr_param('xdcrCheckpointInterval', self._checkpoint_interval)
            self._optimistic_xdcr_threshold = self._input.param("optimistic_xdcr_threshold", 256)
            if self.src_master.ip != self.dest_master.ip:  # Only if it's not a cluster_run
                if self._optimistic_xdcr_threshold != 256:
                    self.set_xdcr_param('xdcrOptimisticReplicationThreshold', self._optimistic_xdcr_threshold)
            self.setup_extended()
            self.log.info("============== XDCRbasetests setup is finished for test #{0} {1} =============="\
                .format(self.case_number, self._testMethodName))
            # # THREADS FOR STATS KEEPING
            self.__stats_threads = []
            if str(self.__class__).find('upgradeXDCR') == -1 and \
                str(self.__class__).find('tuq_xdcr') == -1 and self.print_stats:
                self.__stats_threads.append(Thread(target=self._replication_stat_keeper, args=["replication_data_replicated", self.src_master]))
                self.__stats_threads.append(Thread(target=self._replication_stat_keeper, args=["xdc_ops", self.dest_master]))
                self.__stats_threads.append(Thread(target=self._replication_stat_keeper, args=["data_replicated", self.src_master]))
                # Bidirectional runs also track the reverse-direction stats.
                if self._replication_direction_str == XDCRConstants.REPLICATION_DIRECTION_BIDIRECTION:
                    self.__stats_threads.append(Thread(target=self._replication_stat_keeper, args=["replication_data_replicated", self.dest_master]))
                    self.__stats_threads.append(Thread(target=self._replication_stat_keeper, args=["xdc_ops", self.src_master]))
                    self.__stats_threads.append(Thread(target=self._replication_stat_keeper, args=["data_replicated", self.dest_master]))
            [st_thread.setDaemon(True) for st_thread in self.__stats_threads]
            [st_thread.start() for st_thread in self.__stats_threads]
            self._log_start(self)
        except Exception as e:
            self.log.error(str(e))
            self.log.error("Error while setting up clusters: %s", sys.exc_info())
            self._cleanup_broken_setup()
            raise
    def tearDown(self):
        """Collect diagnostics for failed tests, print replication stats, and
        clean up clusters plus every pooled ssh/memcached connection.  The
        finally-block runs regardless of cleanup outcome.
        """
        try:
            test_failed = (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
                or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
            self._end_replication_flag = 1
            if str(self.__class__).find('tuq_xdcr') != -1:
                self._do_cleanup()
                return
            # Grab cbcollect before we cleanup
            if test_failed and TestInputSingleton.input.param("get-cbcollect-info", False):
                self.__get_cbcollect_info()
            # A single distinct ip across all servers means a local cluster_run.
            cluster_run = len({server.ip for server in self._servers}) == 1
            if test_failed and not cluster_run and self.collect_data_files:
                self.__collect_data_files()
            if test_failed and (str(self.__class__).find('upgradeXDCR') != -1 or TestInputSingleton.input.param("stop-on-failure", False)):
                self.log.warning("CLEANUP WAS SKIPPED")
                return
            if self.print_stats:
                self.log.info("============== XDCRbasetests stats for test #{0} {1} =============="\
                    .format(self.case_number, self._testMethodName))
                [st_thread.join() for st_thread in self.__stats_threads]
                self.log.info("Type of run: %s XDCR" % XDCRConstants.REPLICATION_DIRECTION_BIDIRECTION.upper())
                self._print_stats(self.src_master)
                if self._replication_direction_str == XDCRConstants.REPLICATION_DIRECTION_BIDIRECTION:
                    self._print_stats(self.dest_master)
                self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
            self.log.info("============== XDCRbasetests cleanup is started for test #{0} {1} =============="\
                .format(self.case_number, self._testMethodName))
            self.teardown_extended()
            self._do_cleanup()
            self.log.info("============== XDCRbasetests cleanup is finished for test #{0} {1} =============="\
                .format(self.case_number, self._testMethodName))
        finally:
            self.log.info("closing all ssh connections")
            for ins in RemoteMachineShellConnection.get_instances():
                #self.log.info(str(ins))
                ins.disconnect()
            self.log.info("closing all memcached connections")
            for ins in MemcachedClient.get_instances():
                #self.log.info(str(ins))
                ins.close()
            for ins in MC_MemcachedClient.get_instances():
                #self.log.info(str(ins))
                ins.close()
            self.cluster.shutdown(force=True)
            self._log_finish(self)
    def __get_cbcollect_info(self):
        """Run cbcollect on every node of both clusters and drop the logs in
        the configured logs_folder.

        NOTE(review): the shutdown + _log_finish at the top duplicates the
        tearDown finally-block; confirm shutting down before collecting is
        intentional.
        """
        self.cluster.shutdown(force=True)
        self._log_finish(self)
        path = self._input.param("logs_folder", "/tmp")
        for server in self.src_nodes + self.dest_nodes:
            print("grabbing cbcollect from {0}".format(server.ip))
            path = path or "."
            try:
                cbcollectRunner(server, path).run()
                # One successful collection is enough; clear the flag so later
                # tests in the run don't re-collect.
                TestInputSingleton.input.test_params["get-cbcollect-info"] = False
            except:
                print("IMPOSSIBLE TO GRAB CBCOLLECT FROM {0}".format(server.ip))
    def __collect_data_files(self):
        """Copy raw data files from every node of both clusters to logs_folder."""
        # NOTE(review): same shutdown/_log_finish pair as __get_cbcollect_info;
        # confirm it is intended here as well.
        self.cluster.shutdown(force=True)
        self._log_finish(self)
        logs_folder = self._input.param("logs_folder", "/tmp")
        # imported lazily: the scripts package is only needed on this failure path
        from scripts import collect_data_files
        nodes = self.src_nodes + self.dest_nodes
        for server in nodes:
            collect_data_files.cbdatacollectRunner(server, logs_folder).run()
def _print_stats(self, node):
if node == self.src_master:
node1 = self.dest_master
else:
node1 = self.src_master
self.log.info("STATS with source at {0} and destination at {1}".format(node.ip, node1.ip))
for the_bucket in self._get_cluster_buckets(node):
self.log.info("Bucket: {0}".format(the_bucket.name))
if node in self._local_replication_rate:
if the_bucket.name in self._local_replication_rate[node]:
if self._local_replication_rate[node][the_bucket.name] is not None:
_sum_ = 0
for _i in self._local_replication_rate[node][the_bucket.name]:
_sum_ += _i
self.log.info("\tAverage local replica creation rate for bucket '{0}': {1} KB per second"\
.format(the_bucket.name, (float)(_sum_) / (len(self._local_replication_rate[node][the_bucket.name]) * 1000)))
if node1 in self._xdc_replication_ops:
if the_bucket.name in self._xdc_replication_ops[node1]:
if self._xdc_replication_ops[node1][the_bucket.name] is not None:
_sum_ = 0
for _i in self._xdc_replication_ops[node1][the_bucket.name]:
_sum_ += _i
self._xdc_replication_ops[node1][the_bucket.name].sort()
_mid = len(self._xdc_replication_ops[node1][the_bucket.name]) // 2
if len(self._xdc_replication_ops[node1][the_bucket.name]) % 2 == 0:
_median_value_ = (float)(self._xdc_replication_ops[node1][the_bucket.name][_mid] +
self._xdc_replication_ops[node1][the_bucket.name][_mid - 1]) // 2
else:
_median_value_ = self._xdc_replication_ops[node1][the_bucket.name][_mid]
self.log.info("\tMedian XDC replication ops for bucket '{0}': {1} K ops per second"\
.format(the_bucket.name, (float)(_median_value_) / 1000))
self.log.info("\tMean XDC replication ops for bucket '{0}': {1} K ops per second"\
.format(the_bucket.name, (float)(_sum_) / (len(self._xdc_replication_ops[node1][the_bucket.name]) * 1000)))
if node in self._xdc_replication_rate:
if the_bucket.name in self._xdc_replication_rate[node]:
if self._xdc_replication_rate[node][the_bucket.name] is not None:
_sum_ = 0
for _i in self._xdc_replication_rate[node][the_bucket.name]:
_sum_ += _i
self.log.info("\tAverage XDCR data replication rate for bucket '{0}': {1} KB per second"\
.format(the_bucket.name, (float)(_sum_) / (len(self._xdc_replication_rate[node][the_bucket.name]) * 1000)))
def _cleanup_previous_setup(self):
self.teardown_extended()
self._do_cleanup()
    def _init_parameters(self):
        """Read all test parameters from TestInput and derive load generators.

        Populates: cluster topology (src/dest nodes and masters), bucket
        configuration, doc-op lists, rebalance/failover/warmup plans, view
        defaults and the BlobGenerators used by the data-load helpers.
        """
        self.log.info("Initializing input parameters started...")
        self._clusters_dic = self._input.clusters # clusters is declared as dic in TestInput which is unordered.
        self._clusters_keys_olst = list(range(
            len(self._clusters_dic))) # clusters are populated in the dic in testrunner such that ordinal is the key.
        # orderedDic cannot be used in order to maintain the compatibility with python 2.6
        self._cluster_counter_temp_int = 0
        self._cluster_names_dic = self._get_cluster_names()
        self._servers = self._input.servers
        self._disabled_consistent_view = self._input.param("disabled_consistent_view", None)
        self._floating_servers_set = self._get_floating_servers() # These are the servers defined in .ini file but not linked to any cluster.
        self._cluster_counter_temp_int = 0 # TODO: fix the testrunner code to pass cluster name in params.
        self.buckets = []
        # max items number to verify in ValidateDataTask, None - verify all
        self.max_verify = self._input.param("max_verify", None)
        self._checkpoint_interval = self._input.param("checkpoint_interval", 1800)
        self._default_bucket = self._input.param("default_bucket", True)
        self._end_replication_flag = self._input.param("end_replication_flag", 0)
        """
        ENTER: sasl_buckets=[no.] or standard_buckets=[no.]
        """
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)
        self._extra_buckets = self._input.param("extra_buckets", 0) # number of buckets to add inside test body
        if self._default_bucket:
            self.default_bucket_name = "default"
        self._num_replicas = self._input.param("replicas", 1)
        self._mem_quota_int = 0 # will be set in subsequent methods
        self.eviction_policy = self._input.param("eviction_policy", 'valueOnly') # or 'fullEviction'
        self.mixed_priority = self._input.param("mixed_priority", None)
        # if mixed priority is set by user, set high priority for sasl and standard buckets
        if self.mixed_priority:
            self.bucket_priority = 'high'
        else:
            self.bucket_priority = None
        self.num_items = self._input.param("items", 1000)
        self._value_size = self._input.param("value_size", 256)
        self._dgm_run = self._input.param("dgm_run", False)
        self.active_resident_threshold = int(self._input.param("active_resident_threshold", 0))
        self.rep_type = self._input.param("replication_type", "xmem")
        self._poll_interval = self._input.param(XDCRConstants.INPUT_PARAM_POLL_INTERVAL, 5)
        self._poll_timeout = self._input.param(XDCRConstants.INPUT_PARAM_POLL_TIMEOUT, 120)
        # subclass hook for extra parameters
        self.init_parameters_extended()
        # NOTE: the string default "None" is deliberate -- it means the
        # is-not-None checks below always split; an absent param yields ["None"]
        self._doc_ops = self._input.param("doc-ops", "None")
        if self._doc_ops is not None:
            self._doc_ops = self._doc_ops.split("-")
        self._doc_ops_dest = self._input.param("doc-ops-dest", "None")
        # semi-colon separator is not accepted for some reason here
        if self._doc_ops_dest is not None:
            self._doc_ops_dest = self._doc_ops_dest.split("-")
        self.case_number = self._input.param("case_number", 0)
        self._expires = self._input.param("expires", 0)
        self.wait_timeout = self._input.param("timeout", 60)
        self._percent_update = self._input.param("upd", 30)
        self._percent_delete = self._input.param("del", 30)
        self._warmup = self._input.param("warm", None)
        self._failover = self._input.param("failover", None)
        self._graceful = self._input.param("graceful", False)
        self._demand_encryption = self._input.param("demand_encryption", 0)
        self._rebalance = self._input.param("rebalance", None)
        self._use_hostanames = self._input.param("use_hostnames", False)
        self.print_stats = self._input.param("print_stats", False)
        self._wait_for_expiration = self._input.param("wait_for_expiration", False)
        self.sdk_compression = self._input.param("sdk_compression", True)
        self.collect_data_files = False
        # "-"-separated plans are turned into lists
        if self._warmup is not None:
            self._warmup = self._warmup.split("-")
        if self._failover is not None:
            self._failover = self._failover.split("-")
        if self._rebalance is not None:
            self._rebalance = self._rebalance.split("-")
        self._num_rebalance = self._input.param("num_rebalance", 1)
        """
        CREATE's a set of items,
        UPDATE's UPD% of the items starting from 0,
        DELETE's DEL% of the items starting from the end (count(items)).
        """
        self.gen_create = BlobGenerator('loadOne', 'loadOne', self._value_size, end=self.num_items)
        self.gen_delete = BlobGenerator('loadOne', 'loadOne-', self._value_size,
                                        start=int((self.num_items) * (float)(100 - self._percent_delete) / 100), end=self.num_items)
        self.gen_update = BlobGenerator('loadOne', 'loadOne-', self._value_size, start=0,
                                        end=int(self.num_items * (float)(self._percent_update) / 100))
        self.gen_append = BlobGenerator('loadOne', 'loadOne', self._value_size, end=self.num_items)
        self.ord_keys = self._clusters_keys_olst
        self.ord_keys_len = len(self.ord_keys)
        self.src_nodes = copy.copy(self._clusters_dic[0])
        self.dest_nodes = copy.copy(self._clusters_dic[1])
        # NOTE(review): ports 9091-9990 trigger a src/dest swap -- presumably a
        # special (e.g. ES/secondary) port range; confirm the intent.
        if int(self.src_nodes[0].port) in range(9091, 9991):
            temp = self.dest_nodes
            self.dest_nodes = self.src_nodes
            self.src_nodes = temp
        self.src_master = self.src_nodes[0]
        self.dest_master = self.dest_nodes[0]
        self._defaul_map_func = "function (doc) {\n emit(doc._id, doc);\n}"
        self._default_view_name = "default_view"
        self._default_view = View(self._default_view_name, self._defaul_map_func, None)
        self._num_views = self._input.param("num_views", 5)
        self._is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        self.fragmentation_value = self._input.param("fragmentation_value", 80)
        self.disable_src_comp = self._input.param("disable_src_comp", True)
        self.disable_dest_comp = self._input.param("disable_dest_comp", True)
        self.log.info("Initializing input parameters completed.")
    @staticmethod
    def _log_start(self):
        """Record the test's start in the server-side logs of both masters.

        Best-effort: any failure is deliberately swallowed.
        NOTE(review): indexes src_master/dest_master with [0] -- presumably
        they are lists in the subclass that uses this helper; on a plain node
        the AttributeError is silently absorbed by the bare except.
        """
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.src_master[0]).log_client_error(msg)
            RestConnection(self.dest_master[0]).log_client_error(msg)
        except:
            pass
    @staticmethod
    def _log_finish(self):
        """Record the test's finish in the server-side logs of both masters.

        Best-effort; same src_master[0]/dest_master[0] indexing caveat as
        _log_start -- all failures are deliberately swallowed.
        """
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.src_master[0]).log_client_error(msg)
            RestConnection(self.dest_master[0]).log_client_error(msg)
        except:
            pass
def sleep(self, timeout=1, message=""):
self.log.info("sleep for {0} secs. {1} ...".format(timeout, message))
time.sleep(timeout)
    def _replication_stat_keeper(self, arg, node):
        """Background sampler: poll REST bucket stats until _end_replication_flag is 1.

        ``arg`` selects the metric to collect:
          - "replication_data_replicated": local replica creation rate (KB/s),
            appended to self._local_replication_rate[node][bucket]
          - "xdc_ops": XDCR mutation ops, appended to
            self._xdc_replication_ops[node][bucket]
          - "data_replicated": XDCR outbound data rate (KB/s), appended to
            self._xdc_replication_rate[node][bucket]
        """
        while not self._end_replication_flag == 1:
            # NOTE(review): if node belongs to neither cluster, consider_node is
            # never bound and the code below would raise NameError -- presumably
            # callers always pass a cluster member; confirm.
            if node in self._clusters_dic[0]:
                consider_node = self.src_master
            elif node in self._clusters_dic[1]:
                consider_node = self.dest_master
            if arg == "replication_data_replicated":
                rest = RestConnection(consider_node)
                for the_bucket in self._get_cluster_buckets(consider_node):
                    stats = rest.fetch_bucket_stats(bucket=the_bucket.name)["op"]["samples"]
                    _x_ = stats[arg][-1]
                    if _x_ > 0:
                        _y_ = datetime.datetime.now()
                        if node not in self._local_replication_rate:
                            self._local_replication_rate[node] = {}
                        if the_bucket.name not in self._local_replication_rate[node]:
                            self._local_replication_rate[node][the_bucket.name] = []
                        # KB/s since replication started on this bucket
                        self._local_replication_rate[node][the_bucket.name].append((float)(_x_) / (_y_ - self._start_replication_time[the_bucket.name]).seconds)
            elif arg == "xdc_ops":
                rest = RestConnection(consider_node)
                for the_bucket in self._get_cluster_buckets(consider_node):
                    stats = rest.fetch_bucket_stats(bucket=the_bucket.name)["op"]["samples"]
                    _x_ = stats[arg][-1]
                    if _x_ > 0:
                        if node not in self._xdc_replication_ops:
                            self._xdc_replication_ops[node] = {}
                        if the_bucket.name not in self._xdc_replication_ops[node]:
                            self._xdc_replication_ops[node][the_bucket.name] = []
                        self._xdc_replication_ops[node][the_bucket.name].append((float)(_x_))
            elif arg == "data_replicated":
                rest1 = RestConnection(consider_node)
                # the per-replication stat key needs the peer cluster's uuid
                if consider_node == self.src_master:
                    rest2 = RestConnection(self.dest_master)
                else:
                    rest2 = RestConnection(self.src_master)
                dest_uuid = rest2.get_pools_info()["uuid"]
                for the_bucket in self._get_cluster_buckets(consider_node):
                    argument = "{0}/{1}/{2}/{3}/{4}".format("replications", dest_uuid, the_bucket.name, the_bucket.name, arg)
                    stats = rest1.fetch_bucket_stats(bucket=the_bucket.name)["op"]["samples"]
                    _x_ = stats[argument][-1]
                    if _x_ > 0:
                        _y_ = datetime.datetime.now()
                        if node not in self._xdc_replication_rate:
                            self._xdc_replication_rate[node] = {}
                        if the_bucket.name not in self._xdc_replication_rate[node]:
                            self._xdc_replication_rate[node][the_bucket.name] = []
                        self._xdc_replication_rate[node][the_bucket.name].append((float)(_x_) / (_y_ - self._start_replication_time[the_bucket.name]).seconds)
def _get_floating_servers(self):
cluster_nodes = []
floating_servers = copy.copy(self._servers)
for key, node in list(self._clusters_dic.items()):
cluster_nodes.extend(node)
for c_node in cluster_nodes:
for node in floating_servers:
if node.ip in str(c_node) and node.port in str(c_node):
floating_servers.remove(node)
return floating_servers
def _init_clusters(self, disabled_consistent_view=None):
for key in self._clusters_keys_olst:
for node in self._clusters_dic[key]:
rest = RestConnection(node)
rest.init_node()
self._setup_cluster(self._clusters_dic[key], disabled_consistent_view)
    # This method shall be overridden in case there are parameters that need to be initialized.
    def init_parameters_extended(self):
        """Subclass hook: initialise extra parameters (no-op by default)."""
        pass
    # This method shall be overridden in case there are custom steps involved during setup.
    def setup_extended(self):
        """Subclass hook: extra setup steps (no-op by default)."""
        pass
    # This method shall be overridden in case there are custom steps involved during teardown.
    def teardown_extended(self):
        """Subclass hook: extra teardown steps (no-op by default)."""
        pass
def __stop_rebalance(self, nodes):
rest = RestConnection(nodes[0])
if rest._rebalance_progress_status() == 'running':
self.log.warning("rebalancing is still running, test should be verified")
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
    def _do_cleanup(self):
        """Delete all buckets on every cluster, then clean (or force-eject) the nodes."""
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            self.log.info("cleanup cluster{0}: {1}".format(key + 1, nodes))
            self.__stop_rebalance(nodes)
            for node in nodes:
                BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                # the two cluster masters are never force-ejected
                if self._input.param("forceEject", False) and node not in [self.src_nodes[0], self.dest_nodes[0]]:
                    try:
                        rest = RestConnection(node)
                        rest.force_eject_node()
                    except BaseException as e:
                        # best-effort: log and keep cleaning the rest
                        self.log.error(e)
                else:
                    ClusterOperationHelper.cleanup_cluster([node], self)
                ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
def _cleanup_broken_setup(self):
try:
self.tearDown()
except:
self.log.info("Error while cleaning broken setup.")
def _get_cluster_names(self):
cs_names = {}
for key in self._clusters_keys_olst:
cs_names[key] = "cluster{0}".format(self._cluster_counter_temp_int)
self._cluster_counter_temp_int += 1
return cs_names
    def _setup_cluster(self, nodes, disabled_consistent_view=None):
        """Init all nodes, rebalance extras into nodes[0]'s cluster and create buckets."""
        self._init_nodes(nodes, disabled_consistent_view)
        self.cluster.async_rebalance(nodes, nodes[1:], [], use_hostnames=self._use_hostanames).result()
        # NOTE(review): the upgradeXDCR subclass apparently expects an extra
        # leading self in its _create_buckets signature -- confirm before touching.
        if str(self.__class__).find('upgradeXDCR') != -1:
            self._create_buckets(self, nodes)
        else:
            self._create_buckets(nodes)
def _init_nodes(self, nodes, disabled_consistent_view=None):
_tasks = []
for node in nodes:
self._add_built_in_server_user(node=node)
_tasks.append(self.cluster.async_init_node(node, disabled_consistent_view))
for task in _tasks:
mem_quota_node = task.result()
if mem_quota_node < self._mem_quota_int or self._mem_quota_int == 0:
self._mem_quota_int = mem_quota_node
if self._use_hostanames:
if not hasattr(self, 'hostnames'):
self.hostnames = {}
self.hostnames.update(self._rename_nodes(nodes))
    def _rename_nodes(self, servers):
        """Rename each server to its full hostname via REST; returns {server: hostname}."""
        hostnames = {}
        for server in servers:
            shell = RemoteMachineShellConnection(server)
            try:
                hostname = shell.get_full_hostname()
                rest = RestConnection(server)
                renamed, content = rest.rename_node(hostname, username=server.rest_username, password=server.rest_password)
                self.assertTrue(renamed, "Server %s is not renamed!Hostname %s. Error %s" % (
                    server, hostname, content))
                hostnames[server] = hostname
            finally:
                shell.disconnect()
        # NOTE(review): 'server' below is the leaked loop variable from above
        # (the last server processed), so this guard only skips entries whose
        # ip differs from the last server's -- confirm this is intended.
        for i in range(len(servers)):
            if servers[i] in hostnames:
                if server and server.ip != servers[i].ip:
                    continue
                servers[i].hostname = hostnames[servers[i]]
        return hostnames
def _add_built_in_server_user(self, testuser=None, rolelist=None, node=None):
"""
From spock, couchbase server is built with some users that handles
some specific task such as:
cbadminbucket
Default added user is cbadminbucket with admin role
"""
if testuser is None:
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'password': 'password'}]
if rolelist is None:
rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'roles': 'admin'}]
if node is None:
node = self.master
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
node.ip))
RbacBase().create_user_source(testuser, 'builtin', node)
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
RbacBase().add_user_role(rolelist, RestConnection(node), 'builtin')
def _create_bucket_params(self, server, replicas=1, size=0, port=11211, password=None,
bucket_type='membase', enable_replica_index=1, eviction_policy='valueOnly',
bucket_priority=None, flush_enabled=1, lww=False):
"""Create a set of bucket_parameters to be sent to all of the bucket_creation methods
Parameters:
server - The server to create the bucket on. (TestInputServer)
bucket_name - The name of the bucket to be created. (String)
port - The port to create this bucket on. (String)
password - The password for this bucket. (String)
size - The size of the bucket to be created. (int)
enable_replica_index - can be 0 or 1, 1 enables indexing of replica bucket data (int)
replicas - The number of replicas for this bucket. (int)
eviction_policy - The eviction policy for the bucket, can be valueOnly or fullEviction. (String)
bucket_priority - The priority of the bucket:either none, low, or high. (String)
bucket_type - The type of bucket. (String)
flushEnabled - Enable or Disable the flush functionality of the bucket. (int)
lww = determine the conflict resolution type of the bucket. (Boolean)
Returns:
bucket_params - A dictionary containing the parameters needed to create a bucket."""
bucket_params = {}
bucket_params['server'] = server
bucket_params['replicas'] = replicas
bucket_params['size'] = size
bucket_params['port'] = port
bucket_params['password'] = password
bucket_params['bucket_type'] = bucket_type
bucket_params['enable_replica_index'] = enable_replica_index
bucket_params['eviction_policy'] = eviction_policy
bucket_params['bucket_priority'] = bucket_priority
bucket_params['flush_enabled'] = flush_enabled
bucket_params['lww'] = lww
return bucket_params
def _create_sasl_buckets(self, server, num_buckets, server_id, bucket_size):
bucket_tasks = []
sasl_params = self._create_bucket_params(server=server, password='password', size=bucket_size,
replicas=self._num_replicas,
eviction_policy=self.eviction_policy,
bucket_priority=self.bucket_priority)
for i in range(num_buckets):
name = "sasl_bucket_" + str(i + 1)
bucket_tasks.append(self.cluster.async_create_sasl_bucket(name=name, password='password', bucket_params=sasl_params))
self.buckets.append(Bucket(name=name, authType="sasl", saslPassword="password",
num_replicas=self._num_replicas, bucket_size=bucket_size,
master_id=server_id, eviction_policy=self.eviction_policy))
for task in bucket_tasks:
task.result(self.wait_timeout * 10)
def _create_standard_buckets(self, server, num_buckets, server_id, bucket_size):
bucket_tasks = []
standard_params = self._create_bucket_params(server=server, size=bucket_size, replicas=self._num_replicas,
eviction_policy=self.eviction_policy,
bucket_priority=self.bucket_priority)
for i in range(num_buckets):
name = "standard_bucket_" + str(i + 1)
bucket_tasks.append(self.cluster.async_create_standard_bucket(name=name, port=STANDARD_BUCKET_PORT+i,
bucket_params=standard_params))
self.buckets.append(Bucket(name=name, authType=None, saslPassword=None,
num_replicas=self._num_replicas, bucket_size=bucket_size,
port=STANDARD_BUCKET_PORT + i, master_id=server_id, eviction_policy=self.eviction_policy))
for task in bucket_tasks:
task.result(self.wait_timeout * 10)
    def _create_buckets(self, nodes):
        """Create the sasl/standard/default buckets on the cluster led by nodes[0].

        Bucket size is the memory quota split evenly across all requested
        buckets (min 100MB). Returns early when no buckets are requested.
        """
        if self._dgm_run:
            # force disk-greater-than-memory scenarios with a tiny quota
            self._mem_quota_int = 256
        master_node = nodes[0]
        total_buckets = self._sasl_buckets + self._default_bucket + self._standard_buckets + self._extra_buckets
        #to enable tests to skip creating buckets in setup and create them later
        if not total_buckets:
            return
        bucket_size = self._get_bucket_size(self._mem_quota_int, total_buckets)
        rest = RestConnection(master_node)
        master_id = rest.get_nodes_self().id
        if len({server.ip for server in self._servers}) != 1:  # if not cluster run use ip addresses instead of lh
            master_id = master_id.replace("127.0.0.1", master_node.ip).replace("localhost", master_node.ip)
        self._create_sasl_buckets(master_node, self._sasl_buckets, master_id, bucket_size)
        self._create_standard_buckets(master_node, self._standard_buckets, master_id, bucket_size)
        if self._default_bucket:
            bucket_params = self._create_bucket_params(server=master_node, size=bucket_size,
                                                       replicas=self._num_replicas,
                                                       eviction_policy=self.eviction_policy)
            self.cluster.create_default_bucket(bucket_params)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self._num_replicas, bucket_size=bucket_size, master_id=master_id,
                                       eviction_policy=self.eviction_policy))
def _get_bucket_size(self, mem_quota, num_buckets):
#min size is 100MB now
return max(100, int(float(mem_quota) / float(num_buckets)))
    def _get_cluster_buckets(self, master_server):
        """Return the buckets whose master_id matches ``master_server``'s node id.

        Side effect: rewrites stale 127.0.0.1/localhost master_ids (left over
        from a cluster_run) on the bucket records once real node ids are known.
        """
        rest = RestConnection(master_server)
        master_id = rest.get_nodes_self().id
        # node ids starting with 'es' (presumably Elasticsearch nodes -- confirm)
        # skip the rewrite below
        if master_id.find('es') != 0:
            # verify if node_ids were changed for cluster_run
            for bucket in self.buckets:
                if ("127.0.0.1" in bucket.master_id and "127.0.0.1" not in master_id) or \
                   ("localhost" in bucket.master_id and "localhost" not in master_id):
                    new_ip = master_id[master_id.index("@") + 1:]
                    bucket.master_id = bucket.master_id.replace("127.0.0.1", new_ip).\
                        replace("localhost", new_ip)
        if len({server.ip for server in self._servers}) != 1:  # if not cluster run use ip addresses instead of lh
            master_id = master_id.replace("127.0.0.1", master_server.ip).replace("localhost", master_server.ip)
        buckets = [bucket for bucket in self.buckets if bucket.master_id == master_id]
        if not buckets:
            self.log.warning("No bucket(s) found on the server %s" % master_server)
        return buckets
def do_a_warm_up(self, node):
shell = RemoteMachineShellConnection(node)
shell.stop_couchbase()
self.sleep(5)
shell.start_couchbase()
shell.disconnect()
def adding_back_a_node(self, master, server):
rest = RestConnection(master)
nodes = rest.node_statuses()
for node in nodes:
if server.ip == node.ip and int(server.port) == int(node.port):
rest.add_back_node(node.id)
def _get_active_replica_count_from_cluster(self, master):
buckets = self._get_cluster_buckets(master)
for bucket in buckets:
keys_loaded = sum([len(kv_store) for kv_store in list(bucket.kvs.values())])
self.log.info("Keys loaded into bucket {0}:{1}".format(bucket.name,
keys_loaded))
self.log.info("Stat: vb_active_curr_items = {0}".
format(MemcachedClientHelper.direct_client(master,
bucket.name).stats()['vb_active_curr_items']))
self.log.info("Stat: vb_replica_curr_items = {0}".
format(MemcachedClientHelper.direct_client(master,
bucket.name).stats()['vb_replica_curr_items']))
def _async_failover(self, nodes, failover_nodes):
self.log.info("Printing pre-failover stats")
self._get_active_replica_count_from_cluster(nodes[0])
tasks = []
tasks.append(self.cluster.async_failover(nodes, failover_nodes, graceful=self._graceful))
return tasks
def _async_rebalance(self, nodes, to_add_node, to_remove_node):
tasks = []
tasks.append(self.cluster.async_rebalance(nodes, to_add_node, to_remove_node))
return tasks
# Change the master_id of buckets to new master node
def __change_masterid_buckets(self, new_master, buckets):
new_master_id = RestConnection(new_master).get_nodes_self().id
for bucket in buckets:
bucket.master_id = new_master_id
# Initiates a rebalance-out asynchronously on both clusters
def _async_rebalance_out(self, master=False):
tasks = []
if "source" in self._rebalance and self._num_rebalance < len(self.src_nodes):
src_buckets = self._get_cluster_buckets(self.src_master)
tasks += self.__async_rebalance_out_cluster(self.src_nodes, self.src_master, master=master)
if master:
self.src_master = self.src_nodes[0]
self.__change_masterid_buckets(self.src_master, src_buckets)
if "destination" in self._rebalance and self._num_rebalance < len(self.dest_nodes):
dest_buckets = self._get_cluster_buckets(self.dest_master)
tasks += self.__async_rebalance_out_cluster(self.dest_nodes, self.dest_master, master=master, cluster_type="source")
if master:
self.dest_master = self.dest_nodes[0]
self.__change_masterid_buckets(self.dest_master, dest_buckets)
return tasks
    def __async_rebalance_out_cluster(self, cluster_nodes, master_node, master=False, cluster_type="source"):
        """Rebalance out either the master or the last num_rebalance nodes (async).

        If the 'failover' param names this cluster_type, the nodes are first
        failed over (and the failover is waited on synchronously). Side
        effect: removed nodes are taken out of ``cluster_nodes``.
        Returns the rebalance task list.
        """
        remove_nodes = []
        if master:
            remove_nodes = [master_node]
        else:
            # take the tail of the node list
            remove_nodes = cluster_nodes[len(cluster_nodes) - self._num_rebalance:]
        if self._failover and cluster_type in self._failover:
            tasks = self._async_failover(cluster_nodes, remove_nodes)
            for task in tasks:
                task.result()
        tasks = self._async_rebalance(cluster_nodes, [], remove_nodes)
        remove_node_ips = [remove_node.ip for remove_node in remove_nodes]
        self.log.info(" Starting rebalance-out nodes:{0} at {1} cluster {2}".
                      format(remove_node_ips, cluster_type, master_node.ip))
        list(map(cluster_nodes.remove, remove_nodes))
        return tasks
# Initiates a rebalance-in asynchronously on both clusters
def _async_rebalance_in(self):
tasks = []
if "source" in self._rebalance:
tasks += self.__async_rebalance_in_cluster(self.src_nodes, self.src_master)
if "destination" in self._rebalance:
tasks += self.__async_rebalance_in_cluster(self.dest_nodes, self.dest_master, cluster_type="destination")
return tasks
def __async_rebalance_in_cluster(self, cluster_nodes, master_node, cluster_type="source"):
add_nodes = self._floating_servers_set[0:self._num_rebalance]
list(map(self._floating_servers_set.remove, add_nodes))
tasks = self._async_rebalance(cluster_nodes, add_nodes, [])
add_nodes_ips = [node.ip for node in add_nodes]
self.log.info(" Starting rebalance-in nodes:{0} at {1} cluster {2}".
format(add_nodes_ips, cluster_type, master_node.ip))
list(map(cluster_nodes.append, add_nodes))
return tasks
# Initiates a swap-rebalance asynchronously on both clusters
def _async_swap_rebalance(self, master=False):
tasks = []
if "source" in self._rebalance and self._num_rebalance < len(self.src_nodes):
src_buckets = self._get_cluster_buckets(self.src_master)
tasks += self.__async_swap_rebalance_cluster(self.src_nodes, self.src_master, master=master)
if master:
self.src_master = self.src_nodes[0]
self.__change_masterid_buckets(self.src_master, src_buckets)
if "destination" in self._rebalance and self._num_rebalance < len(self.dest_nodes):
dest_buckets = self._get_cluster_buckets(self.dest_master)
tasks += self.__async_swap_rebalance_cluster(self.dest_nodes, self.dest_master, master=master, cluster_type="destination")
if master:
self.dest_master = self.dest_nodes[0]
self.__change_masterid_buckets(self.dest_master, dest_buckets)
return tasks
def __async_swap_rebalance_cluster(self, cluster_nodes, master_node, master=False, cluster_type="source"):
add_node = self._floating_servers_set.pop()
if master:
remove_node = master_node
else:
remove_node = cluster_nodes[len(cluster_nodes) - 1]
tasks = self._async_rebalance(cluster_nodes, [add_node], [remove_node])
self.log.info(" Starting swap-rebalance [remove_node:{0}] -> [add_node:{1}] at {2} cluster {3}"
.format(remove_node.ip, add_node.ip, cluster_type, master_node.ip))
cluster_nodes.remove(remove_node)
cluster_nodes.append(add_node)
return tasks
def get_servers_in_cluster(self, member):
nodes = [node for node in RestConnection(member).get_nodes()]
servers = []
cluster_run = len({server.ip for server in self._servers}) == 1
for server in self._servers:
for node in nodes:
if ((server.hostname if server.hostname else server.ip) == str(node.ip) or cluster_run)\
and server.port == str(node.port):
servers.append(server)
return servers
def wait_warmup_completed(self, warmupnodes, bucket_names=["default"]):
if isinstance(bucket_names, str):
bucket_names = [bucket_names]
for server in warmupnodes:
for bucket in bucket_names:
mc = MemcachedClientHelper.direct_client(server, bucket)
start = time.time()
while time.time() - start < 150:
if mc.stats()["ep_warmup_thread"] == "complete":
self.log.info("Warmed up: %s items " % (mc.stats()["curr_items_tot"]))
self.sleep(10)
break
elif mc.stats()["ep_warmup_thread"] == "running":
self.log.info(
"Still warming up .. curr_items_tot : %s" % (mc.stats()["curr_items_tot"]))
continue
else:
self.log.info("Value of ep_warmup_thread does not exist, exiting from this server")
break
if mc.stats()["ep_warmup_thread"] == "running":
self.log.info("ERROR: ep_warmup_thread's status not complete")
mc.close
    def wait_node_restarted(self, server, wait_time=120, wait_if_warmup=False, check_service=False):
        """Wait until ``server`` answers again after a restart.

        When check_service is True, first waits for the OS service; that wait
        eats into the same wait_time budget. Then retries the ns_server check
        every 10s until the budget (wait_time // 10 attempts) is exhausted.
        """
        now = time.time()
        if check_service:
            self.wait_service_started(server, wait_time)
            # shrink the remaining budget by however long the service wait took
            wait_time = now + wait_time - time.time()
        num = 0
        while num < wait_time // 10:
            try:
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [server], self, wait_time=wait_time - num * 10, wait_if_warmup=wait_if_warmup)
                break
            except ServerUnavailableException:
                num += 1
                self.sleep(10)
def wait_service_started(self, server, wait_time=120):
shell = RemoteMachineShellConnection(server)
os_type = shell.extract_remote_info().distribution_type
if os_type.lower() == 'windows':
cmd = "sc query CouchbaseServer | grep STATE"
else:
cmd = "service couchbase-server status"
now = time.time()
while time.time() - now < wait_time:
output, error = shell.execute_command(cmd)
if str(output).lower().find("running") != -1:
self.log.info("Couchbase service is running")
return
self.sleep(10, "couchbase service is not running")
self.fail("Couchbase service is not running after {0} seconds".format(wait_time))
def _enable_firewall(self, server):
is_bidirectional = self._replication_direction_str == "bidirection"
RemoteUtilHelper.enable_firewall(server, bidirectional=is_bidirectional, xdcr=True)
def _disable_firewall(self, server):
shell = RemoteMachineShellConnection(server)
o, r = shell.execute_command("iptables -F")
shell.log_command_output(o, r)
o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:65535 -j ACCEPT")
shell.log_command_output(o, r)
if self._replication_direction_str == "bidirection":
o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:65535 -j ACCEPT")
shell.log_command_output(o, r)
o, r = shell.execute_command("/sbin/iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT")
shell.log_command_output(o, r)
self.log.info("enabled firewall on {0}".format(server))
o, r = shell.execute_command("/sbin/iptables --list")
shell.log_command_output(o, r)
shell.disconnect()
""" Reboot node, wait till nodes are warmed up """
def reboot_node(self, node):
self.log.info("Rebooting node '{0}'....".format(node.ip))
shell = RemoteMachineShellConnection(node)
if shell.extract_remote_info().type.lower() == 'windows':
o, r = shell.execute_command("shutdown -r -f -t 0")
elif shell.extract_remote_info().type.lower() == 'linux':
o, r = shell.execute_command("reboot")
shell.log_command_output(o, r)
# wait for restart and warmup on all node
self.sleep(self.wait_timeout * 5)
# disable firewall on these nodes
self._disable_firewall(node)
# wait till node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self, wait_if_warmup=True)
def disable_compaction(self, server=None, bucket="default"):
server = server or self.src_master
new_config = {"viewFragmntThresholdPercentage" : None,
"dbFragmentThresholdPercentage" : None,
"dbFragmentThreshold" : None,
"viewFragmntThreshold" : None}
self.cluster.modify_fragmentation_config(server, new_config, bucket)
def make_default_views(self, prefix, count, is_dev_ddoc=False,):
ref_view = self._default_view
ref_view.name = (prefix, ref_view.name)[prefix is None]
return [View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc) for i in range(count)]
def async_create_views(self, server, design_doc_name, views, bucket="default"):
tasks = []
if len(views):
for view in views:
t_ = self.cluster.async_create_view(server, design_doc_name, view, bucket)
tasks.append(t_)
else:
t_ = self.cluster.async_create_view(server, design_doc_name, None, bucket)
tasks.append(t_)
return tasks
def _get_num_items_ratio(self, op_type):
if op_type in ["update", "delete"]:
return self.num_items // 3
else:
return self.num_items
def _load_gen_data(self, cname, node):
for op_type in self._seed_data_ops_lst:
num_items_ratio = self._get_num_items_ratio(op_type)
load_gen = BlobGenerator(cname, cname, self._value_size, end=num_items_ratio)
self.log.info("Starting Load operation '{0}' for items (ratio) '{1}' on node '{2}'....".format(op_type,
num_items_ratio, cname))
if self._seed_data_mode_str == XDCRConstants.SEED_DATA_MODE_SYNC:
self._load_all_buckets(node, load_gen, op_type, 0)
self.log.info("Completed Load of {0}".format(op_type))
else:
self._async_load_all_buckets(node, load_gen, op_type, 0)
self.log.info("Started async Load of {0}".format(op_type))
    def _async_load_bucket(self, bucket, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1000, pause_secs=1, timeout_secs=30):
        """Start an async doc-load task against one bucket and return the task
        handle; the caller is responsible for task.result()."""
        # Deep-copy the generator so the caller's generator state is untouched
        # and can be reused for other buckets/ops.
        gen = copy.deepcopy(kv_gen)
        task = self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                bucket.kvs[kv_store], op_type,
                                                exp, flag, only_store_hash,
                                                batch_size, pause_secs, timeout_secs,
                                                compression=self.sdk_compression)
        return task
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        """Start one async doc-load task per bucket of `server`'s cluster and
        return the list of task handles.

        NOTE(review): batch_size defaults to 1 here while the single-bucket
        siblings default to 1000 -- confirm whether that is intentional.
        """
        tasks = []
        buckets = self._get_cluster_buckets(server)
        for bucket in buckets:
            # Fresh copy per bucket: each task consumes its own generator.
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash,
                                                          batch_size, pause_secs, timeout_secs,
                                                          compression=self.sdk_compression))
        return tasks
def _load_bucket(self, bucket, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1000, pause_secs=1, timeout_secs=30):
task = self._async_load_bucket(bucket, server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
task.result()
def key_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1000, pause_secs=1, timeout_secs=30):
        """Load docs into every bucket of `server`'s cluster and block until
        done.

        When active_resident_threshold is set, keep loading extra random items
        per bucket until the active resident ratio drops to/below that
        threshold (forces DGM state).
        """
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()
        if self.active_resident_threshold:
            stats_all_buckets = {}
            for bucket in self._get_cluster_buckets(server):
                stats_all_buckets[bucket.name] = StatsCommon()
            for bucket in self._get_cluster_buckets(server):
                threshold_reached = False
                while not threshold_reached:
                    # vb_active_perc_mem_resident: percentage of active items
                    # currently resident in memory.
                    active_resident = stats_all_buckets[bucket.name].get_stats([server], bucket, '', 'vb_active_perc_mem_resident')[server]
                    if int(active_resident) > self.active_resident_threshold:
                        self.log.info("resident ratio is %s greater than %s for %s in bucket %s. Continue loading to the cluster" %
                                      (active_resident, self.active_resident_threshold, server.ip, bucket.name))
                        # Push more (randomly keyed) items to drive the ratio down.
                        random_key = self.key_generator()
                        generate_load = BlobGenerator(random_key, '%s-' % random_key, self._value_size, end=batch_size * 50)
                        self._load_bucket(bucket, server, generate_load, "create", exp=0, kv_store=1, flag=0, only_store_hash=True, batch_size=batch_size, pause_secs=5, timeout_secs=60)
                    else:
                        threshold_reached = True
                        self.log.info("DGM state achieved for %s in bucket %s!" % (server.ip, bucket.name))
                        break
    def _modify_src_data(self):
        """Setting up creates/updates/deletes at source nodes"""
        # _doc_ops lists the requested ops; each runs synchronously with its
        # matching pre-built generator.
        if self._doc_ops is not None:
            if "create" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
            if "update" in self._doc_ops:
                # updates carry the configured TTL so expiry can be exercised
                self._load_all_buckets(self.src_master, self.gen_update, "update", self._expires)
            if "delete" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_delete, "delete", 0)
        # ensure disk write queues have drained on all source nodes
        self._wait_flusher_empty(self.src_master, self.src_nodes)
        if self._wait_for_expiration and self._expires:
            self.sleep(self._expires, "Waiting for expiration of updated items")
def _async_modify_data(self):
tasks = []
"""Setting up creates/updates/deletes at source nodes"""
if self._doc_ops is not None:
# allows multiple of them but one by one
if "update" in self._doc_ops:
tasks.extend(self._async_load_all_buckets(self.src_master, self.gen_update, "update", self._expires))
if "create" in self._doc_ops:
tasks.extend(self._async_load_all_buckets(self.src_master, self.gen_create, "create", 0))
if "delete" in self._doc_ops:
tasks.extend(self._async_load_all_buckets(self.src_master, self.gen_delete, "delete", 0))
for task in tasks:
task.result()
if self._wait_for_expiration and self._expires:
self.sleep(self._expires, "Waiting for expiration of updated items")
    def _async_update_delete_data(self):
        """Fire async update/delete loads on the source (and, when configured,
        destination) cluster, then await all tasks and optional expiration."""
        self.log.info("The tasks:-")
        tasks = []
        # Setting up doc-ops at source nodes and doc-ops-dest at destination nodes
        if self._doc_ops is not None:
            # allows multiple of them but one by one on either of the clusters
            if "update" in self._doc_ops:
                tasks.extend(self._async_load_all_buckets(self.src_master, self.gen_update, "update", self._expires))
            if "delete" in self._doc_ops:
                tasks.extend(self._async_load_all_buckets(self.src_master, self.gen_delete, "delete", 0))
            # brief settle time before touching the destination side
            self.sleep(self.wait_timeout // 6)
        if self._doc_ops_dest is not None:
            if "update" in self._doc_ops_dest:
                tasks.extend(self._async_load_all_buckets(self.dest_master, self.gen_update2, "update", self._expires))
            if "delete" in self._doc_ops_dest:
                tasks.extend(self._async_load_all_buckets(self.dest_master, self.gen_delete2, "delete", 0))
        self.sleep(self.wait_timeout // 6)
        for task in tasks:
            task.result()
        if self._wait_for_expiration and self._expires:
            self.sleep(self._expires, "Waiting for expiration of updated items")
#===============================================================================
# class: XDCRReplicationBaseTest
# This class links the different clusters defined in ".ini" file and starts the
# replication. It supports different topologies for cluster which can be defined
# by input parameter "ctopology". The various values supported are as follows
# - chain
# - star (Not Implemented yet)
# - tree (Not Implemented yet)
# The direction of replication can be defined using parameter "rdirection".
# It supports following values
# - unidirection
# - bidirection
# Note:
# The first node is considered as master node in the respective cluster section
# of .ini file.
# Cluster/Node ordinals are determined by the sequence of entry of node in .ini file.
#===============================================================================
class XDCRReplicationBaseTest(XDCRBaseTest):
    def setup_extended(self):
        """Hook run after base setup: wire the XDCR topology, then seed data."""
        self._setup_topology()
        self._load_data()
    def teardown_extended(self):
        """Teardown hook: remove all replications, remote-cluster references
        and recoveries from each cluster's master node."""
        for _, clusters in list(self._clusters_dic.items()):
            # clusters[0] is that cluster's master node (.ini ordering)
            rest = RestConnection(clusters[0])
            rest.remove_all_replications()
            rest.remove_all_remote_clusters()
            rest.remove_all_recoveries()
        # #TODO should be added 'stop replication' when API to stop will be implemented
        #        for (rest_conn, cluster_ref, rep_database, rep_id) in self._cluster_state_arr:
        #            rest_conn.stop_replication(rep_database, rep_id)
        #            rest_conn.remove_remote_cluster(cluster_ref)
    def init_parameters_extended(self):
        """Read replication-specific test-input params (topology, direction,
        seed-data options) and initialise replication state tracking."""
        # filled by _replicate_clusters with (rest_conn, dest_cluster_name, rep_id)
        self._cluster_state_arr = []
        self._cluster_topology_str = self._input.param(XDCRConstants.INPUT_PARAM_CLUSTER_TOPOLOGY,
                                                       XDCRConstants.CLUSTER_TOPOLOGY_TYPE_CHAIN)
        self._replication_direction_str = self._input.param(XDCRConstants.INPUT_PARAM_REPLICATION_DIRECTION,
                                                            XDCRConstants.REPLICATION_DIRECTION_BIDIRECTION)
        self._seed_data = self._input.param(XDCRConstants.INPUT_PARAM_SEED_DATA, False)
        # '|'-separated list of ops, e.g. "create|update"
        self._seed_data_ops_lst = self._input.param(XDCRConstants.INPUT_PARAM_SEED_DATA_OPERATION,
                                                    "create").split('|')
        self._seed_data_mode_str = self._input.param(XDCRConstants.INPUT_PARAM_SEED_DATA_MODE,
                                                     XDCRConstants.SEED_DATA_MODE_SYNC)
def _setup_topology(self):
if self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_CHAIN:
if str(self.__class__).find('upgradeXDCR') != -1:
self._setup_topology_chain(self)
else:
self._setup_topology_chain()
elif self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_STAR:
if str(self.__class__).find('upgradeXDCR') != -1:
self._set_topology_star(self)
else:
self._set_topology_star()
elif self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_RING:
self._set_topology_ring()
else:
raise Exception("Not supported cluster topology : %s", self._cluster_topology_str)
def _load_data(self):
if not self._seed_data:
return
if self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_CHAIN:
self._load_data_chain()
elif self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_STAR:
self._load_data_star()
else:
raise Exception("Seed data not supported cluster topology : %s", self._cluster_topology_str)
    def _setup_topology_chain(self):
        """Link clusters pairwise in .ini order: c1->c2, c2->c3, ...

        NOTE(review): self.src_master/self.dest_master are passed for every
        pair but are never reassigned here -- presumably set elsewhere;
        confirm they actually correspond to each (src, dest) pair.
        """
        ord_keys = self._clusters_keys_olst
        ord_keys_len = len(ord_keys)
        dest_key_index = 1
        for src_key in ord_keys:
            if dest_key_index == ord_keys_len:
                break
            dest_key = ord_keys[dest_key_index]
            src_cluster_name = self._cluster_names_dic[src_key]
            dest_cluster_name = self._cluster_names_dic[dest_key]
            self._join_clusters(src_cluster_name, self.src_master, dest_cluster_name, self.dest_master)
            dest_key_index += 1
    def _set_topology_star(self):
        """Star topology: the first cluster in .ini order becomes the hub
        (source); every other cluster is joined to it as a spoke."""
        src_master_identified = False
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            if not src_master_identified:
                # first cluster -> hub / source
                src_cluster_name = self._cluster_names_dic[key]
                self.src_master = nodes[0]
                src_master_identified = True
                continue
            dest_cluster_name = self._cluster_names_dic[key]
            self.dest_master = nodes[0]
            self._join_clusters(src_cluster_name, self.src_master, dest_cluster_name, self.dest_master)
    def _set_topology_ring(self):
        """Ring topology: chain consecutive clusters, then close the ring by
        joining the last cluster back to the first.

        NOTE(review): `key - 1` assumes cluster keys are consecutive integers
        in .ini order -- confirm against _clusters_keys_olst construction.
        """
        src_master_identified = False
        src_cluster_name = ""
        dest_cluster_name = ""
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            if not src_master_identified:
                src_cluster_name = self._cluster_names_dic[key]
                self.src_master = nodes[0]
                src_master_identified = True
                continue
            dest_cluster_name = self._cluster_names_dic[key]
            self.dest_master = nodes[0]
            # link previous cluster -> current cluster
            self._join_clusters(self._cluster_names_dic[key - 1], self._clusters_dic[key - 1][0],
                                dest_cluster_name, self.dest_master)
        # close the ring: last cluster -> first cluster
        self._join_clusters(dest_cluster_name, self.dest_master, src_cluster_name, self.src_master)
        self.sleep(30)
    def _load_data_chain(self):
        """Seed data into each cluster of the chain; for unidirectional
        replication only the first (source) cluster is loaded."""
        for key in self._clusters_keys_olst:
            cluster_node = self._clusters_dic[key][0]
            cluster_name = self._cluster_names_dic[key]
            self.log.info("Starting Load # items {0} node {1} , cluster {2}....".format(self.num_items, cluster_node,
                                                                                       cluster_name))
            self._load_gen_data(cluster_name, cluster_node)
            if self._replication_direction_str == XDCRConstants.REPLICATION_DIRECTION_UNIDIRECTION:
                break
        self.log.info("Completed Load")
def set_xdcr_param(self, param, value):
self.log.info("Setting {0} to {1} ..".format(param, value))
src_rest = RestConnection(self.src_master)
replications = src_rest.get_replications()
for repl in replications:
src_bucket = repl.get_src_bucket()
dst_bucket = repl.get_dest_bucket()
src_rest.set_xdcr_param(src_bucket.name, dst_bucket.name, param, value)
if self._replication_direction_str == XDCRConstants.REPLICATION_DIRECTION_BIDIRECTION:
dst_rest = RestConnection(self.dest_master)
replications = dst_rest.get_replications()
for repl in replications:
src_bucket = repl.get_src_bucket()
dst_bucket = repl.get_dest_bucket()
dst_rest.set_xdcr_param(src_bucket.name, dst_bucket.name, param, value)
    def _join_clusters(self, src_cluster_name, src_master, dest_cluster_name, dest_master):
        """Create a remote-cluster reference and a replication src->dest; for
        bidirectional setups also create the reverse reference/replication."""
        self._link_clusters(src_master, dest_cluster_name, dest_master)
        # Ports 9091-9990 indicate an Elasticsearch endpoint -> CAPI protocol.
        if int(self.dest_master.port) in range(9091, 9991):
            self.rep_type = 'capi'
        self._replicate_clusters(src_master, dest_cluster_name)
        if self._replication_direction_str == XDCRConstants.REPLICATION_DIRECTION_BIDIRECTION:
            self._link_clusters(dest_master, src_cluster_name, src_master)
            self._replicate_clusters(dest_master, src_cluster_name)
# Set up cluster reference
def _link_clusters(self, src_master, remote_cluster_name, dest_master):
rest_conn_src = RestConnection(src_master)
certificate = ""
if self._demand_encryption:
rest_conn_dest = RestConnection(dest_master)
certificate = rest_conn_dest.get_cluster_ceritificate()
if int(dest_master.port) in range(9091, 9991):
rest_conn_src.add_remote_cluster(dest_master.ip, dest_master.port,
dest_master.es_username,
dest_master.es_password, remote_cluster_name,
demandEncryption=self._demand_encryption, certificate=certificate)
else:
rest_conn_src.add_remote_cluster(dest_master.ip, dest_master.port,
dest_master.rest_username,
dest_master.rest_password, remote_cluster_name,
demandEncryption=self._demand_encryption, certificate=certificate)
    def _modify_clusters(self, src_cluster_name, src_master, dest_cluster_name, dest_master, require_encryption=None):
        """Update the remote-cluster reference on `src_master`, optionally
        enabling encrypted XDCR (the destination certificate is fetched first).

        NOTE(review): src_cluster_name is unused -- apparently kept only for
        signature symmetry with callers; confirm before removing.
        """
        rest_conn_src = RestConnection(src_master)
        certificate = ""
        if require_encryption:
            rest_conn_dest = RestConnection(dest_master)
            certificate = rest_conn_dest.get_cluster_ceritificate()
        rest_conn_src.modify_remote_cluster(dest_master.ip, dest_master.port,
                                            dest_master.rest_username,
                                            dest_master.rest_password, dest_cluster_name,
                                            demandEncryption=require_encryption, certificate=certificate)
    # Switch to encrypted xdcr
    def _switch_to_encryption(self, cluster):
        """Enable encryption on every remote-cluster reference of the chosen
        side(s); `cluster` is a string containing "source" and/or
        "destination"."""
        if "source" in cluster:
            src_remote_clusters = RestConnection(self.src_master).get_remote_clusters()
            for remote_cluster in src_remote_clusters:
                self._modify_clusters(None, self.src_master, remote_cluster['name'], self.dest_master,
                                      require_encryption=1)
        if "destination" in cluster:
            dest_remote_clusters = RestConnection(self.dest_master).get_remote_clusters()
            for remote_cluster in dest_remote_clusters:
                self._modify_clusters(None, self.dest_master, remote_cluster['name'], self.src_master,
                                      require_encryption=1)
    # Set up replication
    def _replicate_clusters(self, src_master, dest_cluster_name):
        """Start a continuous replication to `dest_cluster_name` for every
        bucket on `src_master`, recording per-bucket start time and state."""
        rest_conn_src = RestConnection(src_master)
        for bucket in self._get_cluster_buckets(src_master):
            rep_id = rest_conn_src.start_replication(XDCRConstants.REPLICATION_TYPE_CONTINUOUS,
                                                     bucket.name, dest_cluster_name, self.rep_type)
            # used later to measure replication duration per bucket
            self._start_replication_time[bucket.name] = datetime.datetime.now()
            self._cluster_state_arr.append((rest_conn_src, dest_cluster_name, rep_id))
        self.sleep(5)
"""merge 2 different kv strores from different clsusters/buckets
assume that all elements in the second kvs are more relevant.
Returns:
merged kvs, that we expect to get on both clusters
"""
def __merge_keys(self, kv_store_first, kv_store_second, kvs_num=1):
valid_keys_first, deleted_keys_first = kv_store_first[kvs_num].key_set()
valid_keys_second, deleted_keys_second = kv_store_second[kvs_num].key_set()
for key in valid_keys_second:
# replace/add the values for each key in first kvs
if key not in valid_keys_first:
partition1 = kv_store_first[kvs_num].acquire_partition(key)
partition2 = kv_store_second[kvs_num].acquire_partition(key)
key_add = partition2.get_key(key)
partition1.set(key, key_add["value"], key_add["expires"], key_add["flag"])
kv_store_first[kvs_num].release_partition(key)
kv_store_second[kvs_num].release_partition(key)
for key in deleted_keys_second:
# add deleted keys to first kvs if the where deleted only in second kvs
if key not in deleted_keys_first:
partition1 = kv_store_first[kvs_num].acquire_partition(key)
partition1.delete(key)
kv_store_first[kvs_num].release_partition(key)
    def __do_merge_buckets(self, src_master, dest_master, bidirection):
        """Merge expected kv stores of same-named buckets on the two masters;
        the destination bucket then adopts the (merged) source kvs."""
        src_buckets = self._get_cluster_buckets(src_master)
        dest_buckets = self._get_cluster_buckets(dest_master)
        for src_bucket in src_buckets:
            for dest_bucket in dest_buckets:
                if src_bucket.name == dest_bucket.name:
                    if bidirection:
                        # fold destination-side mutations into the source kvs first
                        self.__merge_keys(src_bucket.kvs, dest_bucket.kvs, kvs_num=1)
                    dest_bucket.kvs[1] = src_bucket.kvs[1]
    def merge_buckets(self, src_master, dest_master, bidirection=True):
        """Merge expected kv stores between source and destination so later
        verification compares both clusters against the same expected data."""
        self.log.info("merge buckets {0}->{1}, bidirection:{2}".format(src_master.ip, dest_master.ip, bidirection))
        # Wait for expiration if not already done
        if self._expires and not self._wait_for_expiration:
            self.sleep(self._expires, "Waiting for expiration of updated items")
        if self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_CHAIN:
            self.__do_merge_buckets(src_master, dest_master, bidirection)
        elif self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_STAR:
            # NOTE(review): assumes spoke clusters live at integer keys
            # 1..n-1 of _clusters_dic -- confirm the key scheme.
            for i in range(1, len(self._clusters_dic)):
                dest_cluster = self._clusters_dic[i]
                self.__do_merge_buckets(src_master, dest_cluster[0], bidirection)
    def do_merge_bucket(self, src_master, dest_master, bidirection, bucket):
        """Like merge_buckets, but restricted to the single named `bucket`."""
        # Wait for expiration if not already done
        if self._expires and not self._wait_for_expiration:
            self.sleep(self._expires, "Waiting for expiration of updated items")
        src_buckets = self._get_cluster_buckets(src_master)
        dest_buckets = self._get_cluster_buckets(dest_master)
        for src_bucket in src_buckets:
            for dest_bucket in dest_buckets:
                if src_bucket.name == dest_bucket.name and bucket.name == src_bucket.name:
                    if bidirection:
                        self.__merge_keys(src_bucket.kvs, dest_bucket.kvs, kvs_num=1)
                    dest_bucket.kvs[1] = src_bucket.kvs[1]
    def _wait_for_replication_to_catchup(self, timeout=1200):
        """Poll curr_items on both masters until the counts match for every
        bucket, failing the test if `timeout` seconds elapse first."""
        self._expiry_pager(self.src_master)
        self._expiry_pager(self.dest_master)
        self.sleep(15)
        rest1 = RestConnection(self.src_master)
        rest2 = RestConnection(self.dest_master)
        # 20 minutes by default
        end_time = time.time() + timeout
        # NOTE(review): end_time is shared across buckets, so later buckets get
        # less of the budget; the overall wait is still bounded by `timeout`.
        for bucket in self.buckets:
            _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
            _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
            # (time.time() - end_time) < 0  <=>  still before the deadline
            while _count1 != _count2 and (time.time() - end_time) < 0:
                self.sleep(60, "Expected: {0} items, found: {1}. Waiting for replication to catch up ..".format(_count1, _count2))
                _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
                _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
            if _count1 != _count2:
                self.fail("not all items replicated in {0} sec for {1} bucket. on source cluster:{2}, on dest:{3}".\
                          format(timeout, bucket.name, _count1, _count2))
            self.log.info("Replication caught up for bucket {0}: {1}".format(bucket.name, _count1))
""" Gets the required xdcr stat value for the given bucket from a server """
def get_xdcr_stat(self, server, bucket_name, param):
return int(RestConnection(server).fetch_bucket_stats(bucket_name)
['op']['samples'][param][-1])
    def _verify_revIds(self, src_server, dest_server, kv_store=1, timeout=900):
        """Compare revision metadata (CAS/seqno/expiry/flags) for same-named
        buckets across the two servers; return the total mismatch count."""
        error_count = 0
        tasks = []
        for src_bucket in self._get_cluster_buckets(src_server):
            for dest_bucket in self._get_cluster_buckets(dest_server):
                if src_bucket.name == dest_bucket.name:
                    self.log.info("Verifying RevIds for {0} -> {1}, bucket: {2}"
                                  .format(src_server.ip, dest_server.ip, src_bucket))
                    task_info = self.cluster.async_verify_revid(
                        src_server,
                        dest_server,
                        src_bucket,
                        src_bucket.kvs[kv_store],
                        dest_bucket.kvs[kv_store],
                        compression=self.sdk_compression)
                    tasks.append(task_info)
        for task in tasks:
            task.result(timeout)
            error_count += task.err_count
            if task.err_count:
                # log which keys were missing on which node
                for ip, values in task.keys_not_found.items():
                    if values:
                        self.log.error("%s keys not found on %s:%s" % (len(values), ip, values))
        return error_count
def _expiry_pager(self, master, val=10):
buckets = self._get_cluster_buckets(master)
for bucket in buckets:
ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", val, bucket)
self.log.info("wait for expiry pager to run on all these nodes")
    def _wait_flusher_empty(self, master, servers, timeout=120):
        """Block until ep_queue_size (the disk write queue) reaches 0 for every
        bucket on every server; asserts the cluster reports buckets at all."""
        tasks = []
        buckets = self._get_cluster_buckets(master)
        self.assertTrue(buckets, "No buckets recieved from the server {0} for verification".format(master.ip))
        for server in servers:
            for bucket in buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '', 'ep_queue_size', '==', 0))
        for task in tasks:
            task.result(timeout)
    def _verify_data_all_buckets(self, server, kv_store=1, timeout=None, max_verify=None, only_store_hash=True, batch_size=1000):
        """Verify each bucket's data on `server` against its expected kv store
        via async verification tasks; blocks until all complete."""
        tasks = []
        buckets = self._get_cluster_buckets(server)
        self.assertTrue(buckets, "No buckets recieved from the server {0} for verification".format(server.ip))
        for bucket in buckets:
            tasks.append(self.cluster.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify,
                                                        only_store_hash, batch_size, timeout_sec=60,
                                                        compression=self.sdk_compression))
        for task in tasks:
            task.result(timeout)
    def _verify_item_count(self, master, servers, timeout=120):
        """Wait until item-count stats on `servers` match the expected kv-store
        totals (active and, when replicas exist, replica counts).

        Returns the expected item count of the last bucket on success; on
        timeout it logs and falls through (returning None) so metadata checks
        can still run.
        """
        stats_tasks = []
        buckets = self._get_cluster_buckets(master)
        self.assertTrue(buckets, "No buckets received from the server {0} for verification".format(master.ip))
        for bucket in buckets:
            items = sum([len(kv_store) for kv_store in list(bucket.kvs.values())])
            for stat in ['curr_items', 'vb_active_curr_items']:
                stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                                                     stat, '==', items))
            if self._num_replicas >= 1 and len(servers) > 1:
                stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                                                     'vb_replica_curr_items', '==', items * self._num_replicas))
        try:
            for task in stats_tasks:
                task.result(timeout)
            return items
        # NOTE(review): presumably a project-level TimeoutError raised by
        # task.result(), not the builtin -- confirm which name is in scope.
        except TimeoutError:
            self.log.error('ERROR: Timed-out waiting for item count to match.'
                           ' Will proceed to do metadata checks.')
# CBQE-1695 Wait for replication_changes_left (outbound mutations) to be 0.
def __wait_for_outbound_mutations_zero(self, master_node, timeout=180):
self.log.info("Waiting for Outbound mutation to be zero on cluster node: %s" % master_node.ip)
buckets = self._get_cluster_buckets(master_node)
curr_time = time.time()
end_time = curr_time + timeout
rest = RestConnection(master_node)
while curr_time < end_time:
found = 0
for bucket in buckets:
mutations = int(rest.get_xdc_queue_size(bucket.name))
self.log.info("Current outbound mutations on cluster node: %s for bucket %s is %s" % (master_node.ip, bucket.name, mutations))
if mutations == 0:
found = found + 1
if found == len(buckets):
break
self.sleep(10)
end_time = end_time - 10
else:
# MB-9707: Updating this code from fail to warning to avoid test to abort, as per this
# bug, this particular stat i.e. replication_changes_left is buggy.
self.log.error("Timeout occurs while waiting for mutations to be replicated")
return False
return True
"""Verify the stats at the destination cluster
1. Data Validity check - using kvstore-node key-value check
2. Item count check on source versus destination
3. For deleted and updated items, check the CAS/SeqNo/Expiry/Flags for same key on source/destination
* Make sure to call expiry_pager function to flush out temp items(deleted/expired items)"""
def verify_xdcr_stats(self, src_nodes, dest_nodes, timeout=200):
self.collect_data_files = True
self._expiry_pager(self.src_nodes[0], val=10)
self._expiry_pager(self.dest_nodes[0], val=10)
self.sleep(10)
if self._failover is not None or self._rebalance is not None:
timeout *= 3
# Wait for ep_queue_size on source to become 0
self.log.info("Verify xdcr replication stats at Source Cluster : {0}".format(self.src_master.ip))
self._wait_flusher_empty(self.src_master, src_nodes)
# Wait for ep_queue_size on dest to become 0
self.log.info("Verify xdcr replication stats at Destination Cluster : {0}".format(self.dest_master.ip))
self._wait_flusher_empty(self.dest_master, dest_nodes)
mutations_replicated = True
data_verified = False
try:
# Source validations
mutations_replicated &= self.__wait_for_outbound_mutations_zero(self.dest_master)
self._verify_item_count(self.src_master, src_nodes, timeout=timeout)
self._verify_data_all_buckets(self.src_master, max_verify=self.max_verify)
# Dest validations
mutations_replicated &= self.__wait_for_outbound_mutations_zero(self.src_master)
self._verify_item_count(self.dest_master, dest_nodes, timeout=timeout)
self._verify_data_all_buckets(self.dest_master, max_verify=self.max_verify)
data_verified = True
finally:
errors_caught = 0
bidirection = self._replication_direction_str == XDCRConstants.REPLICATION_DIRECTION_BIDIRECTION
if self._doc_ops is not None and ("update" in self._doc_ops or "delete" in self._doc_ops):
errors_caught += self._verify_revIds(self.src_master, self.dest_master)
if bidirection and self._doc_ops_dest is not None and ("update" in self._doc_ops_dest or "delete" in self._doc_ops_dest):
errors_caught += self._verify_revIds(self.dest_master, self.src_master)
if errors_caught > 0:
self.fail("Mismatches on Meta Information on xdcr-replicated items!")
if data_verified:
self.collect_data_files = False
if not mutations_replicated:
if str(self.__class__).find('cbrecovery'):
self.log.error("Test should be failed because Outbound mutations has not become zero(MB-9707), check the test logs above. \
but we skip the failure for current test")
else:
self.fail("Test is failed as Outbound mutations has not become zero, check the test logs above.")
    def verify_results(self):
        """Validate replication directly for a 2-cluster pair, or iterate over
        the destination clusters when more than two clusters are defined."""
        dest_key_index = 1
        if len(self.ord_keys) == 2:
            src_nodes = self.get_servers_in_cluster(self.src_master)
            dest_nodes = self.get_servers_in_cluster(self.dest_master)
            self.verify_xdcr_stats(src_nodes, dest_nodes)
        else:
            # Checking replication at destination clusters when more than 2 clusters defined
            # NOTE(review): dest_key_index is never incremented in this loop,
            # so the break guard only fires when ord_keys_len == 1 -- confirm.
            for cluster_num in self.ord_keys[1:]:
                if dest_key_index == self.ord_keys_len:
                    break
                self.dest_nodes = self._clusters_dic[cluster_num]
                self.verify_xdcr_stats(self.src_nodes, self.dest_nodes)
|
sdk_worker_main.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK Fn Harness entry point."""
from __future__ import absolute_import
import http.server
import json
import logging
import os
import re
import sys
import threading
import traceback
from builtins import object
from google.protobuf import text_format
from apache_beam.internal import pickler
from apache_beam.options import pipeline_options
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.internal import names
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.utils import profiler
# This module is experimental. No backwards-compatibility guarantees.
class StatusServer(object):
  """Debug HTTP server that serves a stack dump of all live threads on GET."""

  @classmethod
  def get_thread_dump(cls):
    """Return a list of text lines describing every live thread's stack."""
    lines = []
    frames = sys._current_frames()  # pylint: disable=protected-access
    for t in threading.enumerate():
      # A thread can terminate between enumerate() and the frame lookup;
      # skip it instead of raising KeyError.
      frame = frames.get(t.ident)
      if frame is None:
        continue
      lines.append('--- Thread #%s name: %s ---\n' % (t.ident, t.name))
      lines.append(''.join(traceback.format_stack(frame)))
    return lines

  def start(self, status_http_port=0):
    """Executes the serving loop for the status server.

    Args:
      status_http_port(int): Binding port for the debug server.
        Default is 0 which means any free unsecured port
    """

    class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
      """HTTP handler for serving stacktraces of all threads."""

      def do_GET(self):  # pylint: disable=invalid-name
        """Return all thread stacktraces information for GET request."""
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.end_headers()
        for line in StatusServer.get_thread_dump():
          # Fix: wfile is a binary stream under Python 3; writing str raised
          # TypeError, so the dump must be encoded first.
          self.wfile.write(line.encode('utf-8'))

      def log_message(self, f, *args):
        """Do not log any messages."""
        pass

    self.httpd = httpd = http.server.HTTPServer(
        ('localhost', status_http_port), StatusHttpHandler)
    logging.info('Status HTTP server running at %s:%s', httpd.server_name,
                 httpd.server_port)
    httpd.serve_forever()
def main(unused_argv):
  """Main entry point for SDK Fn Harness."""
  # Wire Python logging to the runner's logging service when its endpoint
  # is advertised through the environment.
  if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
    logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
    text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
                      logging_service_descriptor)
    # Send all logs to the runner.
    fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
    # TODO(BEAM-5468): This should be picked up from pipeline options.
    logging.getLogger().setLevel(logging.INFO)
    logging.getLogger().addHandler(fn_log_handler)
    logging.info('Logging handler created.')
  else:
    fn_log_handler = None
  # Start status HTTP server thread (daemon: dies with the process).
  thread = threading.Thread(target=StatusServer().start)
  thread.daemon = True
  thread.setName('status-server-demon')
  thread.start()
  if 'PIPELINE_OPTIONS' in os.environ:
    sdk_pipeline_options = _parse_pipeline_options(
        os.environ['PIPELINE_OPTIONS'])
  else:
    sdk_pipeline_options = PipelineOptions.from_dictionary({})
  if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
    semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
  else:
    semi_persistent_directory = None
  logging.info('semi_persistent_directory: %s', semi_persistent_directory)
  # Best effort: a failed main-session load is logged but does not abort
  # the harness.
  try:
    _load_main_session(semi_persistent_directory)
  except Exception:  # pylint: disable=broad-except
    exception_details = traceback.format_exc()
    logging.error(
        'Could not load main session: %s', exception_details, exc_info=True)
  try:
    logging.info('Python sdk harness started with pipeline_options: %s',
                 sdk_pipeline_options.get_all_options(drop_default=True))
    service_descriptor = endpoints_pb2.ApiServiceDescriptor()
    text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
                      service_descriptor)
    # TODO(robertwb): Support credentials.
    assert not service_descriptor.oauth2_client_credentials_grant.url
    SdkHarness(
        control_address=service_descriptor.url,
        worker_count=_get_worker_count(sdk_pipeline_options),
        profiler_factory=profiler.Profile.factory_from_options(
            sdk_pipeline_options.view_as(pipeline_options.ProfilingOptions))
    ).run()
    logging.info('Python sdk harness exiting.')
  except:  # pylint: disable=broad-except
    logging.exception('Python sdk harness failed: ')
    raise
  finally:
    # Flush/close the runner log handler even on failure.
    if fn_log_handler:
      fn_log_handler.close()
def _parse_pipeline_options(options_json):
  """Parse a JSON pipeline-options blob into a PipelineOptions object.

  Accepts either the legacy {'options': {...}} wrapper or a flat mapping
  whose keys may carry the portable 'beam:option:<key>:v1' URN wrapper.
  """
  options = json.loads(options_json)
  # Check the options field first for backward compatibility.
  if 'options' in options:
    return PipelineOptions.from_dictionary(options.get('options'))
  # Remove the extra URN part from each key; the old dict comprehension ran
  # re.match twice per key -- match once and reuse the result.
  portable_option_regex = r'^beam:option:(?P<key>.*):v1$'
  stripped = {}
  for k, v in options.items():
    match = re.match(portable_option_regex, k)
    stripped[match.group('key') if match else k] = v
  return PipelineOptions.from_dictionary(stripped)
def _get_worker_count(pipeline_options):
  """Extract worker count from the pipeline_options.

  This defines how many SdkWorkers will be started in this Python process.
  And each SdkWorker will have its own thread to process data. Name of the
  experimental parameter is 'worker_threads'
  Example Usage in the Command Line:
    --experimental worker_threads=1

  Note: worker_threads is an experimental flag and might not be available in
  future releases.

  Returns:
    an int containing the worker_threads to use. Default is 12
  """
  experiments = pipeline_options.view_as(DebugOptions).experiments or []
  for experiment in experiments:
    # There should only be 1 match, so return from the loop on the first
    # hit; a single anchored match replaces the old redundant double
    # re.match of the same experiment string.
    match = re.match(r'worker_threads=(?P<worker_threads>.*)', experiment)
    if match:
      return int(match.group('worker_threads'))
  return 12
def _load_main_session(semi_persistent_directory):
  """Loads a pickled main session from the path specified."""
  if not semi_persistent_directory:
    logging.warning(
        'No semi_persistent_directory found: Functions defined in __main__ '
        '(interactive session) may fail.')
    return
  session_file = os.path.join(semi_persistent_directory, 'staged',
                              names.PICKLED_MAIN_SESSION_FILE)
  if not os.path.isfile(session_file):
    logging.warning(
        'No session file found: %s. Functions defined in __main__ '
        '(interactive session) may fail.', session_file)
    return
  pickler.load_session(session_file)
if __name__ == '__main__':
  # Script entry point: argv is passed through but unused by main().
  main(sys.argv)
|
Local.py | # Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
'''
Asynchronous local execution.
Supports multicore architectures.
'''
from Bio.PopGen.Async import Async
import threading
__docformat__ = "restructuredtext en"
class Local(Async):
    '''Execution on Local machine.
    '''
    def __init__(self, num_cores=1):
        '''Constructor.

        parameters:
        - num_cores - Number of cores (for multiprocessor machines,
          multiply accordingly)
        '''
        Async.__init__(self)
        self.num_cores = num_cores
        self.cores_used = 0

    def _run_program(self, id, hook, parameters, input_files):
        '''Run program.

        For parameters, please check Async.run_program.

        Either runs a program if a core is available or
        schedules it.
        '''
        self.access_ds.acquire()
        self.waiting.append((id, hook, parameters, input_files))
        if self.cores_used < self.num_cores:
            self.cores_used += 1
            # Fix: Thread(...).run() executed the worker synchronously on the
            # calling thread (and while access_ds was still held); start()
            # spawns an actual worker thread, as the module's "asynchronous
            # local execution" purpose requires.
            threading.Thread(target=self.start_work).start()
        self.access_ds.release()

    def start_work(self):
        '''Starts work.

        Thread initial point.
        While there are tasks to be done, runs them.
        The thread dies as soon as there is nothing waiting to be
        executed.
        '''
        self.access_ds.acquire()
        while (len(self.waiting) > 0):
            id, hook, parameters, input_files = self.waiting[0]
            del self.waiting[0]
            self.running[id] = True
            # release while the (potentially long) job runs
            self.access_ds.release()
            ret_code, output_files = hook.run_job(parameters, input_files)
            self.access_ds.acquire()
            del self.running[id]
            self.done[id] = ret_code, output_files
        # this worker thread is exiting: give its core back
        self.cores_used -= 1
        self.access_ds.release()
|
PyNmapScanner.py | import nmap, argparse
from threading import Thread
def nmapScan(tgtHost, tgtPort):
    # Scan a single TCP port on tgtHost with nmap and print its state.
    # tgtPort is a string (it is used both for the scan and for display).
    nScan = nmap.PortScanner()
    nScan.scan(tgtHost,tgtPort)
    state=nScan[tgtHost]['tcp'][int(tgtPort)]['state']
    print " [*] " + tgtHost + " tcp/"+tgtPort +" "+state
def main():
parser = argparse.ArgumentParser(description="A simple Python port scanner using the nmap library")
parser.add_argument('-H', type=str, help='specify target host')
parser.add_argument('-p', metavar='N', type=str, nargs='+', help='specify port numbers')
args = parser.parse_args()
tgtHost = args.H
tgtPorts = args.p
if (tgtHost == None) | (tgtPorts[0] == None):
print '[-] You must specify a target host and port[s].'
exit(0)
for tgtPort in tgtPorts:
t = Thread(target=nmapScan, args=(tgtHost, tgtPort))
t.start()
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
|
autotaskbackdealer.py | """task feedback data dealer"""
# -*- coding:utf-8 -*-
import math
import queue
import threading
import time
import traceback
from commonbaby.helpers import helper_time
from datacontract import (DataMatcher, DataSeg, ECommandStatus, InputData,
AutomatedTask, AutotaskBack, AutotaskBatchBack)
from dataparser import DataParser
from outputmanagement import OutputManagement
from ..dbmanager import EDBAutomic
from .taskbackdealerbase import TaskBackDealerBase
class AutoTaskBackDealer(TaskBackDealerBase):
    """Deals with feedback data for automated (batch) tasks.

    Sub-task feedback (AutotaskBatchBack) is parsed from input data,
    persisted to the local database, forwarded to the center, and the
    enclosing overall task's completion progress / final result is
    reported by a dedicated worker thread.
    """

    def __init__(
            self,
            uniquename: str,
            datamatcher: DataMatcher,
            relation_inputer_src: list = None,
    ):
        TaskBackDealerBase.__init__(self, uniquename, datamatcher,
                                    relation_inputer_src)
        # Queue of TaskBatchBack items that failed processing, kept for
        # re-handling.
        self._tbberrorq: queue.Queue = queue.Queue()
        # Queue of overall-task feedback items waiting to be processed.
        self._tbqueue: queue.Queue = queue.Queue()
        # Worker thread that processes overall-task feedback.
        self._t_taskback_deal = threading.Thread(
            target=self._taskback_deal, name='taskback_deal', daemon=True)
        # self._t_errortb_rehandle = threading.Thread(
        #     target=self._errortb_rehandle,
        #     name='errortb_rehandle',
        #     daemon=True)

    def _startsub(self):
        """Start the processing thread(s)."""
        # NOTE(review): peeks at Thread's private _started event to avoid
        # a double start — fragile across Python versions; consider
        # tracking started state explicitly.
        if not self._t_taskback_deal._started._flag:
            self._t_taskback_deal.start()
        # if not self._t_errortb_rehandle._started._flag:
        #     self._t_errortb_rehandle.start()

    def _parse_data_back(self, data: InputData) -> iter:
        """Parses the input data and yields one AutotaskBatchBack per
        data segment."""
        try:
            for seg in DataParser.parse_standard_data(data.stream):
                seg: DataSeg = seg
                try:
                    tb: AutotaskBatchBack = AutotaskBatchBack.create_from_dataseg(
                        seg, data._platform)
                    tb.inputdata = data
                    yield tb
                except Exception:
                    self._logger.error(
                        "Parse one data segment error:\ndata:{}\nsegindex:{}\nerror:{}"
                        .format(data._source, seg.segindex,
                                traceback.format_exc()))
                    # If any single segment fails to parse, the whole
                    # input is counted as bad data.
                    data.on_complete(False)
        except Exception:
            self._logger.error(
                "Parse TaskBatchBack data error:\ndata:{}\nerror:{}".format(
                    data._source, traceback.format_exc()))

    def _deal_data_back(self, tb: AutotaskBatchBack) -> bool:
        """Deals with one AutotaskBatchBack (sub-task feedback).

        Returns True when the sub task was fully handled (db updated and
        feedback forwarded), False otherwise."""
        # ==================
        # Update the cmdstatus and related fields of the given sub task,
        # then judge whether the overall task has completed/finished, and
        # report the overall task's progress or final result.
        # The overall task is considered finished when every sub task
        # under it is in one of these states:
        #   Failed/Succeed/Cancelled/Timeout
        # When a sub task reports NeedSmsCode, a marker must be stored in
        # the database to confirm whether the center received the message
        # and sent down a verification code; if no code arrives for a long
        # time and no timeout error comes from the collector either, the
        # controller must time the task out itself (the collector-side
        # task is presumed dead in that case).
        # ==================
        res: bool = False
        # Update the sub-task status in the database under the
        # deduplication lock.
        currperiodnum = tb.periodnum
        with self._dbmanager.get_automic_locker(
                EDBAutomic.ReduplicateAutoTask):
            task: AutomatedTask = self._dbmanager.get_auto_batch_task(
                tb._platform, tb.taskid, tb.batchid)
            if tb.periodnum < task.periodnum:
                # Stale feedback from an earlier period — ignore it.
                self._logger.error(
                    "AutotaskBatchBack periodnum({}) < task.periodnum({}) in db:\ntaskid:{}\nbatchid:{}"
                    .format(tb.periodnum, task.periodnum, task.taskid,
                            task.batchid))
                return res
            # if tb._sequence > task._sequence:
            # not needed for self-looping tasks
            if not self._deal_equal_period(tb, task):
                return res
            # Forward the sub-task feedback file to the center.
            if not OutputManagement.output(tb):
                tb._cmdrcvmsg = "输出AutomatedTask子任务回馈数据失败"
                self._logger.error(
                    "Output AutotaskBatchBackBack file to center failed:\ndata:{}\ntaskid:{}\nbatchid:{}"
                    .format(tb.inputdata.name, tb.taskid, tb.batchid))
                return res
            # Reaching this point means the sub task was handled
            # successfully.
            tb.ok = True
            self._logger.info(
                "AutotaskBatchBackBack dealt [{} {}]:\ndata:{}\ntaskid:{}\nbatchid:{}\nperiodnum:{}"
                .format(tb.cmdstatus.name, tb.progress, tb.inputdata.name,
                        tb.taskid, tb.batchid, currperiodnum))
            # Hand the overall task to the feedback worker thread.
            self._tbqueue.put(task)
            res = True
        return res

    def _deal_equal_period(self, tb: AutotaskBatchBack,
                           task: AutomatedTask) -> bool:
        """Persists the sub-task state for the current period; bumps the
        overall task's completed-batch count when the sub task reached a
        terminal status.  Returns True on success."""
        res: bool = False
        try:
            # When the sub task is in a terminal state, the overall
            # task's batchcompletecount is increased by one.
            if tb._cmdstatus==ECommandStatus.Failed or \
                tb._cmdstatus==ECommandStatus.Succeed or \
                tb._cmdstatus==ECommandStatus.Timeout or \
                tb._cmdstatus==ECommandStatus.Cancelled:
                tb.progress = 1
                if not self._dbmanager.is_autobtask_batchcompletecount_increaced(
                        tb._platform, tb.taskid, tb.batchid):
                    if not self._dbmanager.increace_autotask_batch_complete_count(
                            tb._platform, tb.taskid, tb.batchid):
                        tb._cmdrcvmsg = "全自动子任务完成数量增加到数据库失败"
                        self._logger.error(
                            "Increace AutomatedTask batch complete count failed:\ndata:{}\ntaskid={}\nbatchid={}\nperiodnum={}\nbatchtask_cmdstatus={}"
                            .format(tb.inputdata.name, tb.taskid, tb.batchid,
                                    tb.periodnum, tb._cmdstatus.name))
                        return res
                    elif not task.isbatchcompletecountincreased:
                        # Mirror the increment on the in-memory object so
                        # the worker thread sees the updated count.
                        task.batchcompletecount += 1
                        task.isbatchcompletecountincreased = True
                # Fields written below:
                # PeriodNum=?,
                # Status=?,
                # Progress=?,
                # CmdRcvMsg=?,
                # Sequence=?,
                # UpdateTime=?
                if not self._dbmanager.update_automated_batch_task(
                        tb._platform,
                        tb.taskid,
                        tb.batchid,
                        updatefields={
                            # 'PeriodNum': tb.periodnum,
                            'Status': tb.cmdstatus.value,
                            'Progress': 1,
                            'CmdRcvMsg': tb.cmdrcvmsg,
                            'Sequence': tb.sequence,
                            'UpdateTime': tb.time,
                            'LastEndTime': helper_time.ts_since_1970_tz(),
                        }):
                    tb._cmdrcvmsg = "更新Autom子任务状态到本地数据库失败"
                    self._logger.error(
                        "Update AutotaskBatchBack to db failed:\ndata:{}\ntaskid:{}\nbatchid:{}"
                        .format(tb.inputdata.name, tb.taskid, tb.batchid))
                    return res
            res = True
        except Exception:
            self._logger.error(
                "Deal first period task back error:\ntaskid:{}\nbatchid:{}\nclientid:{}\nerror:{}"
                .format(task.taskid, task.batchid, task._clientid,
                        traceback.format_exc()))
        return res

    def _output_taskback(self, tb, status: ECommandStatus, msg: str):
        """Sends back an overall-task feedback with the given status and
        message."""
        if not isinstance(tb, AutotaskBatchBack):
            self._logger.error(
                "Invalid AutotaskBatchBack object for output taskback: {}".
                format(type(tb)))
            return
        tb: AutotaskBatchBack = tb
        tb.cmdstatus = status
        tb.cmdrcvmsg = msg
        if not OutputManagement.output(tb):
            self._logger.error(
                "Output TaskBack failed:\ntaskid={}\ndata={}".format(
                    tb._task.taskid, tb._task.inputdata._source))

    def _generate_err_taskbatchback(self, tb: AutotaskBatchBack):
        """Reports an internal controller-side processing error back to
        the center."""
        try:
            tb._cmdrcvmsg = "内部错误:{}".format(tb._cmdrcvmsg)
            tb.cmdstatus = ECommandStatus.Failed
            if not OutputManagement.output(tb):
                self._logger.error(
                    "Generate error AutotaskBatchBack to center error:\ndata:{}\ntaskid:{}\nbatchid:{}\nerror:{}"
                    .format(tb.inputdata.name, tb.taskid, tb.batchid,
                            traceback.format_exc()))
        except Exception:
            self._logger.error(
                "Generate error AutotaskBatchBack to center error:\ndata:{}\ntaskid:{}\nbatchid:{}\nerror:{}"
                .format(tb.inputdata.name, tb.taskid, tb.batchid,
                        traceback.format_exc()))

    def _taskback_deal(self):
        """Worker loop that processes overall-task feedback (daemon
        thread; runs forever)."""
        got: bool = False
        while True:
            try:
                got = False
                task: AutomatedTask = self._tbqueue.get(timeout=3)
                got = True
                # Send back the overall-task feedback.
                # Percentage feedback is not retried on failure for now,
                # because retrying would break sequence ordering.
                # if task.batchtotalcount > task.batchcompletecount:
                # When some of the sub tasks under the overall task are
                # complete, report the overall completion percentage.
                # Not every sub-task feedback needs a matching percentage
                # report; it may be throttled (e.g. by taking a modulus).
                seq_in_db: int = self._dbmanager.get_autotask_sequence(
                    task._platform, task.taskid)
                task.sequence_set(seq_in_db)
                self._generate_task_back_percent(task)
                # Report the overall task as finished once; no retry on
                # failure for now due to the sequence issue.
                if task.batchtotalcount <= task.batchcompletecount:
                    # Judge whether the overall task succeeded: a single
                    # successful sub task counts the whole task a success.
                    succcount: int = self._dbmanager.get_autobtask_count_by_cmdstatus(
                        task._platform, task.taskid, ECommandStatus.Succeed)
                    if succcount > 0:
                        task.cmdstatus = ECommandStatus.Succeed
                        task.cmdrcvmsg = "任务执行完成"
                    else:
                        task.cmdstatus = ECommandStatus.Failed
                        task.cmdrcvmsg = "任务执行失败"
                    # Send AutomatedTaskBack to the center first to bump
                    # its sequence counter; on failure a separate checking
                    # thread is supposed to re-send this feedback.
                    taskback: AutotaskBack = AutotaskBack.create_from_task(
                        task,
                        task.cmdstatus,
                        task.cmdrcvmsg,
                        batchcompletecount=task.batchtotalcount)
                    if not OutputManagement.output(taskback):
                        self._logger.error(
                            "Output AutomatedTaskBack failed:\ntaskid={}".
                            format(task.taskid))
                    else:
                        self._logger.info(
                            "AutomatedTaskBack generated [{}]:\ntaskid={}".
                            format(task.cmdstatus.name, task.taskid))
                    # Update the overall task status in the database.
                    # (A dedicated thread should detect overall tasks whose
                    # status never got updated and fix them up.)
                    # task.periodnum += 1  -- already incremented by the
                    # sub-task handling.
                    task.lastendtime = helper_time.ts_since_1970_tz()
                    with self._dbmanager.get_automic_locker(
                            EDBAutomic.ReduplicateAutoTask):
                        if not self._dbmanager.update_automated_task2(task):
                            self._logger.error(
                                "Update AutomatedTask status failed:\ntaskid={}\ncmdstatus={}"
                                .format(task.taskid, task.cmdstatus.name))
            except queue.Empty:
                # Normal idle timeout — loop again.
                pass
            except Exception:
                self._logger.error("Taskback deal error: {}".format(
                    traceback.format_exc()))
            finally:
                if got:
                    self._tbqueue.task_done()

    def _generate_task_back_percent(self, task: AutomatedTask) -> bool:
        """Reports the overall-task completion percentage, throttled by
        the number of sub tasks.  Returns True when nothing needed to be
        sent or sending succeeded."""
        res: bool = False
        try:
            currpercent = math.floor(
                task.batchcompletecount / task.batchtotalcount * 100)
            lastprogress = task.progress * 100
            # Throttle: res=True means "no report needed this time".
            if task.batchtotalcount > 100:
                # More than 100 sub tasks: report once per 1%.
                if currpercent - lastprogress < 1:
                    res = True
            elif 50 < task.batchtotalcount <= 100:
                # 50<x<=100 sub tasks: 25 percentage files in total,
                # i.e. report once per 4%.
                if currpercent - lastprogress < 4:
                    res = True
            else:
                # 0<x<=50 sub tasks: report roughly once per 10%.
                if currpercent - lastprogress < 10:
                    res = True
            if res:
                return res
            # Persist the newest overall Progress and send the feedback.
            task.progress = currpercent / 100
            task.cmdstatus = ECommandStatus.Progress
            if not self._dbmanager.update_automated_task2(task):
                self._logger.error(
                    "Update AutomatedTask with progress failed: taskid={} objecttype={} progress={}"
                    .format(task.taskid, task._objecttype.name, task.progress))
                return res
            taskback: AutotaskBack = AutotaskBack.create_from_task(
                task,
                ECommandStatus.Progress,
                recvmsg='{}%'.format(currpercent),
                batchcompletecount=task.batchtotalcount)
            if not OutputManagement.output(taskback):
                res = False
                self._logger.error(
                    "Output AutotaskBack progress failed:\ntaskid:{}\ncmdstatus:{}\nprogress:{}"
                    .format(task.taskid, task.cmdstatus.name, currpercent))
                return res
            res = True
            self._logger.info(
                "AutotaskBack generated [Progress {}]:\ntaskid={}".format(
                    task.progress, task.taskid))
        except Exception:
            res = False
            self._logger.error(
                "Generate AutomatedTaskBack for batch complete percent error:\ntaskid:{}\nerror:{}"
                .format(task.taskid, traceback.format_exc()))
        finally:
            pass
        return res
|
inb4404.py | #!/usr/bin/python
import urllib2, argparse, logging
import os, re, time
import httplib
import fileinput
from multiprocessing import Process
# Module-level logger used for all download progress messages.
log = logging.getLogger('inb4404')
# Directory containing this script; downloads are stored beneath it.
workpath = os.path.dirname(os.path.realpath(__file__))
# Parsed command-line arguments; populated in main().
args = None
def load(url):
    # Fetch the URL with a custom User-Agent and return the raw body.
    req = urllib2.Request(url, headers={'User-Agent': '4chan Browser'})
    return urllib2.urlopen(req).read()
def main():
    """Parse arguments, configure logging, and dispatch either a single
    thread download or a watch-file download."""
    global args
    parser = argparse.ArgumentParser(description='inb4404')
    parser.add_argument('thread', nargs=1, help='url of the thread (or filename; one url per line)')
    parser.add_argument('-n', '--use-names', action='store_true', help='use thread names instead of the thread ids (...4chan.org/board/thread/thread-id/thread-name)')
    parser.add_argument('-r', '--reload', action='store_true', help='reload the file every 5 minutes')
    parser.add_argument('-l', '--less', action='store_true', help='shows less information (surpresses checking messages)')
    parser.add_argument('-d', '--date', action='store_true', help='show date as well')
    args = parser.parse_args()

    datefmt = '%Y-%m-%d %I:%M:%S %p' if args.date else '%I:%M:%S %p'
    logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt=datefmt)

    target = args.thread[0]
    if target[:4].lower() == 'http':
        download_thread(target)
    else:
        download_from_file(target)
def download_thread(thread_link):
board = thread_link.split('/')[3]
thread = thread_link.split('/')[5].split('#')[0]
if len(thread_link.split('/')) > 6:
thread_tmp = thread_link.split('/')[6].split('#')[0]
if args.use_names or os.path.exists(os.path.join(workpath, 'downloads', board, thread_tmp)):
thread = thread_tmp
directory = os.path.join(workpath, 'downloads', board, thread)
if not os.path.exists(directory):
os.makedirs(directory)
while True:
try:
regex = '(\/\/i(?:s|)\d*\.(?:4cdn|4chan)\.org\/\w+\/(\d+\.(?:jpg|png|gif|webm)))'
for link, img in list(set(re.findall(regex, load(thread_link)))):
img_path = os.path.join(directory, img)
if not os.path.exists(img_path):
data = load('https:' + link)
log.info(board + '/' + thread + '/' + img)
with open(img_path, 'w') as f:
f.write(data)
##################################################################################
# saves new images to a seperate directory
# if you delete them there, they are not downloaded again
# if you delete an image in the 'downloads' directory, it will be downloaded again
copy_directory = os.path.join(workpath, 'new', board, thread)
if not os.path.exists(copy_directory):
os.makedirs(copy_directory)
copy_path = os.path.join(copy_directory, img)
with open(copy_path, 'w') as f:
f.write(data)
##################################################################################
except urllib2.HTTPError, err:
time.sleep(10)
try:
load(thread_link)
except urllib2.HTTPError, err:
log.info('%s 404\'d', thread_link)
break
continue
except (urllib2.URLError, httplib.BadStatusLine, httplib.IncompleteRead):
log.warning('Something went wrong')
if not args.less:
log.info('Checking ' + board + '/' + thread)
time.sleep(20)
def download_from_file(filename):
    # Watch a file of thread URLs (one per line, lines not starting with
    # 'http' are ignored), spawning one download process per new link.
    # With --reload the file is re-read every 5 minutes and finished
    # links are commented out in place by prefixing them with '-'.
    running_links = []
    while True:
        processes = []
        for link in filter(None, [line.strip() for line in open(filename) if line[:4] == 'http']):
            if link not in running_links:
                running_links.append(link)
                log.info('Added ' + link)
            process = Process(target=download_thread, args=(link, ))
            process.start()
            processes.append([process, link])
        if len(processes) == 0:
            log.warning(filename + ' empty')
        if args.reload:
            time.sleep(60 * 5) # 5 minutes
            links_to_remove = []
            for process, link in processes:
                if not process.is_alive():
                    # Process finished: its thread 404'd.
                    links_to_remove.append(link)
                else:
                    # Still running: stop it; it will be respawned on the
                    # next pass through the file.
                    process.terminate()
            for link in links_to_remove:
                # Rewrite the watch file in place, disabling the link.
                for line in fileinput.input(filename, inplace=True):
                    print line.replace(link, '-' + link),
                running_links.remove(link)
                log.info('Removed ' + link)
            if not args.less:
                log.info('Reloading ' + args.thread[0]) # thread = filename here; reloading on next loop
        else:
            break
if __name__ == '__main__':
    # Run the downloader; swallow Ctrl-C for a clean exit.
    try:
        main()
    except KeyboardInterrupt:
        pass
|
crossref.py | # =============================================================================
#
# EZID :: crossref.py
#
# Interface to Crossref <http://www.crossref.org/>.
#
# Author:
# Greg Janee <gjanee@ucop.edu>
#
# License:
# Copyright (c) 2014, Regents of the University of California
# http://creativecommons.org/licenses/BSD/
#
# -----------------------------------------------------------------------------
import re
import threading
import time
import urllib
import urllib2
import uuid
import django.conf
import django.core.mail
import django.db
import django.db.models
import lxml.etree
import config
import ezid
import ezidapp.models
import log
import util
import util2
# Module-level configuration; all values are populated by loadConfig()
# from the EZID configuration facility.
_enabled = None
_depositorName = None
_depositorEmail = None
_realServer = None
_testServer = None
_depositUrl = None
_resultsUrl = None
_username = None
_password = None
_daemonEnabled = None
_threadName = None
_idleSleep = None
_ezidUrl = None
_dataciteEnabled = None
def loadConfig ():
  """
  Loads/reloads this module's configuration.  If daemon processing is
  enabled, a new daemon thread is started; _threadName is regenerated
  so any previously-running daemon thread aborts itself via
  _checkAbort.
  """
  global _enabled, _depositorName, _depositorEmail, _realServer, _testServer
  global _depositUrl, _resultsUrl, _username, _password
  global _daemonEnabled, _threadName, _idleSleep, _ezidUrl, _dataciteEnabled
  _enabled = (config.get("crossref.enabled").lower() == "true")
  _depositorName = config.get("crossref.depositor_name")
  _depositorEmail = config.get("crossref.depositor_email")
  _realServer = config.get("crossref.real_server")
  _testServer = config.get("crossref.test_server")
  _depositUrl = config.get("crossref.deposit_url")
  _resultsUrl = config.get("crossref.results_url")
  _username = config.get("crossref.username")
  _password = config.get("crossref.password")
  _idleSleep = int(config.get("daemons.crossref_processing_idle_sleep"))
  _daemonEnabled = (
    django.conf.settings.DAEMON_THREADS_ENABLED
    and config.get("daemons.crossref_enabled").lower() == "true"
  )
  _ezidUrl = config.get("DEFAULT.ezid_base_url")
  _dataciteEnabled = (config.get("datacite.enabled").lower() == "true")
  if _daemonEnabled:
    # A fresh, unique thread name causes any older daemon thread to
    # stop at its next _checkAbort call.
    _threadName = uuid.uuid1().hex
    t = threading.Thread(target=_daemonThread, name=_threadName)
    t.setDaemon(True)
    t.start()
# Matches an (optional) XML prolog/declaration; groups capture version,
# encoding, and standalone-ness for validateBody's checks.
_prologRE = re.compile("<\?xml\s+version\s*=\s*['\"]([-\w.:]+)[\"']" +\
  "(\s+encoding\s*=\s*['\"]([-\w.]+)[\"'])?" +\
  "(\s+standalone\s*=\s*['\"](yes|no)[\"'])?\s*\?>\s*")
# Case-insensitive match for a UTF-8 encoding name.
_utf8RE = re.compile("UTF-?8$", re.I)
# Fully-qualified name of the xsi:schemaLocation attribute.
_schemaLocation = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation"
_schemaLocationTemplate =\
  "http://www.crossref.org/schema/deposit/crossref%s.xsd"
# Parses a namespaced Crossref tag; groups: 1=namespace URI, 2=schema
# version, 3=local element name.
_tagRE =\
  re.compile("\{(http://www\.crossref\.org/schema/(4\.[34]\.\d))\}([-\w.]+)$")
# Element names that may legally appear as a <body> child element.
_rootTags = ["journal", "book", "conference", "sa_component", "dissertation",
  "report-paper", "standard", "database", "peer_review", "posted_content"]
def _notOne (n):
if n == 0:
return "no"
else:
return "more than one"
def _addDeclaration (document):
# We don't use lxml's xml_declaration argument because it doesn't
# allow us to add a basic declaration without also adding an
# encoding declaration, which we don't want.
return "<?xml version=\"1.0\"?>\n" + document
def validateBody (body):
  """
  Validates and normalizes an immediate child element of a <body>
  element of a Crossref metadata submission document.  'body' should
  be a Unicode string.  Either a normalized XML document is returned
  or an assertion error is raised.  Validation is limited to checking
  that 'body' is well-formed XML, that it appears to be a <body> child
  element, and that the elements that EZID cares about are present and
  well-formed.  Normalization includes stripping off any <doi_batch>
  or <body> elements enclosing the child element, and normalizing the
  one and only <doi_data> element.
  """
  # Strip off any prolog.
  m = _prologRE.match(body)
  if m:
    assert m.group(1) == "1.0", "unsupported XML version"
    if m.group(2) != None:
      assert _utf8RE.match(m.group(3)), "XML encoding must be UTF-8"
    if m.group(4) != None:
      assert m.group(5) == "yes", "XML document must be standalone"
    body = body[len(m.group(0)):]
  # Parse the document.
  try:
    root = lxml.etree.XML(body)
  except Exception, e:
    assert False, "XML parse error: " + str(e)
  m = _tagRE.match(root.tag)
  assert m is not None, "not Crossref submission metadata"
  namespace = m.group(1)
  version = m.group(2)
  ns = { "N": namespace }
  # Locate the <body> child element, unwrapping any enclosing
  # <doi_batch> and <body> wrappers.
  if m.group(3) == "doi_batch":
    root = root.find("N:body", namespaces=ns)
    assert root is not None, "malformed Crossref submission metadata"
    m = _tagRE.match(root.tag)
  if m.group(3) == "body":
    assert len(list(root)) == 1, "malformed Crossref submission metadata"
    root = root[0]
    m = _tagRE.match(root.tag)
    assert m is not None, "malformed Crossref submission metadata"
  assert m.group(3) in _rootTags,\
    "XML document root is not a Crossref <body> child element"
  # Locate and normalize the one and only <doi_data> element.  The DOI
  # and resource URL are replaced with "(:tba)" placeholders; they are
  # filled in later by _buildDeposit/replaceTbas.
  doiData = root.xpath("//N:doi_data", namespaces=ns)
  assert len(doiData) == 1, "XML document contains %s <doi_data> element" %\
    _notOne(len(doiData))
  doiData = doiData[0]
  doi = doiData.findall("N:doi", namespaces=ns)
  assert len(doi) == 1,\
    "<doi_data> element contains %s <doi> subelement" % _notOne(len(doi))
  doi = doi[0]
  doi.text = "(:tba)"
  resource = doiData.findall("N:resource", namespaces=ns)
  assert len(resource) == 1,\
    "<doi_data> element contains %s <resource> subelement" %\
    _notOne(len(resource))
  resource = resource[0]
  resource.text = "(:tba)"
  assert doiData.find("N:collection/N:item/N:doi", namespaces=ns) == None,\
    "<doi_data> element contains more than one <doi> subelement"
  # Any <timestamp> is removed (the deposit header supplies its own).
  e = doiData.find("N:timestamp", namespaces=ns)
  if e != None: doiData.remove(e)
  assert doiData.find("N:timestamp", namespaces=ns) == None,\
    "<doi_data> element contains more than one <timestamp> subelement"
  # Normalize schema declarations.
  root.attrib[_schemaLocation] =\
    namespace + " " + (_schemaLocationTemplate % version)
  try:
    # We re-sanitize the document because unacceptable characters can
    # be (and have been) introduced via XML character entities.
    return _addDeclaration(util.sanitizeXmlSafeCharset(
      lxml.etree.tostring(root, encoding="unicode")))
  except Exception, e:
    assert False, "XML serialization error: " + str(e)
# In the Crossref deposit schema, version 4.3.4, the <doi_data>
# element can occur in 20 different places.  An analysis shows that
# the resource title corresponding to the DOI being defined can be
# found by one or more of the following XPaths relative to the
# <doi_data> element.  Used by _buildDeposit's withdrawTitles option.
_titlePaths = [
  "../N:titles/N:title",
  "../N:titles/N:original_language_title",
  "../N:proceedings_title",
  "../N:full_title",
  "../N:abbrev_title"]
def _buildDeposit (body, registrant, doi, targetUrl, withdrawTitles=False,
  bodyOnly=False):
  """
  Builds a Crossref metadata submission document.  'body' should be a
  Crossref <body> child element as a Unicode string, and is assumed to
  have been validated and normalized per validateBody above.
  'registrant' is inserted in the header.  'doi' should be a
  scheme-less DOI identifier (e.g., "10.5060/FOO").  The return is a
  tuple (document, body, batchId) where 'document' is the entire
  submission document as a serialized Unicode string (with the DOI and
  target URL inserted), 'body' is the same but just the <body> child
  element, and 'batchId' is the submission batch identifier.
  Options: if 'withdrawTitles' is true, the title(s) corresponding to
  the DOI being defined are prepended with "WITHDRAWN:" (in 'document'
  only).  If 'bodyOnly' is true, only the body is returned.
  """
  body = lxml.etree.XML(body)
  m = _tagRE.match(body.tag)
  namespace = m.group(1)
  version = m.group(2)
  ns = { "N": namespace }
  # Fill in the (:tba) DOI and resource placeholders left by
  # validateBody.
  doiData = body.xpath("//N:doi_data", namespaces=ns)[0]
  doiElement = doiData.find("N:doi", namespaces=ns)
  doiElement.text = doi
  doiData.find("N:resource", namespaces=ns).text = targetUrl
  d1 = _addDeclaration(lxml.etree.tostring(body, encoding="unicode"))
  if bodyOnly: return d1
  # Helper producing a namespace-qualified tag name.
  def q (elementName):
    return "{%s}%s" % (namespace, elementName)
  root = lxml.etree.Element(q("doi_batch"), version=version)
  root.attrib[_schemaLocation] = body.attrib[_schemaLocation]
  head = lxml.etree.SubElement(root, q("head"))
  batchId = str(uuid.uuid1())
  lxml.etree.SubElement(head, q("doi_batch_id")).text = batchId
  lxml.etree.SubElement(head, q("timestamp")).text = str(int(time.time()*100))
  e = lxml.etree.SubElement(head, q("depositor"))
  # The depositor name element was renamed in schema version 4.3.4;
  # note this is a lexicographic string comparison on the version.
  if version >= "4.3.4":
    lxml.etree.SubElement(e, q("depositor_name")).text = _depositorName
  else:
    lxml.etree.SubElement(e, q("name")).text = _depositorName
  lxml.etree.SubElement(e, q("email_address")).text = _depositorEmail
  lxml.etree.SubElement(head, q("registrant")).text = registrant
  e = lxml.etree.SubElement(root, q("body"))
  del body.attrib[_schemaLocation]
  if withdrawTitles:
    for p in _titlePaths:
      for t in doiData.xpath(p, namespaces=ns):
        if t.text != None: t.text = "WITHDRAWN: " + t.text
  e.append(body)
  d2 = _addDeclaration(lxml.etree.tostring(root, encoding="unicode"))
  return (d2, d1, batchId)
def replaceTbas (body, doi, targetUrl):
  """
  Fills in the (:tba) portions of Crossref deposit metadata with the
  given arguments.  'body' should be a Crossref <body> child element
  as a Unicode string, and is assumed to have been validated and
  normalized per validateBody above.  'doi' should be a scheme-less
  DOI identifier (e.g., "10.5060/FOO").  The return is a Unicode
  string.
  """
  # Delegate to _buildDeposit in body-only mode; no registrant is
  # needed because only the body element is produced.
  filledBody = _buildDeposit(body, None, doi, targetUrl, bodyOnly=True)
  return filledBody
def _multipartBody (*parts):
"""
Builds a multipart/form-data (RFC 2388) document out of a list of
constituent parts. Each part is either a 2-tuple (name, value) or a
4-tuple (name, filename, contentType, value). Returns a tuple
(document, boundary).
"""
while True:
boundary = "BOUNDARY_%s" % uuid.uuid1().hex
collision = False
for p in parts:
for e in p:
if boundary in e: collision = True
if not collision: break
body = []
for p in parts:
body.append("--" + boundary)
if len(p) == 2:
body.append("Content-Disposition: form-data; name=\"%s\"" % p[0])
body.append("")
body.append(p[1])
else:
body.append(("Content-Disposition: form-data; name=\"%s\"; " +\
"filename=\"%s\"") % (p[0], p[1]))
body.append("Content-Type: " + p[2])
body.append("")
body.append(p[3])
body.append("--%s--" % boundary)
return ("\r\n".join(body), boundary)
def _wrapException (context, exception):
m = str(exception)
if len(m) > 0: m = ": " + m
return Exception("Crossref error: %s: %s%s" % (context,
type(exception).__name__, m))
def _submitDeposit (deposit, batchId, doi):
  """
  Submits a Crossref metadata submission document as built by
  _buildDeposit above.  Returns True on success, False on (internal)
  error.  'doi' is the identifier in question.
  """
  # When Crossref integration is disabled, pretend success.
  if not _enabled: return True
  # The deposit is uploaded as a multipart/form-data POST.
  body, boundary = _multipartBody(("operation", "doMDUpload"),
    ("login_id", _username), ("login_passwd", _password),
    ("fname", batchId + ".xml", "application/xml; charset=UTF-8",
    deposit.encode("UTF-8")))
  url = _depositUrl %\
    (_testServer if util2.isTestCrossrefDoi(doi) else _realServer)
  try:
    c = None
    try:
      c = urllib2.urlopen(urllib2.Request(url, body,
        { "Content-Type": "multipart/form-data; boundary=" + boundary }))
      r = c.read()
      assert "Your batch submission was successfully received." in r,\
        "unexpected return from metadata submission: " + r
    except urllib2.HTTPError, e:
      # Append the HTTP response body (when readable) to the exception
      # message so the logged error is actionable.
      if e.fp != None:
        try:
          m = e.fp.read()
        except Exception:
          pass
        else:
          if not e.msg.endswith("\n"): e.msg += "\n"
          e.msg += m
      raise e
    finally:
      if c: c.close()
  except Exception, e:
    log.otherError("crossref._submitDeposit",
      _wrapException("error submitting deposit, doi %s, batch %s" %\
      (doi, batchId), e))
    return False
  else:
    return True
def _pollDepositStatus (batchId, doi):
  """
  Polls the status of the metadata submission identified by 'batchId'.
  'doi' is the identifier in question.  The return is one of the
  tuples:
    ("submitted", message)
      'message' further indicates the status within Crossref, e.g.,
      "in_process".  The status may also be, somewhat confusingly,
      "unknown_submission", even though the submission has taken
      place.
    ("completed successfully", None)
    ("completed with warning", message)
    ("completed with failure", message)
    ("unknown", None)
      An error occurred retrieving the status.
  In each case, 'message' may be a multi-line string.  In the case of
  a conflict warning, 'message' has the form:
    Crossref message
    conflict_id=1423608
    in conflict with: 10.5072/FK2TEST1
    in conflict with: 10.5072/FK2TEST2
    ...
  """
  if not _enabled: return ("completed successfully", None)
  url = _resultsUrl %\
    (_testServer if util2.isTestCrossrefDoi(doi) else _realServer)
  try:
    c = None
    try:
      c = urllib2.urlopen("%s?%s" % (url,
        urllib.urlencode({ "usr": _username, "pwd": _password,
        "file_name": batchId + ".xml", "type": "result" })))
      response = c.read()
    except urllib2.HTTPError, e:
      # Append the HTTP response body (when readable) to the exception
      # message so the logged error is actionable.
      if e.fp != None:
        try:
          m = e.fp.read()
        except Exception:
          pass
        else:
          if not e.msg.endswith("\n"): e.msg += "\n"
          e.msg += m
      raise e
    finally:
      if c: c.close()
    try:
      # We leave the returned XML undecoded, and let lxml decode it
      # based on the embedded encoding declaration.
      root = lxml.etree.XML(response)
    except Exception, e:
      assert False, "XML parse error: " + str(e)
    assert root.tag == "doi_batch_diagnostic",\
      "unexpected response root element: " + root.tag
    assert "status" in root.attrib,\
      "missing doi_batch_diagnostic/status attribute"
    if root.attrib["status"] != "completed":
      return ("submitted", root.attrib["status"])
    else:
      d = root.findall("record_diagnostic")
      if len(d) > 1:
        # Multi-record batch: succeed only when every record succeeded;
        # the first Warning/Failure record determines the result.
        allSuccess = 0
        for element in d:
          assert "status" in element.attrib, "missing record_diagnostic/status attribute"
          if element.attrib["status"] == "Success":
            allSuccess += 1
          elif element.attrib["status"] in ["Warning", "Failure"]:
            m = element.findall("msg")
            m = m[0].text
            e = element.find("conflict_id")
            if e != None: m += "\nconflict_id=" + e.text
            for e in element.xpath("dois_in_conflict/doi"):
              m += "\nin conflict with: " + e.text
            return ("completed with %s" % element.attrib["status"].lower(), m)
          else:
            assert False, "unexpected status value: " + element.attrib["status"]
        if allSuccess == len(d):
          return ("completed successfully", None)
        # NOTE(review): if the loop ends without every record counted as
        # Success, control falls through and the function implicitly
        # returns None; in practice each non-Success record either
        # returns or raises above -- confirm intended.
      else:
        # Exactly one record_diagnostic is expected here; zero trips
        # the assertion below.
        assert len(d) == 1, ("<doi_batch_diagnostic> element contains %s " +\
          "<record_diagnostic> element") % _notOne(len(d))
        d = d[0]
        assert "status" in d.attrib, "missing record_diagnostic/status attribute"
        if d.attrib["status"] == "Success":
          return ("completed successfully", None)
        elif d.attrib["status"] in ["Warning", "Failure"]:
          m = d.findall("msg")
          assert len(m) == 1, ("<record_diagnostic> element contains %s " +\
            "<msg> element") % _notOne(len(m))
          m = m[0].text
          e = d.find("conflict_id")
          if e != None: m += "\nconflict_id=" + e.text
          for e in d.xpath("dois_in_conflict/doi"):
            m += "\nin conflict with: " + e.text
          return ("completed with %s" % d.attrib["status"].lower(), m)
        else:
          assert False, "unexpected status value: " + d.attrib["status"]
  except Exception, e:
    log.otherError("crossref._pollDepositStatus",
      _wrapException("error polling deposit status, doi %s, batch %s" %\
      (doi, batchId), e))
    return ("unknown", None)
def enqueueIdentifier (identifier, operation, metadata, blob):
  """
  Adds an identifier to the Crossref queue.  'identifier' should be
  the normalized, qualified identifier, e.g., "doi:10.5060/FOO".
  'operation' is the identifier operation and should be one of the
  strings "create", "update", or "delete".  'metadata' is the
  identifier's metadata dictionary; 'blob' is the same in blob form.
  """
  # The queue row stores the owner from the metadata's "_o" field and
  # the operation as its single-character code.
  e = ezidapp.models.CrossrefQueue(identifier=identifier, owner=metadata["_o"],
    metadata=blob,
    operation=ezidapp.models.CrossrefQueue.operationLabelToCode(operation))
  e.save()
def getQueueStatistics ():
  """
  Returns a 4-tuple containing the numbers of identifiers in the
  Crossref queue by status: (awaiting submission, submitted,
  registered with warning, registration failed).
  """
  # Single GROUP BY query over the status column.
  q = ezidapp.models.CrossrefQueue.objects.values("status").\
    annotate(django.db.models.Count("status"))
  d = {}
  for r in q: d[r["status"]] = r["status__count"]
  # Status codes presumably U=unsubmitted, S=submitted, W=warning,
  # F=failure -- TODO confirm against the CrossrefQueue model.
  return (d.get("U", 0), d.get("S", 0), d.get("W", 0), d.get("F", 0))
class _AbortException (Exception):
pass
def _checkAbort ():
  """Raises _AbortException if this daemon thread should stop; returns
  True otherwise.

  This provides a handy way to abort processing when the daemon is
  disabled or when a new daemon thread was started by a configuration
  reload.  It doesn't entirely eliminate potential race conditions
  between two daemon threads, but it should make conflicts very
  unlikely.
  """
  stillCurrent = (_daemonEnabled
    and threading.currentThread().getName() == _threadName)
  if not stillCurrent:
    raise _AbortException()
  return True
def _queue ():
  # Convenience accessor for the queue model class; doubles as an
  # abort checkpoint so that every queue access honors daemon shutdown.
  _checkAbort()
  return ezidapp.models.CrossrefQueue
def _doDeposit (r):
  # Builds and submits the Crossref deposit for queue entry 'r'.  On a
  # successful submission, DELETE entries are removed from the queue
  # outright; all others are marked SUBMITTED for later polling.
  m = util.deblobify(r.metadata)
  if r.operation == ezidapp.models.CrossrefQueue.DELETE:
    # Crossref has no true delete; instead, point the registered
    # target URL at a well-known invalid-DOI page.
    url = "http://datacite.org/invalidDOI"
  else:
    url = m["_t"]
  # Titles are withdrawn both for deletions and for identifiers whose
  # status ("_is") starts with "unavailable".
  submission, body, batchId = _buildDeposit(m["crossref"],
    ezidapp.models.getUserByPid(r.owner).username, r.identifier[4:], url,
    withdrawTitles=(r.operation == ezidapp.models.CrossrefQueue.DELETE or\
    m.get("_is", "public").startswith("unavailable")))
  if _submitDeposit(submission, batchId, r.identifier[4:]):
    if r.operation == ezidapp.models.CrossrefQueue.DELETE:
      # Well this is awkward.  If the identifier was deleted, there's
      # no point in polling for the status... if anything goes wrong,
      # there's no correction that could possibly be made, as the
      # identifier no longer exists as far as EZID is concerned.
      _checkAbort()
      r.delete()
    else:
      r.status = ezidapp.models.CrossrefQueue.SUBMITTED
      r.batchId = batchId
      r.submitTime = int(time.time())
      _checkAbort()
      r.save()
def _sendEmail (emailAddress, r):
  # Emails the owner of queue entry 'r' about a Crossref registration
  # warning or error; the wording is driven by r.status.
  if r.status == ezidapp.models.CrossrefQueue.WARNING:
    s = "warning"
  else:
    s = "error"
  # Link to the identifier's EZID view page (":/" are kept unescaped).
  l = "%s/id/%s" % (_ezidUrl, urllib.quote(r.identifier, ":/"))
  m = ("ARKetype received a%s %s in registering an identifier of yours with " +\
    "Crossref.\n\n" +\
    "Identifier: %s\n\n" +\
    "Status: %s\n\n" +\
    "Crossref message: %s\n\n" +\
    "The identifier can be viewed in ARKetype at:\n" +
    "%s\n\n" +\
    "You are receiving this message because your account is configured to " +\
    "receive Crossref errors and warnings. This is an automated email. " +\
    "Please do not reply.\n") %\
    ("n" if s == "error" else "", s, r.identifier, r.get_status_display(),
    r.message if r.message != "" else "(unknown reason)", l)
  try:
    # fail_silently=True keeps SMTP hiccups from surfacing; any other
    # failure is wrapped so the caller can log it.
    django.core.mail.send_mail("Crossref registration " + s, m,
      django.conf.settings.SERVER_EMAIL, [emailAddress], fail_silently=True)
  except Exception, e:
    raise _wrapException("error sending email", e)
def _oneline (s):
return re.sub("\s", " ", s)
def _doPoll (r):
  # Polls Crossref for the outcome of previously-submitted queue entry
  # 'r': still-pending entries get their message refreshed; completed
  # entries update the identifier's Crossref status and either leave
  # the queue (success) or are flagged WARNING/FAILURE, optionally
  # emailing the owner.
  t = _pollDepositStatus(r.batchId, r.identifier[4:])
  if t[0] == "submitted":
    r.message = t[1]
    _checkAbort()
    r.save()
  elif t[0].startswith("completed"):
    # Deleted identifiers aren't retained in the queue, but just to
    # make it clear...
    if r.operation != ezidapp.models.CrossrefQueue.DELETE:
      if t[0] == "completed successfully":
        crs = ezidapp.models.StoreIdentifier.CR_SUCCESS
        crm = ""
      else:
        if t[0] == "completed with warning":
          crs = ezidapp.models.StoreIdentifier.CR_WARNING
        else:
          crs = ezidapp.models.StoreIdentifier.CR_FAILURE
        crm = _oneline(t[1]).strip()
      _checkAbort()
      # We update the identifier's Crossref status in the store and
      # search databases, but do so in such a way as to avoid
      # infinite loops and triggering further updates to DataCite or
      # Crossref.
      s = ezid.setMetadata(r.identifier, ezidapp.models.getAdminUser(),
        { "_crossref": "%s/%s" % (crs, crm) }, updateExternalServices=False)
      assert s.startswith("success:"), "ezid.setMetadata failed: " + s
    if t[0] == "completed successfully":
      _checkAbort()
      r.delete()
    else:
      if t[0] == "completed with warning":
        r.status = ezidapp.models.CrossrefQueue.WARNING
      else:
        r.status = ezidapp.models.CrossrefQueue.FAILURE
      r.message = t[1]
      u = ezidapp.models.getUserByPid(r.owner)
      if u.crossrefEmail != "":
        _checkAbort()
        _sendEmail(u.crossrefEmail, r)
      _checkAbort()
      r.save()
  else:
    # Status "unknown": polling failed; leave the entry untouched so
    # it is retried on the next pass.
    pass
def _daemonThread ():
  # Main loop of the Crossref daemon: repeatedly scans the queue,
  # submitting UNSUBMITTED entries and polling SUBMITTED ones, until
  # _checkAbort raises (daemon disabled or superseded).
  maxSeq = None
  while True:
    # Close connections up front so each iteration gets a fresh one;
    # avoids stale/timed-out database connections in a long-lived
    # thread.
    django.db.connections["default"].close()
    django.db.connections["search"].close()
    time.sleep(_idleSleep)
    try:
      _checkAbort()
      # First, a quick test to avoid retrieving the entire table if
      # nothing needs to be done.  Note that in the loop below, if any
      # entry is deleted or if any identifier is processed, maxSeq is
      # set to None, thus forcing another round of processing.
      if maxSeq != None:
        if _queue().objects.aggregate(
          django.db.models.Max("seq"))["seq__max"] == maxSeq:
          continue
      # Hopefully the queue will not grow so large that the following
      # query will cause a burden.
      query = _queue().objects.all().order_by("seq")
      if len(query) > 0:
        maxSeq = query[len(query)-1].seq
      else:
        maxSeq = None
      for r in query:
        # If there are multiple entries for this identifier, we are
        # necessarily looking at the first, i.e., the earliest, and
        # the others must represent subsequent modifications.  Hence
        # we simply delete this entry regardless of its status.
        if _queue().objects.filter(identifier=r.identifier).count() > 1:
          r.delete()
          maxSeq = None
        else:
          if r.status == ezidapp.models.CrossrefQueue.UNSUBMITTED:
            _doDeposit(r)
            maxSeq = None
          elif r.status == ezidapp.models.CrossrefQueue.SUBMITTED:
            _doPoll(r)
            maxSeq = None
          else:
            # WARNING/FAILURE entries remain in the queue untouched.
            pass
    except _AbortException:
      break
    except Exception, e:
      # Log and keep the daemon alive; force a full rescan next pass.
      log.otherError("crossref._daemonThread", e)
      maxSeq = None
# Module initialization: load the configuration now, and reload it
# whenever the server configuration is reloaded.
loadConfig()
config.registerReloadListener(loadConfig)
|
arearesilience.py | #!/usr/bin/env python3
"""
RESOURCE MANAGEMENT - POLICIES MODULE
Area Resilience - Keep always a Leader in the area!
"""
import threading
import requests
import socket
from time import sleep
from random import randrange
from common.logs import LOG
from common.common import CPARAMS, URLS
from policies.leaderprotectionpolicies import LeaderProtectionPolicies
from requests.exceptions import ConnectTimeout as timeout
__maintainer__ = 'Alejandro Jurnet'
__email__ = 'ajurnet@ac.upc.edu'
__author__ = 'Universitat Politècnica de Catalunya'
class BackupEntry:
    """
    Record describing one backup agent tracked by the Leader.

    :param deviceID: unique identifier of the backup device
    :param deviceIP: IP address of the backup device
    :param priority: takeover priority (lower value takes over earlier)
    :param ttl: initial time-to-live in keeper ticks; defaults to the
        historical creation grace period (5 s at 0.1 s per tick).
        Generalized from a hard-coded constant so callers can tune the
        grace period without changing behavior for existing callers.
    """
    def __init__(self, deviceID, deviceIP, priority, ttl=5 / .1):
        self.deviceID = deviceID
        self.deviceIP = deviceIP
        self.priority = priority
        # TTL is decremented once per keeper tick; the entry is demoted
        # when it drops below zero.
        self.TTL = ttl
class AreaResilience:
    """
    Keeps always a Leader in the area: when acting as Leader it elects
    and monitors backup agents; when acting as Backup it sends
    keepalive messages to the Leader and takes over on failure.
    """
    # SELECTION_PORT = 46051 # 29512 # Deprecated - Keepalive is now REST!
    # LEADER_PORT = 46052 # 29513
    # MAX_RETRY_ATTEMPTS = 5
    #
    # TIME_TO_WAIT_BACKUP_SELECTION = 3
    # TIME_KEEPALIVE = 1
    # TIME_KEEPER = .1
    # MINIMUM_BACKUPS = 1
    # MAX_TTL = 3 / .1 # 30 ticks ~ 3 secs

    # Sentinel priorities sent to a backup to signal demotion,
    # re-election, or a message addressed to a non-Leader.
    PRIORITY_ON_DEMOTION = -2
    PRIORITY_ON_REELECTION = 0
    PRIORITY_ON_FAILURE = -3
    TAG = '\033[34m' + '[AR]: ' + '\033[0m'

    def __init__(self, CIMIRequesterFunction=None, leaderprotectionpolicies_obj=None):
        """
        :param CIMIRequesterFunction: callable (key, default) used to
            fetch values from CIMI; when None, defaults are returned.
        :param leaderprotectionpolicies_obj: policy object providing
            timing/threshold parameters; a fresh LeaderProtectionPolicies
            is created when omitted.
        """
        self._connected = False
        self._imBackup = False
        self._imLeader = False
        self._imCapable = False
        self._leaderFailed = False
        self._backupSelected = False
        self._startupCorrect = False
        self._deviceID = ''
        self._leaderIP = ''
        self._backupPriority = -1
        self._nextPriority = 1
        # Fix: avoid a shared mutable default argument; each instance
        # now gets its own policies object unless one is provided.
        self._lpp = leaderprotectionpolicies_obj if leaderprotectionpolicies_obj is not None \
            else LeaderProtectionPolicies()
        self.backupDatabase = []
        self.backupDatabaseLock = threading.Lock()
        self._CIMIRequesterFunction = CIMIRequesterFunction
        self.th_proc = None
        self.th_keep = None
        self.isStarted = False

    def __imLeader(self):
        """
        Queries CIMI for the 'leader' flag and caches the result.
        :return: True when this agent is the area Leader.
        """
        self._imLeader = self.__getCIMIData('leader', False)
        return self._imLeader

    def __imCapable(self):
        """
        :return: True when this agent can become a Leader.
        """
        # TODO: Capable by evaluation, not hardcoded
        return True  # By default, all agents will be capable to be leader.

    def __getCIMIData(self, key, default=None):
        """
        Fetches 'key' via the injected CIMI requester function, falling
        back to 'default' when no requester is configured.
        :return: the value from CIMI, or 'default'.
        """
        if self._CIMIRequesterFunction is None:
            value = default
        else:
            value = self._CIMIRequesterFunction(key, default)
        return value

    def imBackup(self):
        # Public accessor: is this agent currently a backup?
        return self._imBackup

    def imLeader(self):
        # Public accessor: is this agent currently the Leader?
        return self._imLeader

    def getBackupDatabase(self):
        # Returns a shallow copy so callers can iterate without holding
        # the lock.
        with self.backupDatabaseLock:
            ret = self.backupDatabase.copy()
        return ret

    def addBackup(self, deviceID, deviceIP, priority):
        """
        Registers a new backup (if not already known) and notifies the
        device via an election message.
        :return: True when the device accepted the election.
        """
        found = False
        with self.backupDatabaseLock:
            for backup in self.backupDatabase:
                if backup.deviceID == deviceID:
                    LOG.debug(self.TAG + 'Backup {} found!'.format(deviceID))
                    found = True
                    break
        if not found:
            correct = self.__send_election_message(deviceIP)
            if correct:
                new_backup = BackupEntry(deviceID, deviceIP, priority)
                with self.backupDatabaseLock:
                    self.backupDatabase.append(new_backup)
                LOG.info('Backup {}[{}] added with priority {}'.format(deviceID, deviceIP, priority))
        return correct

    def deleteBackup(self, deviceID):
        """
        Removes a backup from the database and demotes the device.
        :param deviceID: identifier of the backup to remove
        :return: True when the demotion message was acknowledged.
        """
        # 1- Get and delete backup from database
        found = False
        correct = False
        with self.backupDatabaseLock:
            for backup in self.backupDatabase:
                if backup.deviceID == deviceID:
                    LOG.debug(self.TAG + 'Backup {} found!'.format(deviceID))
                    found = True
                    break
        if found:
            # Backup is in the database, delete him!
            with self.backupDatabaseLock:
                self.backupDatabase.remove(backup)
            # And now... Let him know...
            correct = self.__send_demotion_message(backup.deviceIP)
        return correct

    def start(self, deviceID):  # TODO: Give deviceID at startup?
        """
        Starts the resilience procedure in a daemon thread.
        :return: True when the module was started by this call.
        """
        self._deviceID = deviceID
        if self.isStarted:
            LOG.warning(self.TAG + 'Procedure is already started...')
            return False
        else:
            self.th_proc = threading.Thread(name='area_res', target=self.__common_flow, daemon=True)
            self.th_proc.start()
            self.isStarted = True
            LOG.info(self.TAG + 'Module Started')
            return True

    def stop(self):
        """
        Stop all the module activity
        :return:
        """
        if self.isStarted:
            # Clearing _connected makes the worker/keeper loops exit;
            # then we busy-wait until both threads terminate.
            self._connected = False
            if self.th_proc is not None:
                while self.th_proc.is_alive():
                    LOG.debug(self.TAG + 'Waiting {} to resume activity...'.format(self.th_proc.name))
                    sleep(0.5)
            if self.th_keep is not None:
                while self.th_keep.is_alive():
                    LOG.debug(self.TAG + 'Waiting {} to resume activity...'.format(self.th_keep.name))
                    sleep(0.1)
            LOG.info(self.TAG + 'All threads stoped. AreaResilience module is stopped.')
        else:
            LOG.info(self.TAG + 'Module is not started')
        return

    def promotedToBackup(self, leaderIP):
        """
        The agent is promoted to be a backup
        :param leaderIP: IP address of the current Leader
        :return: True on successful promotion.
        """
        # First check if Agent was electable
        self._leaderIP = leaderIP
        if self._imCapable:
            LOG.info(self.TAG + 'Becoming backup due leader selection.')
            # Then, check if AreaResilience thread is running
            if self.th_proc is None:
                pass
            elif not self.th_proc.is_alive():
                pass
            elif self._imLeader or self._imBackup:
                LOG.error('Agent is already a Backup/Leader. Cannot become a Backup.')
                return False
            else:
                LOG.warning('Area Resilience still starting. Cannot promote on this state. Waiting...')
                while self.th_proc.is_alive():
                    sleep(0.1)
                LOG.debug('Successful waiting.')
            LOG.debug('Module is ready for promotion.')
            self.th_proc = threading.Thread(name='area_res', target=self.__backupLeader_flow, daemon=True)
            self.th_proc.start()
            self.isStarted = True
            return True
        else:
            if not self._startupCorrect:
                LOG.warning('Area Resilience still starting. Cannot promote on this state.')
            else:
                LOG.error('Agent not capable to be Backup/Leader')
            return False

    def __common_flow(self):
        # Entry point of the worker thread: decide whether this agent
        # starts as Leader, as a capable (electable) agent, or neither.
        self._connected = True
        if not self.__imLeader():
            LOG.info(self.TAG + 'I\'m not a Leader.')
            # I'm not a leader
            if self.__imCapable():
                LOG.info(self.TAG + 'I\'m capable to be Leader.')
                self._imCapable = True
                # Can be a backup
                self.__preSelectionSetup()
                LOG.info(self.TAG + 'Waiting to be selected.')
            else:
                # Can not be a backup
                LOG.info(self.TAG + 'I\'m NOT capable to be Leader.')
        self._startupCorrect = True
        if self._imLeader:
            # Starting as Leader
            self.__backupLeader_flow()
        return

    def __backupLeader_flow(self):
        # Runs the Backup role (keepalives) and, when the Leader fails
        # or this agent already is Leader, transitions to the Leader
        # role (backup selection loop).
        if not self._connected:
            LOG.error('Module stoped due _connected = False')
            return
        if not self._imLeader:
            # I've been promoted as backup
            LOG.info(self.TAG + 'I\'m selected to be a backup. Seting up')
            self.__preBackupSetup()
            self.__becomeBackup()
        if not self._connected:
            return
        # Multiple backups support: lower-priority backups wait longer,
        # giving higher-preference backups the chance to take over first.
        if self._backupPriority > 0:
            sleep_time = 1. + 10 * (self._backupPriority - 1)
            LOG.info('Waiting {}s before leader takeover...'.format(sleep_time))
            sleep(sleep_time)
            if not self._connected:
                return
            LOG.debug('Checking if new Leader is up...')
            new_leader = self.__getCIMIData('disc_leaderIP', default='')
            LOG.debug('Stored Leader = [{}], Detected Leader = [{}]'.format(self._leaderIP, new_leader))
            if new_leader == '' or new_leader == self._leaderIP:
                LOG.warning('Leader not detected by Discovery')
            elif self._leaderIP != new_leader:
                # Another backup already took over; demote ourselves.
                LOG.info('Correct Leader takeover by a backup with more preference.')
                try:  # TODO: Clean solution
                    r = requests.get('{}agent'.format(
                        URLS.build_url_address(URLS.URL_POLICIES_ROLECHANGE, addr='127.0.0.1', port=CPARAMS.POLICIES_PORT)),
                        timeout=.5)
                except Exception:
                    pass
                finally:
                    return
        if not self._connected:
            return
        if self._imLeader or self._leaderFailed:
            # I'm a leader
            LOG.info(self.TAG + 'Leader seting up')
            self.__becomeLeader()
            self.__backupSelection()
        return

    def __becomeLeader(self):  # TODO
        """
        Switches this agent into the Leader role and starts the keeper
        thread that ages backup TTLs.
        :return:
        """
        # 1- Shutdown/Notify all the modules involving Agent to Leader transiction.
        if self._leaderFailed:
            # Only if leader fails, triggers are needed, otherwise no action is required
            try:
                r = requests.get(URLS.build_url_address('{}leader'.format(URLS.URL_POLICIES_ROLECHANGE), portaddr=('127.0.0.1', '46050')))  # TODO Addr+Prt by CPARAMS; Parametrize
                LOG.info(self.TAG + 'Trigger to AgentStart Switch done. {}'.format(r.json()))
                self._imLeader = True
                self._imBackup = False
            except Exception as ex:
                LOG.exception(self.TAG + '_becomeLeader trigger to AgentStart failed')
        self.th_keep = threading.Thread(name='ar_keeper', target=self.__keeper, daemon=True)
        self.th_keep.start()

    def __getTopology(self):  # TODO: Get actual CIMI data or Topology from environment
        """
        :return: a mutable copy of the area topology (list of devices).
        """
        return self.__getCIMIData('topology', default=[]).copy()

    def __backupSelection(self):
        """
        Leader loop: periodically checks that enough live backups exist
        and elects new ones from the topology when they do not.
        :return:
        """
        # 1- Check Backups
        # 2- Enough backups?
        #       YES: End sleep(X)
        #       NO:
        # 3- Get topology and select one agent
        #       If not capable: Go to 3
        # 4- promote to Backup
        #       Success: Correct_backups++
        #       Go to 2
        while self._connected:
            correct_backups = 0
            with self.backupDatabaseLock:
                # Check backups
                for backup in self.backupDatabase:
                    if backup.TTL >= 0:
                        correct_backups += 1
            # Enough?
            if correct_backups >= self._lpp.get(self._lpp.BACKUP_MINIMUM, default=1):
                # Enough backups
                LOG.debug('{} correct backup detected in Leader. Everything is OK.'.format(correct_backups))
            else:
                # Not enough
                if not self._connected:
                    break
                LOG.warning('{} backup dettected are not enough. Electing new ones...'.format(correct_backups))
                topology = self.__getTopology()
                new_backups = []
                while self._connected and correct_backups < self._lpp.get(self._lpp.BACKUP_MINIMUM, default=1) and len(topology) > 0:
                    device = topology[0]
                    topology.remove(device)
                    found = False
                    with self.backupDatabaseLock:
                        for backup in self.backupDatabase:
                            if backup.deviceID == device.get('deviceID'):
                                found = True
                                break
                    if not found:
                        # Todo: Evaluate if selected device is capable
                        correct = self.__send_election_message(device.get('deviceIP'))
                        if correct:
                            new_backup = BackupEntry(device.get('deviceID'), device.get('deviceIP'), self._nextPriority)
                            with self.backupDatabaseLock:
                                self.backupDatabase.append(new_backup)
                            LOG.info('Backup {}[{}] added with priority {}'.format(device.get('deviceID'), device.get('deviceIP'), self._nextPriority))
                            correct_backups += 1
                            self._nextPriority += 1
                            # Fix: record the newly-created entry (the
                            # original appended the list to itself).
                            new_backups.append(new_backup)
                if correct_backups >= self._lpp.get(self._lpp.BACKUP_MINIMUM, default=1):
                    # Now we have enough
                    LOG.info('{} correct backups dettected in Leader. {} new backups added.'.format(correct_backups, len(new_backups)))
                else:
                    LOG.warning('{} backups dettected are not enough. Waiting for new election.'.format(correct_backups))
            # Sleep
            if self._connected:
                sleep(self._lpp.get(self._lpp.TIME_TO_WAIT_BACKUP_SELECTION))
        LOG.info('Leader stopped...')

    def __preSelectionSetup(self):
        """
        Hook executed before this agent becomes electable.
        :return:
        """
        return

    def __preBackupSetup(self):
        """
        Hook executed before this agent starts acting as a backup.
        :return:
        """
        if self._imBackup:
            pass  # Do something here if necessary

    def __becomeBackup(self):
        """
        Backup loop: send keepalives to the Leader until it stops
        answering; after MAX_RETRY_ATTEMPTS failures the Leader is
        declared down.
        :return:
        """
        # 1- Send the KeepAlive message to the leader.
        # 2- Receive the reply (with preference number).
        #       If leader down, Backup becomes leader.
        #       Else repeat.
        attempt = 0
        counter = 0
        payload = {
            'deviceID': self._deviceID
        }
        self._imBackup = True
        while self._connected and attempt < self._lpp.get(self._lpp.MAX_RETRY_ATTEMPTS):
            stopLoop = False
            while self._connected and not stopLoop:
                try:
                    # 1. Requests to Leader Keepalive endpoint
                    r = requests.post(URLS.build_url_address(URLS.URL_POLICIES_KEEPALIVE, portaddr=(self._leaderIP, CPARAMS.POLICIES_PORT)), json=payload, timeout=0.5)
                    LOG.debug(self.TAG + 'Keepalive sent [#{}]'.format(counter))
                    # 2. Process Reply
                    jreply = r.json()
                    if r.status_code == 200:
                        leaderID = jreply['deviceID']  # Todo: Use this
                        priority = jreply['backupPriority']
                        # 3. Update Preference
                        self._backupPriority = priority
                        LOG.debug(self.TAG + 'Reply received, Leader still alive: LeaderID: {}'.format(leaderID))
                        attempt = 0
                    else:
                        # Error?
                        LOG.error('KeepAlive status_code = {}'.format(r.status_code))
                        if r.status_code == 403 and self.PRIORITY_ON_DEMOTION == jreply['backupPriority']:
                            LOG.warning('Backup has been removed from database or not authorized to send keepalive messages')
                        elif r.status_code == 405 and self.PRIORITY_ON_FAILURE == jreply['backupPriority']:
                            LOG.warning('Sending message to a Device that is not a Leader!')
                            stopLoop = True
                        else:
                            stopLoop = True
                    if not stopLoop:
                        # 4. Sleep
                        sleep(self._lpp.get(self._lpp.TIME_KEEPALIVE))
                        counter += 1
                except Exception:
                    # Connection broke, backup assumes that Leader is down.
                    LOG.debug('Keepalive connection refused')
                    stopLoop = True
            LOG.warning('Keepalive connection is broken... Retry Attempts: {}'.format(self._lpp.get(self._lpp.MAX_RETRY_ATTEMPTS)-(attempt+1)))
            attempt += 1
        if not self._connected:
            LOG.info('Backup stopped.')
        else:
            LOG.warning(self.TAG + '## LEADER IS DOWN! ##')
            self._leaderFailed = True
        return

    def __keeper(self):
        """
        Thread that reduces the TTL of a backup and demotes if TTL <= 0
        :return:
        """
        LOG.debug('Keeper is running')
        with self.backupDatabaseLock:
            self.backupDatabase = []  # Restart database
        while self._connected:
            with self.backupDatabaseLock:
                # Fix: iterate over a snapshot so dead entries can be
                # removed from the real list without skipping elements
                # (removing while iterating the same list is unsafe).
                for backup in list(self.backupDatabase):
                    # Reduce TTL
                    backup.TTL -= 1
                    if backup.TTL < 0:
                        # Backup is down
                        LOG.warning('Backup {}[{}] is DOWN with TTL: {}'.format(backup.deviceID, backup.deviceIP, backup.TTL))
                        self.__send_demotion_message(backup.deviceIP)
                        # Remove from list
                        self.backupDatabase.remove(backup)  # TODO: Inform CIMI?
                        LOG.debug('Backup removed from database.')
                    else:
                        # Backup is ok
                        LOG.debug('Backup {}[{}] is OK with TTL: {}'.format(backup.deviceID, backup.deviceIP, backup.TTL))
            if self._connected:
                sleep(self._lpp.get(self._lpp.TIME_KEEPER))
        LOG.warning('Keeper thread stopped')

    def __send_election_message(self, address):
        """
        :param address: IP address for election
        :return: True if correct election, False otherwise
        """
        try:
            r = requests.get('{}backup'.format(URLS.build_url_address(URLS.URL_POLICIES_ROLECHANGE, addr=address, port=CPARAMS.POLICIES_PORT)), timeout=1.5)
            if r.status_code == 200:
                # Correct
                return True
            else:
                LOG.warning('Selected device [{}] received {} status code received on electing a new backup'.format(address, r.status_code))
                return False
        except timeout:
            LOG.warning('Selected device [{}] cannot become Backup due timeout'.format(address))
            return False
        except Exception:
            LOG.exception('Selected device [{}] cannot become Backup due error in election message'.format(address))
            return False

    def __send_demotion_message(self, address):
        """
        Demote a backup to normal agent
        :param address: IP address for demotion
        :return: True if correct demotion, False otherwise
        """
        try:
            r = requests.get('{}agent'.format(
                URLS.build_url_address(URLS.URL_POLICIES_ROLECHANGE, addr=address, port=CPARAMS.POLICIES_PORT)),
                timeout=1.5)
            if r.status_code == 200:
                # Correct
                return True
            else:
                LOG.warning('Selected device [{}] received {} status code received on removing a backup'.format(address, r.status_code))
                return False
        except timeout:
            LOG.warning('Selected device [{}] cannot be demoted to Agent due timeout'.format(address))
            return False
        except Exception:
            LOG.exception('Selected device cannot become Agent due error in demotion message')
            return False

    def receive_keepalive(self, deviceID):
        """
        Handles a keepalive from a backup: refreshes its TTL when it is
        known, otherwise signals demotion.
        :return: (known, priority) — priority is PRIORITY_ON_DEMOTION
            for unknown senders.
        """
        with self.backupDatabaseLock:
            for backup in self.backupDatabase:
                if backup.deviceID == deviceID:
                    # It's a match
                    backup.TTL = int(self._lpp.get(self._lpp.MAX_TTL))
                    LOG.debug(
                        'backupID: {}; backupIP: {}; priority: {}; Keepalive received correctly'.format(backup.deviceID,
                                                                                                        backup.deviceIP,
                                                                                                        backup.priority))
                    return True, backup.priority
        return False, self.PRIORITY_ON_DEMOTION
|
base.py | """
Module containing base classes that represent object entities that can accept
configuration, start/stop/run/abort, create results and have some state.
"""
import os
import sys
import time
import signal
import threading
import traceback
from collections import deque, OrderedDict
import psutil
from schema import Or, And, Use
from testplan.common.config import Config, ConfigOption
from testplan.common.utils.thread import execute_as_thread
from testplan.common.utils.timing import wait
from testplan.common.utils.path import makeemptydirs, makedirs, default_runpath
from testplan.common.utils.strings import slugify, uuid4
from testplan.common.utils.validation import is_subclass, has_method
from testplan.common.utils import logger
class Environment(object):
    """
    A collection of resources that can be started/stopped.

    :param parent: Reference to parent object.
    :type parent: :py:class:`Entity <testplan.common.entity.base.Entity>`
    """

    def __init__(self, parent=None):
        # Assigned via __dict__ directly because __setattr__ is
        # overridden below and inspects these very attributes.
        self.__dict__["parent"] = parent
        self.__dict__["_initial_context"] = {}
        self.__dict__["_resources"] = OrderedDict()
        self.__dict__["start_exceptions"] = OrderedDict()
        self.__dict__["stop_exceptions"] = OrderedDict()

    def add(self, item, uid=None):
        """
        Adds a :py:class:`Resource <testplan.common.entity.base.Resource>` to
        the Environment.

        :param item: Resource to be added.
        :type item: :py:class:`Resource <testplan.common.entity.base.Resource>`
        :param uid: Unique identifier.
        :type uid: ``str`` or ``NoneType``
        :return: Unique identifier assigned to item added.
        :rtype: ``str``
        :raises ValueError: if the uid collides with an existing
            attribute/method name of this object.
        :raises RuntimeError: if the uid is already registered.
        """
        if uid is None:
            uid = item.uid()
        # Resources are exposed as attributes (see __getattr__), so a
        # uid that shadows an existing member would be unreachable.
        if uid in dir(self):
            raise ValueError(
                f'Identifier "{uid}" is reserved and cannot be used as UID.'
            )
        if uid in self._resources:
            raise RuntimeError(f'Uid "{uid}" already in environment.')
        item.context = self
        self._resources[uid] = item
        return uid

    def remove(self, uid):
        """
        Remove resource with the given uid from the environment.
        """
        del self._resources[uid]

    def first(self):
        # Returns the uid of the first (earliest-added) resource.
        return next(uid for uid in self._resources.keys())

    def get(self, key, default=None):
        # For compatibility reason, acts like a dictionary which has
        # a `get` method that returns `None` if no attribute found.
        try:
            return self.__getitem__(key)
        except AttributeError:
            return default

    def __getattr__(self, name):
        # Resource lookup first, then initial context; only reached
        # when normal attribute lookup fails.
        resources = self.__getattribute__("_resources")
        initial_context = self.__getattribute__("_initial_context")
        if name in resources:
            return resources[name]
        if name in initial_context:
            return initial_context[name]
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(
                self.__class__.__name__, name
            )
        )

    def __setattr__(self, name, value):
        # Guard against accidentally replacing a registered resource or
        # a context entry through plain attribute assignment.
        if name in self.__dict__:
            self.__dict__[name] = value
        elif name in self.__getattribute__("_resources"):
            raise RuntimeError(
                'Cannot modify resource "{}" in environment.'.format(name)
            )
        elif name in self.__getattribute__("_initial_context"):
            raise RuntimeError(
                'Cannot modify attribute "{}" in initial context.'.format(name)
            )
        else:
            super(Environment, self).__setattr__(name, value)

    def __getitem__(self, item):
        return self.__getattr__(item)

    def __contains__(self, item):
        if item in self.__getattribute__(
            "_resources"
        ) or item in self.__getattribute__("_initial_context"):
            return True
        else:
            return False

    def __iter__(self):
        return iter(self._resources.values())

    def __repr__(self):
        # Resources take precedence over initial-context entries with
        # the same key in the rendered mapping.
        initial = {key: val for key, val in self._initial_context.items()}
        res = {key: val for key, val in self._resources.items()}
        initial.update(res)
        return "{}[{}]".format(self.__class__.__name__, initial)

    def __len__(self):
        return len(self._resources)

    def all_status(self, target):
        """
        Check all resources has target status.
        """
        return all(
            self._resources[resource].status.tag == target
            for resource in self._resources
        )

    def start(self):
        """
        Start all resources sequentially and log errors.
        """
        # Trigger start all resources
        for resource in self._resources.values():
            if not resource.cfg.auto_start:
                continue
            try:
                resource.start()
                if not resource.cfg.async_start:
                    resource.wait(resource.STATUS.STARTED)
            except Exception:
                msg = "While starting resource [{}]\n{}".format(
                    resource.cfg.name, traceback.format_exc()
                )
                if self.parent and hasattr(self.parent, "logger"):
                    self.parent.logger.error(msg)
                self.start_exceptions[resource] = msg
                # Environment start failure. Won't start the rest.
                break
        # Wait resources status to be STARTED.
        for resource in self._resources.values():
            if resource in self.start_exceptions:
                break
            if resource.cfg.async_start is False:
                # Already waited for synchronously above.
                continue
            else:
                resource.wait(resource.STATUS.STARTED)

    def _log_exception(self, resource, func):
        # Wraps func so that exceptions are logged and recorded instead
        # of propagating (used for pool-based start/stop).
        # NOTE(review): failures are always recorded in
        # start_exceptions, even when func is a stop method (see
        # stop_in_pool) — confirm whether stop_exceptions was intended
        # there.
        def wrapper(*args, **kargs):
            try:
                func(*args, **kargs)
            except Exception:
                msg = "While executing {} of resource [{}]\n{}".format(
                    func.__name__, resource.cfg.name, traceback.format_exc()
                )
                if self.parent and hasattr(self.parent, "logger"):
                    self.parent.logger.error(msg)
                self.start_exceptions[resource] = msg

        return wrapper

    def start_in_pool(self, pool):
        """
        Start all resources concurrently in thread pool.
        """
        # All resources must be async-startable before any is launched.
        for resource in self._resources.values():
            if not resource.cfg.async_start:
                raise RuntimeError(
                    "Cannot start resource {} in thread pool, "
                    "its async_start attr is set to False".format(resource)
                )
        for resource in self._resources.values():
            pool.apply_async(self._log_exception(resource, resource.start))
        # Wait resources status to be STARTED.
        for resource in self._resources.values():
            resource.wait(resource.STATUS.STARTED)

    def stop(self, reversed=False):
        """
        Stop all resources in reverse order and log exceptions.
        """
        resources = list(self._resources.values())
        if reversed is True:
            resources = resources[::-1]
        # Stop all resources
        for resource in resources:
            if (resource.status.tag is None) or (
                resource.status.tag == resource.STATUS.STOPPED
            ):
                # Skip resources not even triggered to start.
                continue
            try:
                resource.stop()
            except Exception:
                msg = "While stopping resource [{}]\n{}".format(
                    resource.cfg.name, traceback.format_exc()
                )
                self.stop_exceptions[resource] = msg
        # Wait resources status to be STOPPED.
        for resource in resources:
            if resource in self.stop_exceptions:
                continue
            elif resource.status.tag is None:
                # Skip resources not even triggered to start.
                continue
            else:
                resource.wait(resource.STATUS.STOPPED)

    def stop_in_pool(self, pool, reversed=False):
        """
        Stop all resources in reverse order and log exceptions.
        """
        resources = list(self._resources.values())
        if reversed is True:
            resources = resources[::-1]
        # Stop all resources
        for resource in resources:
            # Skip resources not even triggered to start.
            if (resource.status.tag is None) or (
                resource.status.tag == resource.STATUS.STOPPED
            ):
                continue
            pool.apply_async(self._log_exception(resource, resource.stop))
        # Wait resources status to be STOPPED.
        for resource in resources:
            # Skip resources not even triggered to start.
            if resource.status.tag is None:
                continue
            else:
                resource.wait(resource.STATUS.STOPPED)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
class StatusTransitionException(Exception):
    """To be raised on illegal state transition attempt."""

    pass
class EntityStatus(object):
    """
    Represents current status of an
    :py:class:`Entity <testplan.common.entity.base.Entity>` object.

    TODO: Utilise metadata sto store information.
    """

    NONE = None
    PAUSING = "PAUSING"
    PAUSED = "PAUSED"
    RESUMING = "RESUMING"

    def __init__(self):
        """TODO."""
        self._current = self.NONE
        self._metadata = OrderedDict()
        self._transitions = self.transitions()

    @property
    def tag(self):
        """Current status value."""
        return self._current

    @property
    def metadata(self):
        """TODO."""
        return self._metadata

    def change(self, new):
        """Transition to new state."""
        old = self._current
        try:
            # Staying in the same state is always allowed; otherwise the
            # transition table decides.  The equality check short-circuits
            # so an unlisted current state never triggers a KeyError when
            # the state is unchanged.
            legal = old == new or new in self._transitions[old]
        except KeyError as exc:
            msg = "On status change from {} to {} - {}".format(
                old, new, exc
            )
            raise StatusTransitionException(msg)
        if legal:
            self._current = new
        else:
            raise StatusTransitionException(
                "On status change from {} to {}".format(old, new)
            )

    def update_metadata(self, **metadata):
        """TODO."""
        self._metadata.update(metadata)

    def clear_metadata(self):
        """TODO."""
        self._metadata = OrderedDict()

    def transitions(self):
        """
        Returns all legal transitions of the status of the
        :py:class:`Entity <testplan.common.entity.base.Entity>`.
        """
        table = {
            self.PAUSING: {self.PAUSED},
            self.PAUSED: {self.RESUMING},
        }
        return table
class EntityConfig(Config):
    """
    Configuration object for
    :py:class:`Entity <testplan.common.entity.base.Entity>` object.

    All classes that inherit
    :py:class:`Entity <testplan.common.entity.base.Entity>` can define a
    configuration that inherits this ones schema.
    """

    @classmethod
    def get_options(cls):
        """Config options for base Entity class."""
        # Each key is a ConfigOption (with optional default); each value
        # is the schema the supplied option value must satisfy.
        return {
            # Runpath may be a literal path or a callable producing one.
            ConfigOption("runpath"): Or(None, str, callable),
            ConfigOption("initial_context", default={}): dict,
            ConfigOption("path_cleanup", default=False): bool,
            # Timeouts are in seconds.
            ConfigOption("status_wait_timeout", default=600): int,
            ConfigOption("abort_wait_timeout", default=30): int,
            # active_loop_sleep impacts cpu usage in interactive mode
            ConfigOption("active_loop_sleep", default=0.005): float,
        }
class Entity(logger.Loggable):
"""
Base class for :py:class:`Entity <testplan.common.entity.base.Entity>`
and :py:class:`Resource <testplan.common.entity.base.Resource>` objects
providing common functionality like runpath creation, abort policy
and common attributes.
:param runpath: Path to be used for temp/output files by entity.
:type runpath: ``str`` or ``NoneType`` callable that returns ``str``
:param initial_context: Initial key: value pair context information.
:type initial_context: ``dict``
:param path_cleanup: Remove previous runpath created dirs/files.
:type path_cleanup: ``bool``
:param status_wait_timeout: Timeout for wait status events.
:type status_wait_timeout: ``int``
:param abort_wait_timeout: Timeout for entity abort.
:type abort_wait_timeout: ``int``
:param active_loop_sleep: Sleep time on busy waiting loops.
:type active_loop_sleep: ``float``
"""
CONFIG = EntityConfig
STATUS = EntityStatus
def __init__(self, **options):
super(Entity, self).__init__()
self._cfg = self.__class__.CONFIG(**options)
self._status = self.__class__.STATUS()
self._wait_handlers = {}
self._runpath = None
self._scratch = None
self._parent = None
self._uid = None
self._should_abort = False
self._aborted = False
def __str__(self):
return "{}[{}]".format(self.__class__.__name__, self.uid())
@property
def cfg(self):
"""Configuration object."""
return self._cfg
@property
def status(self):
"""Status object."""
return self._status
@property
def aborted(self):
"""Returns if entity was aborted."""
return self._aborted
@property
def active(self):
"""Entity not aborting/aborted."""
return self._should_abort is False and self._aborted is False
@property
def runpath(self):
"""Path to be used for temp/output files by entity."""
return self._runpath
@property
def scratch(self):
"""Path to be used for temp files by entity."""
return self._scratch
@property
def parent(self):
"""
Returns parent :py:class:`Entity <testplan.common.entity.base.Entity>`.
"""
return self._parent
@parent.setter
def parent(self, value):
"""Reference to parent object."""
self._parent = value
def pause(self):
"""Pause entity execution."""
self.status.change(self.STATUS.PAUSING)
self.pausing()
def resume(self):
"""Resume entity execution."""
self.status.change(self.STATUS.RESUMING)
self.resuming()
def abort(self):
"""
Default abort policy. First abort all dependencies and then itself.
"""
if not self.active:
return
self._should_abort = True
for dep in self.abort_dependencies():
self._abort_entity(dep)
self.aborting()
self._aborted = True
def abort_dependencies(self):
"""Default empty generator."""
return
yield
def _abort_entity(self, entity, wait_timeout=None):
"""Method to abort an entity and log exceptions."""
timeout = wait_timeout or self.cfg.abort_wait_timeout
try:
self.logger.debug("Aborting {}".format(entity))
entity.abort() # Here entity can be a function and will raise
self.logger.debug("Aborted {}".format(entity))
except Exception as exc:
self.logger.error(traceback.format_exc())
self.logger.error(
"Exception on aborting {} - {}".format(self, exc)
)
else:
if wait(lambda: entity.aborted is True, timeout) is False:
self.logger.error(
"Timeout on waiting to abort {}.".format(self)
)
def aborting(self):
"""
Aborting logic for self.
"""
self.logger.debug(
"Abort logic not implemented for {}[{}]".format(
self.__class__.__name__, self.uid()
)
)
def pausing(self):
    """Pause hook; must be implemented by subclasses supporting pause()."""
    raise NotImplementedError()
def resuming(self):
    """Resume hook; must be implemented by subclasses supporting resume()."""
    raise NotImplementedError()
def wait(self, target_status, timeout=None):
    """
    Wait until objects status becomes target status.

    Statuses registered in ``_wait_handlers`` use their custom handler;
    otherwise this polls ``status.tag`` until it matches or times out.
    """
    timeout = timeout or self.cfg.status_wait_timeout
    if target_status in self._wait_handlers:
        self._wait_handlers[target_status](timeout=timeout)
    else:
        wait(lambda: self.status.tag == target_status, timeout=timeout)
def uid(self):
    """Unique identifier of self, generated lazily on first access."""
    if not self._uid:
        self._uid = uuid4()
    return self._uid
def define_runpath(self):
    """
    Define entity's runpath directory based on parent object and configuration.

    Precedence: explicitly configured runpath (value or callable), then
    parent's runpath plus this entity's slugified uid, then the
    module-level default.
    """
    # local config has highest precedence
    runpath = self.cfg.get_local("runpath")
    if runpath:
        self._runpath = runpath(self) if callable(runpath) else runpath
    # else get container's runpath and append uid
    elif self.parent and self.parent.runpath:
        self._runpath = os.path.join(
            self.parent.runpath, slugify(self.uid())
        )
    else:
        self._runpath = default_runpath(self)
def make_runpath_dirs(self):
    """
    Creates runpath related directories (runpath and its scratch subdir).

    With ``path_cleanup`` disabled existing directories are reused;
    otherwise they are wiped and re-created empty.

    :raises RuntimeError: if runpath resolution produced None.
    """
    self.define_runpath()
    if self._runpath is None:
        raise RuntimeError(
            "{} runpath cannot be None".format(self.__class__.__name__)
        )
    self._scratch = os.path.join(self._runpath, "scratch")
    self.logger.debug(
        "%s has %s runpath and pid %d", self, self.runpath, os.getpid()
    )
    if self.cfg.path_cleanup is False:
        makedirs(self._runpath)
        makedirs(self._scratch)
    else:
        makeemptydirs(self._runpath)
        makeemptydirs(self._scratch)
@classmethod
def filter_locals(cls, local_vars):
    """
    Filter out init params of None value, they will take default value
    defined in its ConfigOption object; also filter out special vars that
    are not init params from local_vars.
    """
    special = {"cls", "self", "kwargs", "options", "__class__", "__dict__"}
    filtered = {}
    for name, value in local_vars.items():
        if name in special or value is None:
            continue
        filtered[name] = value
    return filtered
class RunnableStatus(EntityStatus):
    """
    Status of a
    :py:class:`Runnable <testplan.common.entity.base.Runnable>` entity.
    """

    # Runnable-specific lifecycle tags on top of the base EntityStatus ones.
    EXECUTING = "EXECUTING"
    RUNNING = "RUNNING"
    FINISHED = "FINISHED"
    PAUSING = "PAUSING"
    PAUSED = "PAUSED"

    def transitions(self):
        """
        Defines the status transitions of a
        :py:class:`Runnable <testplan.common.entity.base.Runnable>` entity.

        Returns the base transition map with runnable-specific overrides:
        each key maps to the set of statuses it may legally move to next.
        """
        transitions = super(RunnableStatus, self).transitions()
        overrides = {
            self.NONE: {self.RUNNING},
            self.RUNNING: {self.FINISHED, self.EXECUTING, self.PAUSING},
            self.EXECUTING: {self.RUNNING},
            self.PAUSING: {self.PAUSED},
            self.PAUSED: {self.RESUMING},
            self.RESUMING: {self.RUNNING},
            self.FINISHED: {self.RUNNING},
        }
        transitions.update(overrides)
        return transitions
class RunnableConfig(EntityConfig):
    """
    Configuration object for
    :py:class:`~testplan.common.entity.base.Runnable` entity.
    """

    @classmethod
    def get_options(cls):
        """Runnable specific config options."""
        return {
            # IHandlers explicitly enable interactive mode of runnables.
            ConfigOption("interactive_port", default=None): Or(None, int),
            ConfigOption(
                "interactive_block",
                # Block by default when running as a script (__main__ has
                # __file__); don't block in REPL/embedded contexts.
                default=hasattr(sys.modules["__main__"], "__file__"),
            ): bool,
        }
class RunnableResult(object):
    """
    Result object of a
    :py:class:`~testplan.common.entity.base.Runnable` entity.

    Collects per-step return values (in execution order) and the
    overall run outcome.
    """

    def __init__(self):
        # Step name -> step return value (or exception), insertion ordered.
        self.step_results = OrderedDict()
        # Overall outcome; updated by Runnable.run() after execution.
        self.run = False

    def __repr__(self):
        return "{}[{}]".format(type(self).__name__, vars(self))
class Runnable(Entity):
    """
    An object that defines steps, a run method to execute the steps and
    provides results with the
    :py:class:`~testplan.common.entity.base.RunnableResult`
    object.
    It contains an
    :py:class:`~testplan.common.entity.base.Environment`
    object of
    :py:class:`~testplan.common.entity.base.Resource` objects
    that can be started/stopped and utilized by the steps defined.
    :param interactive_port: Enable interactive execution mode on a port.
    :type interactive_port: ``int`` or ``NoneType``
    :param interactive_block: Block on run() on interactive mode.
    :type interactive_block: ``bool``
    Also inherits all
    :py:class:`~testplan.common.entity.base.Entity` options.
    """

    CONFIG = RunnableConfig
    STATUS = RunnableStatus
    RESULT = RunnableResult
    ENVIRONMENT = Environment

    def __init__(self, **options):
        super(Runnable, self).__init__(**options)
        # Environment of resources owned by this runnable.
        self._environment = self.__class__.ENVIRONMENT(parent=self)
        self._result = self.__class__.RESULT()
        # FIFO queue of (callable, args, kwargs) steps; consumed by _run().
        self._steps = deque()
        # Interactive-mode handler, created lazily in run().
        self._ihandler = None

    @property
    def result(self):
        """
        Returns a
        :py:class:`~testplan.common.entity.base.RunnableResult`
        """
        return self._result

    @property
    def resources(self):
        """
        Returns the
        :py:class:`Environment <testplan.common.entity.base.Environment>`
        of :py:class:`Resources <testplan.common.entity.base.Resource>`.
        """
        return self._environment

    @property
    def interactive(self):
        """
        Interactive handler instance, or None when not in interactive mode.
        """
        return self._ihandler

    # Shortcut for interactive handler
    i = interactive

    def add_resource(self, resource, uid=None):
        """
        Adds a :py:class:`resource <testplan.common.entity.base.Resource>`
        in the runnable environment.
        :param resource: Resource to be added.
        :type resource: Subclass of
            :py:class:`~testplan.common.entity.base.Resource`
        :param uid: Optional input resource uid.
        :type uid: ``str`` or ``NoneType``
        :return: Resource uid assigned.
        :rtype: ``str``
        """
        # Wire up parent links so resource runpath/config inherit from self.
        resource.parent = self
        resource.cfg.parent = self.cfg
        return self.resources.add(resource, uid=uid or uuid4())

    def _add_step(self, step, *args, **kwargs):
        # Queue a step for later execution by _run().
        self._steps.append((step, args, kwargs))

    def pre_step_call(self, step):
        """Callable to be invoked before each step."""
        pass

    def skip_step(self, step):
        """Callable to determine if step should be skipped."""
        return False

    def post_step_call(self, step):
        """Callable to be invoked after each step."""
        pass

    def _run(self):
        # Drain the step queue; an IndexError from popleft() on an empty
        # queue signals normal completion (-> FINISHED).
        self.logger.debug("Running %s", self)
        self.status.change(RunnableStatus.RUNNING)
        while self.active:
            if self.status.tag == RunnableStatus.RUNNING:
                try:
                    func, args, kwargs = self._steps.popleft()
                    self.pre_step_call(func)
                    if self.skip_step(func) is False:
                        self.logger.debug(
                            "Executing step of %s - %s", self, func.__name__
                        )
                        start_time = time.time()
                        self._execute_step(func, *args, **kwargs)
                        self.logger.debug(
                            "Finished step of %s - %s. Took %ds",
                            self,
                            func.__name__,
                            round(time.time() - start_time, 5),
                        )
                    else:
                        self.logger.debug(
                            "Skipping step of %s - %s", self, func.__name__
                        )
                    self.post_step_call(func)
                except IndexError:
                    self.status.change(RunnableStatus.FINISHED)
                    break
            # Sleep between iterations so PAUSED status doesn't busy-spin.
            time.sleep(self.cfg.active_loop_sleep)

    def _run_batch_steps(self):
        # Assemble the full batch step sequence around the subclass hooks,
        # then execute it and verify no threads/processes leaked.
        start_threads, start_procs = self._get_start_info()
        self._add_step(self.setup)
        self.pre_resource_steps()
        self._add_step(self.resources.start)
        self.pre_main_steps()
        self.main_batch_steps()
        self.post_main_steps()
        # NOTE(review): `reversed=True` is forwarded as a kwarg to
        # Environment.stop - presumably stops resources in reverse start
        # order; confirm against the Environment implementation.
        self._add_step(self.resources.stop, reversed=True)
        self.post_resource_steps()
        self._add_step(self.teardown)
        self._run()
        self._post_run_checks(start_threads, start_procs)

    def _get_start_info(self):
        """
        :return: lists of threads and child processes, to be passed to the
            _post_run_checks method after the run has finished.
        """
        start_threads = threading.enumerate()
        current_proc = psutil.Process()
        start_children = current_proc.children()
        return start_threads, start_children

    def _post_run_checks(self, start_threads, start_procs):
        """
        Compare the current running threads and processes to those that were
        alive before we were run. If there are any differences that indicates
        we have either gained or lost threads or processes during the run,
        which may indicate insufficient cleanup. Warnings will be logged.
        """
        end_threads = threading.enumerate()
        if start_threads != end_threads:
            new_threads = [
                thr.name for thr in end_threads if thr not in start_threads
            ]
            self.logger.warning(
                "New threads are still alive after run: %s", new_threads
            )
            dead_threads = [
                thr.name for thr in start_threads if thr not in end_threads
            ]
            self.logger.warning(
                "Threads have died during run: %s", dead_threads
            )
        current_proc = psutil.Process()
        end_procs = current_proc.children()
        if start_procs != end_procs:
            new_procs = [proc for proc in end_procs if proc not in start_procs]
            self.logger.warning(
                "New processes are still alive after run: %s", new_procs
            )
            dead_procs = [
                proc for proc in start_procs if proc not in end_procs
            ]
            self.logger.warning(
                "Child processes have died during run: %s", dead_procs
            )

    def _execute_step(self, step, *args, **kwargs):
        # Run one step; a raised exception is logged and stored as the
        # step's result rather than propagated (see TODO in run()).
        try:
            res = step(*args, **kwargs)
        except Exception as exc:
            self.logger.error(
                "Exception on %s[%s], step %s - %s",
                self.__class__.__name__,
                self.uid(),
                step.__name__,
                str(exc),
            )
            self.logger.error(traceback.format_exc())
            res = exc
        finally:
            self.result.step_results[step.__name__] = res
            self.status.update_metadata(**{str(step): res})

    def pre_resource_steps(self):
        """Runnable steps to run before environment started."""
        pass

    def pre_main_steps(self):
        """Runnable steps to run after environment started."""
        pass

    def main_batch_steps(self):
        """Runnable steps to be executed while environment is running."""
        pass

    def post_main_steps(self):
        """Runnable steps to run before environment stopped."""
        pass

    def post_resource_steps(self):
        """Runnable steps to run after environment stopped."""
        pass

    def pausing(self):
        """Pause all resources, then mark self as PAUSED."""
        for resource in self.resources:
            resource.pause()
        self.status.change(RunnableStatus.PAUSED)

    def resuming(self):
        """Resume all resources, then mark self as RUNNING."""
        for resource in self.resources:
            resource.resume()
        self.status.change(RunnableStatus.RUNNING)

    def abort_dependencies(self):
        """
        Yield all dependencies to be aborted before self abort.
        """
        for resource in self.resources:
            yield resource

    def setup(self):
        """Setup step to be executed first."""
        pass

    def teardown(self):
        """Teardown step to be executed last."""
        pass

    def should_run(self):
        """Determines if current object should run."""
        return True

    def run(self):
        """
        Executes the defined steps and populates the result object.

        In interactive mode (``cfg.interactive_port`` set) an interactive
        handler is spawned in a thread instead; in batch mode the queued
        steps are run to completion.

        :return: the interactive handler in interactive mode, else the
            populated RunnableResult.
        """
        try:
            if self.cfg.interactive_port is not None:
                if self._ihandler is not None:
                    raise RuntimeError(
                        "{} already has an active {}".format(
                            self, self._ihandler
                        )
                    )
                self.logger.test_info("Starting %s in interactive mode", self)
                self._ihandler = self.cfg.interactive_handler(
                    target=self, http_port=self.cfg.interactive_port
                )
                self._ihandler.parent = self
                thread = threading.Thread(target=self._ihandler)
                thread.start()
                # Check if we are on interactive session.
                if self.cfg.interactive_block:
                    # Block the caller until the handler finishes.
                    while self._ihandler.active:
                        time.sleep(self.cfg.active_loop_sleep)
                return self._ihandler
            else:
                self._run_batch_steps()
        except Exception as exc:
            # Store the failure as the run outcome instead of raising.
            self._result.run = exc
            self.logger.error(traceback.format_exc())
        else:
            # TODO fix swallow exceptions in self._result.step_results.values()
            self._result.run = (
                self.status.tag == RunnableStatus.FINISHED
                and self.run_result() is True
            )
        return self._result

    def run_result(self):
        """Returns if a run was successful."""
        # A step counts as failed when it stored an exception or returned
        # False explicitly (None/other values count as success).
        return all(
            not isinstance(val, Exception) and val is not False
            for val in self._result.step_results.values()
        )

    def dry_run(self):
        """A testing process that creates result for each step."""
        raise NotImplementedError
class FailedAction(object):
    """
    Simple Falsey container that can be used for
    returning results of certain failed async actions.
    The `error_msg` can later on be used for enriching the error messages.

    :param error_msg: Human readable description of why the action failed.
    """

    def __init__(self, error_msg):
        self.error_msg = error_msg

    def __bool__(self):
        # Always falsy so callers can simply test ``if not result: ...``.
        return False

    def __repr__(self):
        # Added for debuggability: surfaces the failure reason in logs
        # instead of the default <FailedAction object at 0x...>.
        return "{}({!r})".format(type(self).__name__, self.error_msg)
class ResourceConfig(EntityConfig):
    """
    Configuration object for
    :py:class:`~testplan.common.entity.base.Resource` entity.
    """

    @classmethod
    def get_options(cls):
        """Resource specific config options."""
        return {
            # async_start: start()/stop() return without waiting for the
            # STARTED/STOPPED status when True.
            ConfigOption("async_start", default=True): bool,
            # auto_start: the Environment starts this resource automatically.
            ConfigOption("auto_start", default=True): bool,
        }
class ResourceStatus(EntityStatus):
    """
    Status of a
    :py:class:`Resource <testplan.common.entity.base.Resource>` entity.
    """

    # Resource-specific lifecycle tags on top of the base EntityStatus ones.
    STARTING = "STARTING"
    STARTED = "STARTED"
    STOPPING = "STOPPING"
    STOPPED = "STOPPED"

    def transitions(self):
        """
        Defines the status transitions of a
        :py:class:`Resource <testplan.common.entity.base.Resource>` entity.

        Returns the base transition map with resource-specific overrides:
        each key maps to the set of statuses it may legally move to next.
        """
        transitions = super(ResourceStatus, self).transitions()
        overrides = {
            self.NONE: {self.STARTING},
            self.STARTING: {self.STARTED, self.STOPPING},
            self.STARTED: {self.PAUSING, self.STOPPING},
            self.PAUSING: {self.PAUSED},
            self.PAUSED: {self.RESUMING, self.STOPPING},
            self.RESUMING: {self.STARTED},
            self.STOPPING: {self.STOPPED},
            self.STOPPED: {self.STARTING},
        }
        transitions.update(overrides)
        return transitions
class Resource(Entity):
    """
    An object that can be started/stopped and expose its context
    object of key/value pair information.
    A Resource is usually part of an
    :py:class:`~testplan.common.entity.base.Environment`
    object of a
    :py:class:`~testplan.common.entity.base.Runnable` object.
    :param async_start: Resource can start asynchronously.
    :type async_start: ``bool``
    :param auto_start: Enables the Environment to start the Resource
        automatically.
    :type auto_start: ``bool``
    Also inherits all
    :py:class:`~testplan.common.entity.base.Entity` options.
    """

    CONFIG = ResourceConfig
    STATUS = ResourceStatus

    def __init__(self, **options):
        super(Resource, self).__init__(**options)
        self._context = None
        # Custom waiters: waiting for STARTED/STOPPED delegates to the
        # _wait_started/_wait_stopped hooks instead of plain polling.
        self._wait_handlers.update(
            {
                self.STATUS.STARTED: self._wait_started,
                self.STATUS.STOPPED: self._wait_stopped,
            }
        )

    @property
    def context(self):
        """Key/value pair information of a Resource."""
        return self._context

    @context.setter
    def context(self, context):
        """Set the Resource context."""
        self._context = context

    def start(self):
        """
        Triggers the start logic of a Resource by executing
        :py:meth:
        `Resource.starting <testplan.common.entity.base.Resource.starting>`
        method.

        With ``async_start`` disabled this also blocks until the
        resource reaches STARTED and then runs ``post_start``.
        """
        self.logger.debug("Starting %r", self)
        self.status.change(self.STATUS.STARTING)
        self.pre_start()
        self.starting()
        if not self.cfg.async_start:
            self.wait(self.STATUS.STARTED)
            self.logger.debug("Started %r", self)
            self.post_start()

    def stop(self):
        """
        Triggers the stop logic of a Resource by executing
        :py:meth:
        `Resource.stopping <testplan.common.entity.base.Resource.stopping>`
        method.

        ``stopping()`` is skipped when the resource is already
        aborting/aborted; with ``async_start`` disabled this blocks
        until STOPPED and then runs ``post_stop``.
        """
        self.logger.debug("Stopping %r", self)
        self.status.change(self.STATUS.STOPPING)
        self.pre_stop()
        if self.active:
            self.stopping()
        if not self.cfg.async_start:
            self.wait(self.STATUS.STOPPED)
            self.logger.debug("Stopped %r", self)
            self.post_stop()

    def pre_start(self):
        """Steps to be executed right before resource starts."""

    def post_start(self):
        """Steps to be executed right after resource is started."""

    def pre_stop(self):
        """Steps to be executed right before resource stops."""

    def post_stop(self):
        """Steps to be executed right after resource is stopped"""

    def _wait_started(self, timeout=None):
        # Default waiter: simply flip the status; subclasses override to
        # perform a real readiness wait.
        self.status.change(self.STATUS.STARTED)

    def _wait_stopped(self, timeout=None):
        # Default waiter: simply flip the status; subclasses override to
        # perform a real shutdown wait.
        self.status.change(self.STATUS.STOPPED)

    def starting(self):
        """
        Start logic for Resource that also sets the status to *STARTED*.
        """
        raise NotImplementedError()

    def stopping(self):
        """
        Stop logic for Resource that also sets the status to *STOPPED*.
        """
        raise NotImplementedError()

    def pausing(self):
        """Pause the resource."""
        self.status.change(self.status.PAUSED)

    def resuming(self):
        """Resume the resource."""
        self.status.change(self.status.STARTED)

    def restart(self, timeout=None):
        """Stop and start the resource."""
        self.stop()
        self.wait(self.status.STOPPED)
        self.start()
        self.wait(self.status.STARTED)

    def __enter__(self):
        # Context-manager support: start on enter, stop on exit.
        self.start()
        self.wait(self.STATUS.STARTED)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        self.wait(self.STATUS.STOPPED)

    @property
    def is_alive(self):
        """
        Called to periodically poll the resource health. Default implementation
        assumes the resource is always healthy.
        """
        return True

    def pending_work(self):
        """Resource has pending work."""
        return False
# Signals that trigger a graceful abort of the managed runnable by default.
DEFAULT_RUNNABLE_ABORT_SIGNALS = [signal.SIGINT, signal.SIGTERM]
class RunnableManagerConfig(EntityConfig):
    """
    Configuration object for
    :py:class:`RunnableManager <testplan.common.entity.base.RunnableManager>`
    entity.
    """

    @classmethod
    def get_options(cls):
        """RunnableManager specific config options."""
        return {
            ConfigOption("parse_cmdline", default=True): bool,
            # The runnable class to instantiate and manage.
            ConfigOption("runnable", default=Runnable): is_subclass(Runnable),
            # Resources added to the runnable's environment at init time.
            ConfigOption("resources", default=[]): [Resource],
            ConfigOption(
                "abort_signals", default=DEFAULT_RUNNABLE_ABORT_SIGNALS
            ): [int],
        }
class RunnableManager(Entity):
    """
    Executes a
    :py:class:`Runnable <testplan.common.entity.base.Runnable>` entity
    in a separate thread and handles the abort signals.
    :param parse_cmdline: Parse command line arguments.
    :type parse_cmdline: ``bool``
    :param runnable: Test runner.
    :type runnable: :py:class:`~testplan.runnable.TestRunner`
    :param resources: Initial resources.
    :type resources:
        ``list`` of :py:class:`Resources <testplan.common.entity.base.Resource>`
    :param abort_signals: Signals to catch and trigger abort.
    :type abort_signals: ``list`` of signals
    Also inherits all
    :py:class:`~testplan.common.entity.base.Entity` options.
    """

    CONFIG = RunnableManagerConfig

    def __init__(self, **options):
        super(RunnableManager, self).__init__(**options)
        self._default_options = options
        # Optionally let command-line arguments override/extend the options
        # before they are used to construct the runnable.
        if self._cfg.parse_cmdline is True:
            options = self.enrich_options(self._default_options)
        self._runnable = self._initialize_runnable(**options)
        for resource in self._cfg.resources:
            self._runnable.add_resource(resource)

    def enrich_options(self, options):
        """
        Enrich the options using parsed command line arguments.
        Override this method to add extra argument processing logic.
        The result dictionary is used to initialize the configuration.
        """
        return options

    def _initialize_runnable(self, **options):
        # Instantiate the configured runnable class with the given options.
        runnable_class = self._cfg.runnable
        runnable_config = dict(**options)
        return runnable_class(**runnable_config)

    def __getattr__(self, item):
        # Fall back to the managed runnable for unknown attributes, so the
        # manager can be used as a transparent proxy.
        try:
            return self.__getattribute__(item)
        except AttributeError:
            if "_runnable" in self.__dict__:
                return getattr(self._runnable, item)
            raise

    @property
    def runnable(self):
        """Runnable instance."""
        return self._runnable

    @property
    def runpath(self):
        """Expose the runnable runpath."""
        return self._runnable.runpath

    @property
    def cfg(self):
        """Expose the runnable configuration object."""
        return self._runnable.cfg

    @property
    def status(self):
        """Expose the runnable status."""
        return self._runnable.status

    @property
    def active(self):
        """Expose the runnable active attribute."""
        return self._runnable.active

    def run(self):
        """
        Executes target runnable defined in configuration in a separate thread.
        :return: Runnable result object.
        :rtype: :py:class:
            `RunnableResult <testplan.common.entity.base.RunnableResult>`
        :raises Exception: re-raises the runnable's stored result when it
            is an exception (batch mode only).
        """
        try:
            for sig in self._cfg.abort_signals:
                signal.signal(sig, self._handle_abort)
        except ValueError:
            # signal.signal() only works from the main thread; degrade to
            # running without abort-signal handling.
            self.logger.warning(
                "Not able to install signal handler -"
                " signal only works in main thread"
            )
        execute_as_thread(
            self._runnable.run,
            daemon=True,
            join=True,
            # Stop joining the worker thread once we have been aborted.
            break_join=lambda: self.aborted is True,
        )
        if self._runnable.interactive is not None:
            return self._runnable.interactive
        if isinstance(self._runnable.result, Exception):
            raise self._runnable.result
        return self._runnable.result

    def _handle_abort(self, signum, frame):
        # Ignore further abort signals while the abort is in progress.
        for sig in self._cfg.abort_signals:
            signal.signal(sig, signal.SIG_IGN)
        self.logger.debug(
            "Signal handler called for signal %d from %s",
            signum,
            threading.current_thread(),
        )
        self.abort()

    def pausing(self):
        """Pause the runnable execution."""
        self._runnable.pause()

    def resuming(self):
        """Resume the runnable execution."""
        self._runnable.resume()

    def abort_dependencies(self):
        """Dependencies to be aborted first."""
        yield self._runnable

    def aborting(self):
        """Suppressing not implemented debug log by parent class."""
        pass
|
engineer.py | import os
import numpy as np
import pandas as pd
from scipy.stats import skew, kurtosis, mode
# from constant import Constant
from meta_feature_utils import sample_num_strategy # sample strategy
import random
import re
import easyocr # pip install easyocr
from mtcnn import MTCNN # pip install mtcnn
# python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
from PIL import Image
from numba import cuda
class EngineerFeatureData(object):
    """Attribute container for engineered dataset meta features.

    Every known field is pre-declared as ``None`` (general descriptors
    plus nine distribution statistics for each of images-per-class,
    image height, width and area) and then overridden by the values
    supplied in *dict*.
    """

    def __init__(self, dict):
        # General dataset descriptors.
        for field in ("name", "class_count", "image_count", "color_mode"):
            setattr(self, field, None)
        # Distribution descriptors: <prefix>_<stat> for each combination.
        stats = (
            "mean", "median", "mode", "min", "max",
            "range", "std", "skew", "kurt",
        )
        for prefix in ("im_per_class", "height", "width", "area"):
            for stat in stats:
                setattr(self, "{}_{}".format(prefix, stat), None)
        # Supplied values take precedence over the None defaults.
        self.__dict__.update(dict)
import multiprocessing
class EngineerFeature:
#Constant.ENGINEER_FEATURES_CSV
def __init__(self, task_config, csv_path='', save_to_file = False):
''' Calculate engineered meta features to a dataset, such as num of classes, total count of images
Args:
task_config: configs containing job info
csv_path: path to engineerFeatures feature file
save_to_file: whether save current data to file, default is False
Params:
data_name[str]: name of the dataset
data_path[str]: path to the dataset
csv_path[str]: path to the csv file that contains info about previous datasets
df[pd.DataFrame]: data loaded from csv_path
entry[np.ndarray]: engineered meta features of current dataset
'''
self._contain_chars = False
self._contain_faces = False
self._contain_poses = False
self._is_xray = False
self.data_name = task_config["data_name"]
self.data_path = task_config["data_path"]
self.csv_path = csv_path
self.df = self._load_csv()
self.entry = self._generate_feature(save_to_file)
self.contains = self._judge_special_cases(self.data_path)
def get_engineered_feature(self) -> EngineerFeatureData:
''' Wrap entry to current entry in SimpleNamespace and return
Returns:
arg: a SimpleNamespace containing info regarding the dataset.
Ex: arg.name, arg.im_per_class_median
'''
dict = {i : j for i,j in zip(self.df.index, self.entry)}
dict['name'] = self.data_name
arg = EngineerFeatureData(dict)
return arg
def contain_chars(self):
return self._contain_chars
def contain_faces(self):
return self._contain_faces
def contain_poses(self):
return self._contain_poses
def is_xray(self):
return self._is_xray
def _remove_special_chars(self, input) :
input = re.sub('[’!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~\s]+', "", input)
return re.sub(u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])", "", input)
def _init_keypoint_detection_predictor(self):
# python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
cfg = get_cfg() # get a fresh new config
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
return predictor
def _data_has_char(self, images:list, total_sample) -> bool:
chars = 0
reader = easyocr.Reader(['ch_sim', 'en']) # need to run only once to load model into memory
for im in images:
res = reader.readtext(im)
invalid = 0
for i in res :
if (self._remove_special_chars(i[1]) == "") :
invalid += 1
if len(res) - invalid > 0:
chars += 1
# set threshold
if chars / total_sample > 0.9:
self._contain_chars = True
return True
return False
def _data_has_face(self, images:list, total_sample) -> bool:
faces = 0
detector = MTCNN()
for im in images:
im = np.array(Image.open(im).convert('RGB')).astype(np.float32)
res = detector.detect_faces(im)
largest = 0
for face in res :
curr = face['box'][0] * face['box'][0]
largest = curr if curr > largest else largest
if(largest / 50176 > 0.35):
faces +=1
if faces / total_sample > 0.9:
self._contain_faces = True
return True
return False
def _data_has_pose(self, images:list, total_sample) -> bool:
poses = 0
predictor = self._init_keypoint_detection_predictor()
for im in images:
im = np.array(Image.open(im).convert('RGB')).astype(np.float32)
out = predictor(im)
if len(out['instances'].get_fields()['pred_boxes'].tensor) > 0:
poses += 1
if poses/total_sample > 0.9:
self._contain_poses = True
return True
return False
def _judge_special_cases(self, ddir: str) -> None:
''' Get one vector of feature to one dataset
Args:
ddir: path to the dataset
Returns:
entry: feature vector of one dataset
'''
print('Start judging dataset special cases.')
imPerClass = [len(os.listdir(os.path.join(ddir, i))) for i in os.listdir(ddir)]
mean = int(np.mean(imPerClass))
total_sample = 0
images = []
for j, c in enumerate(os.listdir(ddir)) :
im_path = os.path.join(ddir, c) # path to current class folder
im_files = os.listdir(im_path) # image names in the class folder
class_num = len(im_files)
sample_num = sample_num_strategy(mean, class_num)
total_sample += sample_num
index = random.sample(range(class_num), sample_num)
for i in index :
im = os.path.join(im_path, im_files[i])
images.append(im)
# multiprocessing.Process(target=self._data_has_face(images, total_sample), )
if self._data_has_pose(images, total_sample):
return
if self._data_has_char(images, total_sample):
return
device = cuda.get_current_device()
device.reset()
if self._data_has_face(images, total_sample):
return
device = cuda.get_current_device()
device.reset()
def _generate_feature(self, save_to_file:bool) -> np.ndarray:
''' to generate feature
Used Params:
self.data_name,
self.data_path
Args:
save_to_file: whether save to file
Returns:
entry: entry to current dataset
'''
if self.data_name in self.df.columns:
print(f'{self.data_name} already in csv file so stored features will be loaded. '
f'Please use another name if you entered a new dataset.')
return np.array(self.df[self.data_name])
entry = self._get_data_features(self.data_path, self.data_name)
if save_to_file:
self.df[self.data_name] = entry[1:]
self.df.to_csv(self.csv_path, header=True, index=True)
return entry
def _load_csv(self) -> pd.DataFrame:
'''
Args:
csv_path: path to the csv file
Returns:
df: dataframe loaded from the csv file
'''
if not os.path.isfile(self.csv_path):
raise FileNotFoundError(f'Cannot find csv file {self.csv_path}')
df = pd.read_csv(self.csv_path, index_col=0, dtype='str')
# convert string to float
for i in df.index :
if i == 'color_mode' :
continue
df.loc[i] = df.loc[i].astype('float32')
return df
def _get_data_features(self, ddir: str, name: str) -> np.ndarray :
''' Calculate all the features to the one dataset
Args:
ddir: path to the dataset train folder
name: name of the dataset
Returns:
entry: one entry of the engineered features of the dataset
'''
imPerClass = [len(os.listdir(os.path.join(ddir, i))) for i in os.listdir(ddir)]
imPerClass = np.asarray(imPerClass)
num_classes = len(os.listdir(ddir))
num_images = np.sum(imPerClass)
heights = []
widths = []
areas = []
rgb = 0
for c in os.listdir(ddir) :
for i in os.listdir(os.path.join(ddir, c)) :
im = Image.open(os.path.join(ddir, c, i))
size = im.size
heights.append(size[0])
widths.append(size[1])
areas.append(size[0] * size[1])
cmode = im.mode
if im.mode == 'RGB' or im.mode == 'RGBA':
rgb +=1
cmode = 'RGB' if rgb / num_images > 0.5 else 'L'
ipc = self._get_list_distribution(imPerClass)
imh = self._get_list_distribution(np.asarray(heights))
imw = self._get_list_distribution(np.asarray(widths))
ima = self._get_list_distribution(np.asarray(areas))
general = np.asarray([name, num_classes, num_images, cmode], dtype=object)
entry = np.concatenate((general, ipc, imh, imw, ima))
return entry
def _get_list_distribution(self, data: np.ndarray) -> np.ndarray :
''' Calculate the statistical info of a list.
Args:
data: 1d np array
Returns:
out: the following statistics of input data
[ mean, median, mode, min, max,
range,std, skewness, kurtosis]
'''
out = np.array([np.mean(data),
np.median(data),
mode(data)[0][0],
np.min(data),
np.max(data),
np.max(data) - np.min(data),
np.std(data),
skew(data),
kurtosis(data)])
return out
|
test_base.py | import asyncio
import fcntl
import logging
import os
import random
import sys
import threading
import time
import uvloop
import unittest
import weakref
from unittest import mock
from uvloop._testbase import UVTestCase, AIOTestCase
class _TestBase:
def test_close(self):
    """Loop close() is idempotent and blocks further run calls."""
    self.assertFalse(self.loop._closed)
    self.assertFalse(self.loop.is_closed())
    self.loop.close()
    self.assertTrue(self.loop._closed)
    self.assertTrue(self.loop.is_closed())
    # it should be possible to call close() more than once
    self.loop.close()
    self.loop.close()
    # operation blocked when the loop is closed
    f = asyncio.Future()
    self.assertRaises(RuntimeError, self.loop.run_forever)
    self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test_handle_weakref(self):
    """Timer handles support weak references."""
    wd = weakref.WeakValueDictionary()
    h = self.loop.call_soon(lambda: None)
    wd['h'] = h  # Would fail without __weakref__ slot.
def test_call_soon_1(self):
    """call_soon runs callbacks in order; cancelled handles are skipped."""
    calls = []

    def cb(inc):
        calls.append(inc)
        self.loop.stop()

    self.loop.call_soon(cb, 10)
    h = self.loop.call_soon(cb, 100)
    self.assertIn('.cb', repr(h))
    h.cancel()
    self.assertIn('cancelled', repr(h))
    self.loop.call_soon(cb, 1)
    self.loop.run_forever()
    # 100 is missing because its handle was cancelled.
    self.assertEqual(calls, [10, 1])
def test_call_soon_2(self):
    """call_soon does not leak references to its positional args."""
    waiter = self.loop.create_future()
    waiter_r = weakref.ref(waiter)
    self.loop.call_soon(lambda f: f.set_result(None), waiter)
    self.loop.run_until_complete(waiter)
    del waiter
    self.assertIsNone(waiter_r())
def test_call_soon_3(self):
    """call_soon does not leak references captured via lambda defaults."""
    waiter = self.loop.create_future()
    waiter_r = weakref.ref(waiter)
    self.loop.call_soon(lambda f=waiter: f.set_result(None))
    self.loop.run_until_complete(waiter)
    del waiter
    self.assertIsNone(waiter_r())
def test_call_soon_base_exc(self):
    """BaseExceptions from callbacks propagate without closing the loop."""
    def cb():
        raise KeyboardInterrupt()

    self.loop.call_soon(cb)
    with self.assertRaises(KeyboardInterrupt):
        self.loop.run_forever()
    self.assertFalse(self.loop.is_closed())
def test_calls_debug_reporting(self):
    """Callback exceptions go to the exception handler; the context
    carries source_traceback only in debug mode."""
    def run_test(debug, meth, stack_adj):
        context = None

        def handler(loop, ctx):
            nonlocal context
            context = ctx

        self.loop.set_debug(debug)
        self.loop.set_exception_handler(handler)

        def cb():
            1 / 0

        meth(cb)
        # Handler must not be invoked before the loop runs the callback.
        self.assertIsNone(context)
        self.loop.run_until_complete(asyncio.sleep(0.05))
        self.assertIs(type(context['exception']), ZeroDivisionError)
        self.assertTrue(context['message'].startswith(
            'Exception in callback'))
        if debug:
            tb = context['source_traceback']
            self.assertEqual(tb[-1 + stack_adj].name, 'run_test')
        else:
            self.assertFalse('source_traceback' in context)
        del context

    for debug in (True, False):
        for meth_name, meth, stack_adj in (
            ('call_soon',
             self.loop.call_soon, 0),
            ('call_later',  # `-1` accounts for lambda
             lambda *args: self.loop.call_later(0.01, *args), -1)
        ):
            with self.subTest(debug=debug, meth_name=meth_name):
                run_test(debug, meth, stack_adj)
def test_now_update(self):
    """loop.time() advances across a blocking sleep inside a coroutine."""
    async def run():
        st = self.loop.time()
        time.sleep(0.05)
        return self.loop.time() - st

    delta = self.loop.run_until_complete(run())
    self.assertTrue(delta > 0.049 and delta < 0.6)
def test_call_later_1(self):
    """call_later schedules, honors cancel() and stop() timing."""
    calls = []

    def cb(inc=10, stop=False):
        calls.append(inc)
        self.assertTrue(self.loop.is_running())
        if stop:
            self.loop.call_soon(self.loop.stop)

    self.loop.call_later(0.05, cb)
    # canceled right away
    h = self.loop.call_later(0.05, cb, 100, True)
    self.assertIn('.cb', repr(h))
    h.cancel()
    self.assertIn('cancelled', repr(h))
    self.loop.call_later(0.05, cb, 1, True)
    self.loop.call_later(1000, cb, 1000)  # shouldn't be called
    started = time.monotonic()
    self.loop.run_forever()
    finished = time.monotonic()
    self.assertEqual(calls, [10, 1])
    self.assertFalse(self.loop.is_running())
    # Total runtime bounded: stop fired after ~0.05s, not after 1000s.
    self.assertLess(finished - started, 0.3)
    self.assertGreater(finished - started, 0.04)
def test_call_later_2(self):
    # Test that loop.call_later triggers an update of
    # libuv cached time.
    async def main():
        await asyncio.sleep(0.001)
        time.sleep(0.01)
        await asyncio.sleep(0.01)

    started = time.monotonic()
    self.loop.run_until_complete(main())
    delta = time.monotonic() - started
    # Total must reflect all three waits, not a stale cached clock.
    self.assertGreater(delta, 0.019)
def test_call_later_3(self):
    """Timer handles must not keep their positional args alive
    (memory-leak regression test)."""
    # a memory leak regression test
    waiter = self.loop.create_future()
    waiter_r = weakref.ref(waiter)
    self.loop.call_later(0.01, lambda f: f.set_result(None), waiter)
    self.loop.run_until_complete(waiter)
    del waiter
    # If the handle retained the future, the weakref would still be live.
    self.assertIsNone(waiter_r())
def test_call_later_4(self):
    """Same leak check as test_call_later_3, but the future is captured
    via a lambda default argument instead of a handle arg."""
    # a memory leak regression test
    waiter = self.loop.create_future()
    waiter_r = weakref.ref(waiter)
    self.loop.call_later(0.01, lambda f=waiter: f.set_result(None))
    self.loop.run_until_complete(waiter)
    del waiter
    self.assertIsNone(waiter_r())
def test_call_later_negative(self):
    """A negative delay is clamped: the callback still runs (immediately)."""
    calls = []

    def cb(arg):
        calls.append(arg)
        self.loop.stop()

    self.loop.call_later(-1, cb, 'a')
    self.loop.run_forever()
    self.assertEqual(calls, ['a'])
def test_call_later_rounding(self):
    """Float rounding must not make timers fire before their deadline."""
    # Refs #233, call_later() and call_at() shouldn't call cb early

    def cb():
        self.loop.stop()

    # 0.06 + 0.01 == 0.06999999999999999 in binary floating point;
    # schedule it repeatedly to catch intermittent early wakeups.
    for i in range(8):
        self.loop.call_later(0.06 + 0.01, cb)  # 0.06999999999999999
        started = int(round(self.loop.time() * 1000))
        self.loop.run_forever()
        finished = int(round(self.loop.time() * 1000))
        self.assertGreaterEqual(finished - started, 69)
def test_call_at(self):
    """call_at schedules at an absolute loop time; cancelled handles
    never fire."""
    if (os.environ.get('TRAVIS_OS_NAME')
            or os.environ.get('GITHUB_WORKFLOW')):
        # Time seems to be really unpredictable on Travis.
        raise unittest.SkipTest('time is not monotonic on CI')

    i = 0

    def cb(inc):
        nonlocal i
        i += inc
        self.loop.stop()

    at = self.loop.time() + 0.05

    self.loop.call_at(at, cb, 100).cancel()
    self.loop.call_at(at, cb, 10)

    started = time.monotonic()
    self.loop.run_forever()
    finished = time.monotonic()

    # Only the non-cancelled handle contributed.
    self.assertEqual(i, 10)

    self.assertLess(finished - started, 0.07)
    self.assertGreater(finished - started, 0.045)
def test_check_thread(self):
    """In debug mode, non-threadsafe scheduling calls made from a foreign
    thread must raise RuntimeError; with debug off they are accepted."""
    def check_thread(loop, debug):
        def cb():
            pass

        loop.set_debug(debug)
        if debug:
            msg = ("Non-thread-safe operation invoked on an "
                   "event loop other than the current one")
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_soon(cb)
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_later(60, cb)
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_at(loop.time() + 60, cb)
        else:
            loop.call_soon(cb)
            loop.call_later(60, cb)
            loop.call_at(loop.time() + 60, cb)

    def check_in_thread(loop, event, debug, create_loop, fut):
        # wait until the event loop is running
        event.wait()

        try:
            if create_loop:
                # Give this thread its own loop so `loop` is "foreign".
                loop2 = self.new_loop()
                try:
                    asyncio.set_event_loop(loop2)
                    check_thread(loop, debug)
                finally:
                    asyncio.set_event_loop(None)
                    loop2.close()
            else:
                check_thread(loop, debug)
        except Exception as exc:
            # Report back to the main loop's future either way.
            loop.call_soon_threadsafe(fut.set_exception, exc)
        else:
            loop.call_soon_threadsafe(fut.set_result, None)

    def test_thread(loop, debug, create_loop=False):
        event = threading.Event()
        fut = asyncio.Future(loop=loop)
        loop.call_soon(event.set)
        args = (loop, event, debug, create_loop, fut)
        thread = threading.Thread(target=check_in_thread, args=args)
        thread.start()
        loop.run_until_complete(fut)
        thread.join()

    # raise RuntimeError if the thread has no event loop
    test_thread(self.loop, True)

    # check disabled if debug mode is disabled
    test_thread(self.loop, False)

    # raise RuntimeError if the event loop of the thread is not the called
    # event loop
    test_thread(self.loop, True, create_loop=True)

    # check disabled if debug mode is disabled
    test_thread(self.loop, False, create_loop=True)
def test_run_once_in_executor_plain(self):
    """run_in_executor(None, ...) runs the callable on the default
    executor and its result can be awaited."""
    called = []

    def cb(arg):
        called.append(arg)

    async def runner():
        await self.loop.run_in_executor(None, cb, 'a')

    self.loop.run_until_complete(runner())
    self.assertEqual(called, ['a'])
def test_set_debug(self):
    """set_debug/get_debug round-trip for both states."""
    self.loop.set_debug(True)
    self.assertTrue(self.loop.get_debug())
    self.loop.set_debug(False)
    self.assertFalse(self.loop.get_debug())
def test_run_until_complete_type_error(self):
    """run_until_complete rejects non-awaitable arguments."""
    self.assertRaises(
        TypeError, self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
    """A future bound to one loop cannot be run on another loop."""
    task = asyncio.Future()
    other_loop = self.new_loop()
    self.addCleanup(other_loop.close)
    self.assertRaises(
        ValueError, other_loop.run_until_complete, task)
def test_run_until_complete_error(self):
    """An exception raised inside the coroutine propagates to the caller."""
    async def foo():
        raise ValueError('aaa')
    with self.assertRaisesRegex(ValueError, 'aaa'):
        self.loop.run_until_complete(foo())
def test_run_until_complete_loop_orphan_future_close_loop(self):
    """An interrupted run_until_complete must detach its internal
    done-callback so a later run_until_complete still works."""
    async def foo(delay):
        await asyncio.sleep(delay)

    def throw():
        raise KeyboardInterrupt

    self.loop.call_soon(throw)
    try:
        self.loop.run_until_complete(foo(0.1))
    except KeyboardInterrupt:
        pass

    # This call fails if run_until_complete does not clean up
    # done-callback for the previous future.
    self.loop.run_until_complete(foo(0.2))
def test_run_until_complete_keyboard_interrupt(self):
    """KeyboardInterrupt from the awaited coroutine must not leave a
    stray stop() scheduled on the loop."""
    # Issue #336: run_until_complete() must not schedule a pending
    # call to stop() if the future raised a KeyboardInterrupt

    async def raise_keyboard_interrupt():
        raise KeyboardInterrupt

    self.loop._process_events = mock.Mock()

    with self.assertRaises(KeyboardInterrupt):
        self.loop.run_until_complete(raise_keyboard_interrupt())

    def func():
        self.loop.stop()
        func.called = True
    func.called = False
    self.loop.call_later(0.01, func)
    # If a leftover stop() were pending, the loop would exit before
    # `func` ran and `func.called` would stay False.
    self.loop.run_forever()
    self.assertTrue(func.called)
def test_debug_slow_callbacks(self):
    """Debug mode logs a warning for call_soon callbacks that exceed
    slow_callback_duration."""
    logger = logging.getLogger('asyncio')
    self.loop.set_debug(True)
    self.loop.slow_callback_duration = 0.2
    self.loop.call_soon(lambda: time.sleep(0.3))

    with mock.patch.object(logger, 'warning') as log:
        self.loop.run_until_complete(asyncio.sleep(0))

    self.assertEqual(log.call_count, 1)

    # format message
    msg = log.call_args[0][0] % log.call_args[0][1:]

    self.assertIn('Executing <Handle', msg)
    self.assertIn('test_debug_slow_callbacks', msg)
def test_debug_slow_timer_callbacks(self):
    """Debug mode logs a warning for slow call_later (TimerHandle)
    callbacks."""
    logger = logging.getLogger('asyncio')
    self.loop.set_debug(True)
    self.loop.slow_callback_duration = 0.2
    self.loop.call_later(0.01, lambda: time.sleep(0.3))

    with mock.patch.object(logger, 'warning') as log:
        self.loop.run_until_complete(asyncio.sleep(0.02))

    self.assertEqual(log.call_count, 1)

    # format message
    msg = log.call_args[0][0] % log.call_args[0][1:]

    self.assertIn('Executing <TimerHandle', msg)
    self.assertIn('test_debug_slow_timer_callbacks', msg)
def test_debug_slow_task_callbacks(self):
    """Debug mode logs a warning for task steps that block too long."""
    logger = logging.getLogger('asyncio')
    self.loop.set_debug(True)
    self.loop.slow_callback_duration = 0.2

    async def foo():
        time.sleep(0.3)

    with mock.patch.object(logger, 'warning') as log:
        self.loop.run_until_complete(foo())

    self.assertEqual(log.call_count, 1)

    # format message
    msg = log.call_args[0][0] % log.call_args[0][1:]

    self.assertIn('Executing <Task finished', msg)
    self.assertIn('test_debug_slow_task_callbacks', msg)
def test_default_exc_handler_callback(self):
    """With no custom handler, callback exceptions are logged through
    the 'asyncio' logger for both Handle and TimerHandle callbacks."""
    self.loop.set_exception_handler(None)
    self.loop._process_events = mock.Mock()

    def zero_error(fut):
        fut.set_result(True)
        1 / 0

    logger = logging.getLogger('asyncio')

    # Test call_soon (events.Handle)
    with mock.patch.object(logger, 'error') as log:
        fut = asyncio.Future()
        self.loop.call_soon(zero_error, fut)
        fut.add_done_callback(lambda fut: self.loop.stop())
        self.loop.run_forever()
        log.assert_called_with(
            self.mock_pattern('Exception in callback.*zero'),
            exc_info=mock.ANY)

    # Test call_later (events.TimerHandle)
    with mock.patch.object(logger, 'error') as log:
        fut = asyncio.Future()
        self.loop.call_later(0.01, zero_error, fut)
        fut.add_done_callback(lambda fut: self.loop.stop())
        self.loop.run_forever()
        log.assert_called_with(
            self.mock_pattern('Exception in callback.*zero'),
            exc_info=mock.ANY)
def test_set_exc_handler_custom(self):
    """A custom exception handler receives callback errors; resetting
    the handler to None falls back to logging."""
    self.loop.set_exception_handler(None)
    logger = logging.getLogger('asyncio')

    def run_loop():
        def zero_error():
            self.loop.stop()
            1 / 0
        self.loop.call_soon(zero_error)
        self.loop.run_forever()

    errors = []

    def handler(loop, exc):
        errors.append(exc)

    self.loop.set_debug(True)

    self.assertIsNone(self.loop.get_exception_handler())
    self.loop.set_exception_handler(handler)
    if hasattr(self.loop, 'get_exception_handler'):
        self.assertIs(self.loop.get_exception_handler(), handler)
    run_loop()
    self.assertEqual(len(errors), 1)
    self.assertRegex(errors[-1]['message'],
                     'Exception in callback.*zero_error')

    self.loop.set_exception_handler(None)
    with mock.patch.object(logger, 'error') as log:
        run_loop()
        log.assert_called_with(
            self.mock_pattern('Exception in callback.*zero'),
            exc_info=mock.ANY)

    # The custom handler must not have been called after its removal.
    self.assertEqual(len(errors), 1)
def test_set_exc_handler_broken(self):
    """If the custom exception handler itself raises, the error is
    logged via the 'asyncio' logger rather than lost."""
    logger = logging.getLogger('asyncio')

    def run_loop():
        def zero_error():
            self.loop.stop()
            1 / 0
        self.loop.call_soon(zero_error)
        self.loop.run_forever()

    def handler(loop, context):
        raise AttributeError('spam')

    self.loop._process_events = mock.Mock()

    self.loop.set_exception_handler(handler)

    with mock.patch.object(logger, 'error') as log:
        run_loop()
        log.assert_called_with(
            self.mock_pattern('Unhandled error in exception handler'),
            exc_info=mock.ANY)
def test_set_task_factory_invalid(self):
    """set_task_factory rejects non-callables and leaves the factory unset."""
    with self.assertRaisesRegex(
            TypeError,
            'task factory must be a callable or None'):
        self.loop.set_task_factory(1)

    self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
    """create_task uses the installed factory; clearing it restores the
    default asyncio.Task type."""
    self.loop._process_events = mock.Mock()

    class MyTask(asyncio.Task):
        pass

    async def coro():
        pass

    factory = lambda loop, coro: MyTask(coro, loop=loop)

    self.assertIsNone(self.loop.get_task_factory())
    self.loop.set_task_factory(factory)
    self.assertIs(self.loop.get_task_factory(), factory)

    task = self.loop.create_task(coro())
    self.assertTrue(isinstance(task, MyTask))
    self.loop.run_until_complete(task)

    self.loop.set_task_factory(None)
    self.assertIsNone(self.loop.get_task_factory())

    task = self.loop.create_task(coro())
    self.assertTrue(isinstance(task, asyncio.Task))
    self.assertFalse(isinstance(task, MyTask))
    self.loop.run_until_complete(task)
def test_set_task_name(self):
    """create_task(..., name=...) forwards the name to factory-created
    tasks via set_name (Python 3.8+ task-name API)."""
    if self.implementation == 'asyncio' and sys.version_info < (3, 8, 0):
        raise unittest.SkipTest('unsupported task name')

    self.loop._process_events = mock.Mock()

    result = None

    class MyTask(asyncio.Task):
        def set_name(self, name):
            nonlocal result
            result = name + "!"

        def get_name(self):
            return result

    async def coro():
        pass

    factory = lambda loop, coro: MyTask(coro, loop=loop)

    self.assertIsNone(self.loop.get_task_factory())
    task = self.loop.create_task(coro(), name="mytask")
    self.assertFalse(isinstance(task, MyTask))
    if sys.version_info >= (3, 8, 0):
        self.assertEqual(task.get_name(), "mytask")
    self.loop.run_until_complete(task)

    self.loop.set_task_factory(factory)
    self.assertIs(self.loop.get_task_factory(), factory)

    task = self.loop.create_task(coro(), name="mytask")
    self.assertTrue(isinstance(task, MyTask))
    # set_name was called with the requested name.
    self.assertEqual(result, "mytask!")
    self.assertEqual(task.get_name(), "mytask!")
    self.loop.run_until_complete(task)

    self.loop.set_task_factory(None)
    self.assertIsNone(self.loop.get_task_factory())
def test_shutdown_asyncgens_01(self):
    """shutdown_asyncgens() runs the `finally` blocks of all partially
    consumed async generators."""
    finalized = list()

    if not hasattr(self.loop, 'shutdown_asyncgens'):
        raise unittest.SkipTest()

    async def waiter(timeout, finalized):
        try:
            await asyncio.sleep(timeout)
            yield 1
        finally:
            await asyncio.sleep(0)
            finalized.append(1)

    async def wait():
        async for _ in waiter(1, finalized):
            pass

    t1 = self.loop.create_task(wait())
    t2 = self.loop.create_task(wait())

    self.loop.run_until_complete(asyncio.sleep(0.1))

    t1.cancel()
    t2.cancel()

    self.loop.run_until_complete(self.loop.shutdown_asyncgens())
    self.assertEqual(finalized, [1, 1])

    # Silence the "task was destroyed but it is pending" warnings.
    for t in {t1, t2}:
        try:
            self.loop.run_until_complete(t)
        except asyncio.CancelledError:
            pass
def test_shutdown_asyncgens_02(self):
    """An exception raised while finalizing an async generator is
    reported once through the loop's exception handler."""
    if not hasattr(self.loop, 'shutdown_asyncgens'):
        raise unittest.SkipTest()

    logged = 0

    def logger(loop, context):
        nonlocal logged
        expected = 'an error occurred during closing of asynchronous'
        if expected in context['message']:
            self.assertIn('asyncgen', context)
            logged += 1

    async def waiter(timeout):
        try:
            await asyncio.sleep(timeout)
            yield 1
        finally:
            1 / 0

    async def wait():
        async for _ in waiter(1):
            pass

    t = self.loop.create_task(wait())
    self.loop.run_until_complete(asyncio.sleep(0.1))

    self.loop.set_exception_handler(logger)
    self.loop.run_until_complete(self.loop.shutdown_asyncgens())

    self.assertEqual(logged, 1)

    # Silence warnings
    t.cancel()
    self.loop.run_until_complete(asyncio.sleep(0.1))
def test_shutdown_asyncgens_03(self):
    """A discarded, partially advanced async generator must be finalized
    via the loop's asyncgen finalizer hook without errors."""
    if not hasattr(self.loop, 'shutdown_asyncgens'):
        raise unittest.SkipTest()

    async def waiter():
        yield 1
        yield 2

    async def foo():
        # We specifically want to hit _asyncgen_finalizer_hook
        # method.
        await waiter().asend(None)

    self.loop.run_until_complete(foo())
    self.loop.run_until_complete(asyncio.sleep(0.01))
def test_inf_wait_for(self):
    """wait_for with timeout=inf behaves like no timeout at all."""
    async def foo():
        await asyncio.sleep(0.1)
        return 123

    res = self.loop.run_until_complete(
        asyncio.wait_for(foo(), timeout=float('inf')))
    self.assertEqual(res, 123)
def test_shutdown_default_executor(self):
    """shutdown_default_executor() completes after the default executor
    has been used."""
    if not hasattr(self.loop, "shutdown_default_executor"):
        raise unittest.SkipTest()

    async def foo():
        await self.loop.run_in_executor(None, time.sleep, .1)

    self.loop.run_until_complete(foo())
    self.loop.run_until_complete(
        self.loop.shutdown_default_executor())
def test_call_soon_threadsafe_safety(self):
    """Stress test: call_soon_threadsafe from another thread must not
    lose callbacks while timers are created and cancelled concurrently."""
    ITERATIONS = 4096

    counter = [0]

    def cb():
        counter[0] += 1
        if counter[0] < ITERATIONS - 512:
            # Churn the timer heap: schedule a timer and cancel it
            # shortly after, racing with the scheduler thread.
            h = self.loop.call_later(0.01, lambda: None)
            self.loop.call_later(
                0.0005 + random.random() * 0.0005, h.cancel
            )

    def scheduler():
        loop = self.loop
        for i in range(ITERATIONS):
            if loop.is_running():
                loop.call_soon_threadsafe(cb)
            time.sleep(0.001)
        loop.call_soon_threadsafe(loop.stop)

    thread = threading.Thread(target=scheduler)
    self.loop.call_soon(thread.start)
    self.loop.run_forever()
    thread.join()

    self.assertEqual(counter[0], ITERATIONS)
class TestBaseUV(_TestBase, UVTestCase):
    """Runs the shared _TestBase suite on a uvloop event loop, plus
    uvloop-specific checks that have no asyncio counterpart."""

    def test_loop_create_future(self):
        """create_future() returns an asyncio.Future bound to this loop."""
        fut = self.loop.create_future()
        self.assertTrue(isinstance(fut, asyncio.Future))
        self.assertIs(fut._loop, self.loop)
        fut.cancel()

    def test_loop_call_soon_handle_cancelled(self):
        """A Handle reports cancelled() correctly before and after running."""
        cb = lambda: False  # NoQA
        handle = self.loop.call_soon(cb)
        self.assertFalse(handle.cancelled())
        handle.cancel()
        self.assertTrue(handle.cancelled())

        handle = self.loop.call_soon(cb)
        self.assertFalse(handle.cancelled())
        self.run_loop_briefly()
        # Having run is not the same as having been cancelled.
        self.assertFalse(handle.cancelled())

    def test_loop_call_later_handle_cancelled(self):
        """Same cancelled() semantics for TimerHandle."""
        cb = lambda: False  # NoQA
        handle = self.loop.call_later(0.01, cb)
        self.assertFalse(handle.cancelled())
        handle.cancel()
        self.assertTrue(handle.cancelled())

        handle = self.loop.call_later(0.01, cb)
        self.assertFalse(handle.cancelled())
        self.run_loop_briefly(delay=0.05)
        self.assertFalse(handle.cancelled())

    def test_loop_std_files_cloexec(self):
        """stdin/stdout/stderr must not be marked close-on-exec."""
        # See https://github.com/MagicStack/uvloop/issues/40 for details.
        for fd in {0, 1, 2}:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            self.assertFalse(flags & fcntl.FD_CLOEXEC)

    def test_default_exc_handler_broken(self):
        """A buggy default_exception_handler must itself be reported
        through the 'asyncio' logger, with the original context attached."""
        logger = logging.getLogger('asyncio')
        _context = None

        class Loop(uvloop.Loop):

            _selector = mock.Mock()
            _process_events = mock.Mock()

            def default_exception_handler(self, context):
                nonlocal _context
                _context = context
                # Simulates custom buggy "default_exception_handler"
                raise ValueError('spam')

        loop = Loop()
        self.addCleanup(loop.close)
        self.addCleanup(lambda: asyncio.set_event_loop(None))

        asyncio.set_event_loop(loop)

        def run_loop():
            def zero_error():
                loop.stop()
                1 / 0
            loop.call_soon(zero_error)
            loop.run_forever()

        with mock.patch.object(logger, 'error') as log:
            run_loop()
            log.assert_called_with(
                'Exception in default exception handler',
                exc_info=True)

        def custom_handler(loop, context):
            raise ValueError('ham')

        _context = None
        loop.set_exception_handler(custom_handler)
        with mock.patch.object(logger, 'error') as log:
            run_loop()
            log.assert_called_with(
                self.mock_pattern('Exception in default exception.*'
                                  'while handling.*in custom'),
                exc_info=True)

        # Check that original context was passed to default
        # exception handler.
        self.assertIn('context', _context)
        self.assertIs(type(_context['context']['exception']),
                      ZeroDivisionError)

    def test_big_call_later_timeout(self):
        """Huge/infinite sleep delays must be schedulable and cancellable
        without overflow errors."""
        OK, NOT_OK = 0, 0

        async def sleep(delay_name, delay):
            nonlocal OK, NOT_OK
            try:
                await asyncio.sleep(delay)
            except asyncio.CancelledError:
                OK += 1
            except Exception:
                NOT_OK += 1

        async def main():
            tests = [
                sleep("infinity", float("inf")),
                sleep("sys.maxsize", float(sys.maxsize)),
                sleep("sys.maxsize", sys.maxsize),
                sleep("2**55", 2**55),
                sleep("2**54", 2**54),
            ]
            tasks = [self.loop.create_task(test) for test in tests]
            await asyncio.sleep(0.1)
            for task in tasks:
                task.cancel()
                await task

        self.loop.run_until_complete(main())
        self.assertEqual(OK, 5)
        self.assertEqual(NOT_OK, 0)

    def test_loop_call_later_handle_when(self):
        """TimerHandle.when() reports the absolute scheduled loop time."""
        cb = lambda: False  # NoQA
        delay = 1.0
        loop_t = self.loop.time()
        handle = self.loop.call_later(delay, cb)
        self.assertAlmostEqual(handle.when(), loop_t + delay, places=2)
        handle.cancel()
        self.assertTrue(handle.cancelled())
class TestBaseAIO(_TestBase, AIOTestCase):
    # Runs the shared _TestBase suite against the stock asyncio loop,
    # for behavioral parity with the uvloop-backed TestBaseUV.
    pass
class TestPolicy(unittest.TestCase):
    """Tests for uvloop.EventLoopPolicy integration with asyncio."""

    def test_uvloop_policy(self):
        """With uvloop's policy installed, new_event_loop() yields a
        uvloop.Loop."""
        try:
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
            loop = asyncio.new_event_loop()
            try:
                self.assertIsInstance(loop, uvloop.Loop)
            finally:
                loop.close()
        finally:
            # Always restore the default policy for other tests.
            asyncio.set_event_loop_policy(None)

    @unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
                         'No asyncio._get_running_loop')
    def test_running_loop_within_a_loop(self):
        """Running a loop from inside a running loop must raise."""
        async def runner(loop):
            loop.run_forever()

        try:
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
            loop = asyncio.new_event_loop()
            outer_loop = asyncio.new_event_loop()
            try:
                with self.assertRaisesRegex(RuntimeError,
                                            'while another loop is running'):
                    outer_loop.run_until_complete(runner(loop))
            finally:
                loop.close()
                outer_loop.close()
        finally:
            asyncio.set_event_loop_policy(None)

    @unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
                         'No asyncio._get_running_loop')
    def test_get_event_loop_returns_running_loop(self):
        """Inside a running loop, get_event_loop() must return that loop
        without consulting the policy (which here always raises)."""
        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise NotImplementedError

        loop = None

        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = uvloop.new_event_loop()
            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())
        finally:
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        self.assertIs(asyncio._get_running_loop(), None)
|
detect_cup.py | import cv2
import time
#from espeak import espeak
from subprocess import call
import random
import numpy as np
import threading
import requests
# Camera: first video device at 640x480.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Region of interest (x1, y1, x2, y2) in the frame where the cup is
# expected to stand; width 39 px, height 53 px.
roi = (275, 309, 39 + 275, 53 + 309)

# hsv values
green_thresh_low = (40, 50, 0)
green_thresh_high = (65, 255, 255)
blue_thresh_low = (80, 50, 0)
blue_thresh_high = (135, 255, 255)

# Fraction of ROI pixels that must match a color to count as a cup.
thresh_percent = 0.3

# Detection state shared with update_state() below.
has_green_cup = False
has_blue_cup = False
last_cup_time = time.time()
last_cup_off_time = last_cup_time
did_say_thanks = True
cup_timeout = 10       # seconds without a cup before nagging
spasibo_timeout = 2    # seconds the cup must be gone before "thanks" counts

# Cross-thread signals for the LED blinker thread.
alert = threading.Event()
shutdown = threading.Event()
led_address = "http://192.168.0.95/"
delay = 0.5            # LED blink half-period, seconds
def speak(text):
    """Pronounce *text* with espeak, using a randomized Russian voice.

    The previous version concatenated a shell command string and ran it
    with shell=True, which is a shell-injection hazard if *text* ever
    contains shell metacharacters.  Passing an argument list with the
    default shell=False removes that risk; espeak's stderr is discarded
    as the old `2>/dev/null` redirect did.
    """
    import os
    args = [
        "espeak",
        "-vru+f4",                            # Russian female voice
        "-p60",                               # pitch
        "-s" + str(random.randint(40, 160)),  # randomized speed
        "-g" + str(random.randint(0, 30)),    # randomized word gap
        "-x",                                 # write phoneme mnemonics
        "-a150",                              # amplitude
        text,
    ]
    with open(os.devnull, 'w') as devnull:
        call(args, stderr=devnull)
def update_state(green_cup, blue_cup):
    """Record the currently detected cup colors in the module-level
    state and log a message only when the state actually changes.

    Parameters are booleans: whether a green / blue cup is visible now.
    """
    global has_green_cup
    global has_blue_cup
    did_update_state = False
    if green_cup != has_green_cup:
        has_green_cup = green_cup
        did_update_state = True
    if blue_cup != has_blue_cup:
        has_blue_cup = blue_cup
        did_update_state = True
    if did_update_state:
        # print(...) with one argument is valid in both Python 2 and 3;
        # the original `print "..."` statements were Python-2 only and
        # made the file a SyntaxError under Python 3.
        if has_green_cup:
            print("Green cup detected")
        elif has_blue_cup:
            print("Blue cup detected")
        else:
            print("No cup detected")
def turn_led_red():
    # Switch the networked LED to red; returns the requests.Response.
    return requests.post(led_address+"red")
def turn_led_off():
    # Switch the networked LED off; returns the requests.Response.
    return requests.post(led_address+"off")
def led_blink():
    """Worker-thread loop: blink the LED red while `alert` is set.

    Runs until the `shutdown` event is set.  The original version
    spun at 100% CPU whenever `alert` was clear (the loop body did
    nothing and slept nowhere); now it waits on `shutdown` for one
    blink period between polls, which also makes shutdown responsive.
    """
    while not shutdown.is_set():
        if alert.is_set():
            turn_led_red()
            time.sleep(delay)
            turn_led_off()
            time.sleep(delay)
        else:
            # Idle: poll again after `delay`, waking early on shutdown.
            shutdown.wait(delay)
# Background thread that blinks the LED whenever `alert` is set.
led_thread = threading.Thread(target=led_blink, name="Led blink thread")
led_thread.start()

try:
    while True:
        # Grab a frame and classify the ROI by color in HSV space.
        _, image_frame = cap.read()
        rect_img = image_frame[roi[1]:roi[3], roi[0]: roi[2]]
        rect_img = cv2.cvtColor(rect_img, cv2.COLOR_BGR2HSV)
        green_cup = cv2.inRange(rect_img, green_thresh_low, green_thresh_high)
        blue_cup = cv2.inRange(rect_img, blue_thresh_low, blue_thresh_high)

        # Fraction of ROI pixels matching each color mask.
        green_cnt = np.count_nonzero(green_cup)
        blue_cnt = np.count_nonzero(blue_cup)
        pixels_total = len(green_cup) * len(green_cup[0])
        green_pixels = float(green_cnt) / pixels_total
        blue_pixels = float(blue_cnt) / pixels_total

        # Green wins ties; update_state() logs only on transitions.
        if green_pixels > thresh_percent:
            update_state(True, False)
        elif blue_pixels > thresh_percent:
            update_state(False, True)
        else:
            update_state(False, False)

        if has_blue_cup or has_green_cup:
            last_cup_time = time.time()
            # Cup came back after being away long enough: thank once.
            if (last_cup_time - last_cup_off_time > spasibo_timeout) and not did_say_thanks:
                speak("[[spas;'ibV_]]")
                did_say_thanks = True
                alert.clear()
        else:
            current_time = time.time()
            # Cup missing too long: nag, raise the LED alert, and reset
            # the timers so the nag repeats every cup_timeout seconds.
            if current_time - last_cup_time > cup_timeout:
                speak("[[v;ern'I stAk'Vn_]]")
                last_cup_time = current_time
                last_cup_off_time = current_time
                did_say_thanks = False
                alert.set()
finally:
    # Stop the LED thread and release the camera on any exit path.
    shutdown.set()
    cap.release()
|
audible.py |
# Python imports
import threading
import xlsxwriter
import json, time
import pandas as pd
from numpy import nan
import openpyxl as op
from bs4 import BeautifulSoup
from selenium import webdriver
# Amazon storefront base URLs keyed by country name; used to turn the
# relative hrefs scraped from listing pages into absolute book links.
# NOTE(review): the 'Ploand' key is a typo for 'Poland' and the Turkey
# URL looks wrong (amazon.com.tr is the usual domain) — but these keys
# and values are runtime data matched against the countries workbook,
# so fixing them must be coordinated with that input file.
book_prefix = {
    'Australia':'https://www.amazon.com.au',
    'Brazil':'https://www.amazon.com.br',
    'Canada':'https://www.amazon.ca',
    'China':'https://www.amazon.cn',
    'France':'https://www.amazon.fr',
    'Germany':'https://www.amazon.de',
    'India':'https://www.amazon.in',
    'Italy':'https://www.amazon.it',
    'Japan':'https://www.amazon.co.jp',
    'Mexico':'https://www.amazon.com.mx',
    'Netherlands':'https://www.amazon.nl',
    'Ploand':'https://www.amazon.pl',
    # 'Saudi Arabia':'https://www.amazon.',
    'Singapore':'https://www.amazon.sg',
    'Spain':'https://www.amazon.es',
    'Sweden':'https://www.amazon.se',
    'Turkey':'https://www.amazon.tr',
    'United Arab Emirates':'https://www.amazon.ae',
    'United States' : 'https://www.amazon.com',
    'United Kingdom': 'https://www.amazon.co.uk'
}
class audible:
    """Scrapes Amazon/Audible category listings for one country into an
    Excel workbook (one worksheet per top-level category).

    Fixes over the original:
    - All mutable state was held in *class* attributes, so every
      instance (one per country) shared and kept appending to the same
      lists/dicts.  State is now (re)bound per instance in __init__.
    - ``create_excel_file`` saved to ``'../data/' + filename`` although
      ``filename`` already starts with ``../data/`` (double path).
    - Mutable default argument ``books=[]`` removed.
    - Bare ``except:`` clauses narrowed or made to report the error.
    """

    # Class-level defaults kept for backward compatibility with any code
    # that reads them off the class; instances get their own copies.
    count = 0
    country = ''
    sub_level = 0
    sub_names = {}
    categories = []
    book_number = 50
    work_done = list()
    audible_filename = ''
    audible_categories = {}
    data_fields = ['category', 'subcat-1', 'subcat-2', 'subcat-3', 'subcat-4']

    def __init__(self, country):
        """Load settings.xlsx and the country's category-list JSON."""
        # Per-instance state (do NOT mutate the shared class attributes).
        self.count = 0
        self.sub_level = 0
        self.sub_names = {}
        self.categories = []
        self.work_done = []
        self.audible_categories = {}
        self.data_fields = ['category', 'subcat-1', 'subcat-2',
                            'subcat-3', 'subcat-4']
        self.country = country
        self.audible_filename = f'../data/audible_{country}.xlsx'

        setting = pd.read_excel('settings.xlsx', 'settings')
        datas = setting['audible-data-fields']
        cats = setting['audible-categories']
        self.book_number = int(setting['book-number'][0])
        for i in datas.index:
            if datas[i] is not nan:
                self.data_fields.append(datas[i])
        for i in cats.index:
            if cats[i] is not nan:
                self.categories.append(cats[i])
        self.audible_categories = self.update_category_list(
            f'../category_list/{country}.json')
        print(country)
        print(self.audible_categories)

    def update_category_list(self, filename):
        """Return the category tree parsed from *filename* (JSON)."""
        with open(filename, 'r+') as read_file:
            return json.load(read_file)

    def scrape_category(self):
        """Create the workbook, then start one browser + worker thread
        per configured top-level category."""
        # Create the workbook once, seeded with the first category sheet.
        for cat in self.categories:
            self.create_excel_file(cat, self.audible_filename)
            break
        for category in self.categories:  # Create new thread for category
            if category in self.audible_categories:
                browser = webdriver.Chrome('../../chromedriver.exe')
                self.work_done.append(False)
                self.sub_level = 1
                self.sub_names = {'category': category, 'subcat-1': 'null',
                                  'subcat-2': 'null', 'subcat-3': 'null',
                                  'subcat-4': 'null'}
                workbook = op.load_workbook(self.audible_filename, False)
                try:
                    workbook[category]
                except KeyError:
                    # openpyxl raises KeyError for a missing sheet.
                    workbook.create_sheet(category)
                worksheet = workbook[category]
                worksheet.append(self.headers())
                workbook.save(self.audible_filename)
                workbook.close()
                try:
                    subcategories = self.audible_categories[category]
                    t = threading.Thread(target=self.intermediate,
                                         args=(self.count, browser,
                                               subcategories, ))
                    t.start()
                except Exception as exc:
                    print("Error: unable to start new thread:", exc)
                self.count += 1

    def intermediate(self, count, browser, subcategories):
        """Thread entry point: scrape one category tree, then mark the
        matching work_done slot and release the browser."""
        self.helper_category_books(browser, subcategories)
        print('\n\n Category Finished -------\n\n')
        self.work_done[count] = True
        browser.quit()

    def helper_category_books(self, browser, subcategories):
        """Walk the nested category dict, scraping each leaf link."""
        for subcat in subcategories:
            if not type(subcategories[subcat]) is dict:
                # Leaf: value is a listing URL.
                self.update_subnames(subcat)
                try:  # Create new thread for category
                    t = threading.Thread(target=self.category_books,
                                         args=(browser,
                                               subcategories[subcat], ))
                    t.start()
                    t.join()
                except Exception as exc:
                    print("Error: unable to start new thread:", exc)
            else:
                # Branch: descend one sub-level, then restore it.
                self.update_subnames(subcat)
                self.sub_level += 1
                self.helper_category_books(browser, subcategories[subcat])
                self.update_subnames('null')
                self.sub_level -= 1

    def update_subnames(self, subcat):
        """Record *subcat* at the current nesting depth (1..4)."""
        print('\n', self.sub_level, ' - ', self.sub_names)
        if self.sub_level == 1: self.sub_names['subcat-1'] = subcat
        if self.sub_level == 2: self.sub_names['subcat-2'] = subcat
        if self.sub_level == 3: self.sub_names['subcat-3'] = subcat
        if self.sub_level == 4: self.sub_names['subcat-4'] = subcat

    def category_books(self, browser, link):
        """Scrape up to book_number books from the listing at *link* and
        append them to the workbook."""
        books = []  # for book-data
        browser.set_window_position(500, 0)
        try:
            browser.get(link)
        except Exception:
            print('Failed to Load')
            return False
        source = browser.page_source
        soup = BeautifulSoup(source, features='lxml')
        book_sections = soup.find_all(
            'div', attrs={'class': 'a-section a-spacing-none aok-relative'})
        book_count = 1
        for book in book_sections:
            book_count += 1
            a_tags = book.find_all('a', attrs={'class': 'a-link-normal'})
            book_details_link = book_prefix[self.country] + a_tags[0]['href']
            try:
                browser.get(book_details_link)
                source = browser.page_source
                soup = BeautifulSoup(source, features='lxml')
                title = soup.find(
                    'span', attrs={'id': 'productTitle'}
                ).get_text().strip('\n')
                span = soup.find_all(
                    'span', attrs={'class', 'author notFaded'})
                author = span[0].find(
                    'a', attrs={'class': 'a-link-normal'}).get_text()
                rating = soup.find(
                    'span', attrs={'id': 'acrCustomerReviewText'}
                ).get_text().replace(' ratings', '')
                stars = soup.find(
                    'span',
                    attrs={'class':
                           'reviewCountTextLinkedHistogram noUnderline'}
                )['title'].replace(' out of 5 stars', '')
                table = soup.find(
                    'table',
                    attrs={'class': 'a-keyvalue a-vertical-stripes a-span6'})
                table = table.find('tbody')
                tr_tags = table.find_all('tr')
                # All product-detail rows except the last (best-sellers).
                tr_list = []
                for i in range(len(tr_tags) - 1):
                    tr_list.append(tr_tags[i])
                details = {}
                for name in self.sub_names:
                    details[name] = self.sub_names[name]
                if 'Title' in self.data_fields: details['Title'] = title
                if 'Web-Link' in self.data_fields:
                    details['Web-Link'] = book_details_link
                if 'Author' in self.data_fields: details['Author'] = author
                if 'Ratings' in self.data_fields: details['Ratings'] = rating
                if 'Stars' in self.data_fields: details['Stars'] = stars
                for tr in tr_list:
                    span = tr.find('th')
                    span = span.find('span')
                    heading = span.get_text()
                    data = tr.find('td')
                    try:
                        data = data.find('span').get_text()
                    except AttributeError:
                        data = data.find('a').get_text()
                    details[heading] = data
                bst_heading = table.find(
                    'th',
                    attrs={'class':
                           'a-color-secondary a-size-base '
                           'prodDetSectionEntry'}
                ).get_text().replace('\n', '')
                # The best-sellers row is the last <tr> of the table.
                bst_tr = None
                for tr in tr_tags:
                    bst_tr = tr
                td = bst_tr.find('td')
                span = td.find('span')
                spans = span.find_all('span')
                bst_data = []
                for span in spans:
                    try:
                        bst_data.append(span.get_text().split('(')[0])
                    except Exception:
                        bst_data.append(span.get_text())
                details[bst_heading] = bst_data
                print(self.count, end=" ")
                books.append(details)
            except Exception:
                # Skip books whose detail page lacks the expected markup.
                continue
            if book_count > self.book_number:
                break
        # Saving in Excel File
        self.write_to_excel(self.audible_filename, books)

    def create_excel_file(self, category_name, filename):
        """Create *filename* with one sheet named *category_name* and a
        header row."""
        # creating new excel file
        workbook = xlsxwriter.Workbook(filename)
        workbook.add_worksheet(category_name)
        workbook.close()
        workbook = op.load_workbook(filename, False)
        worksheet = workbook[category_name]
        worksheet.append(self.headers())
        # BUG FIX: the original saved to '../data/' + filename, but
        # filename already contains the ../data/ prefix, so the header
        # row ended up in ../data/../data/... instead of the workbook
        # that is later re-opened.
        workbook.save(filename)
        workbook.close()

    def write_to_excel(self, filename, books=None):
        """Append one row per book dict to the current category sheet.

        *books* defaults to an empty list (the original used the mutable
        default ``books=[]``).
        """
        if books is None:
            books = []
        workbook = op.load_workbook(filename, False)
        worksheet = workbook[self.sub_names['category']]
        for book in books:
            data = []
            for data_field in self.data_fields:
                try:
                    if data_field == 'Best Sellers Rank':
                        # This field holds a list; flatten it into cells.
                        for v in book[data_field]:
                            data.append(v)
                    else:
                        data.append(book[data_field])
                except KeyError:
                    data.append('N/A')
            worksheet.append(data)
        workbook.save(filename)
        workbook.close()

    def headers(self):
        """Return the header row: a copy of the configured data fields."""
        return list(self.data_fields)
def selected_countries():
    """Return the non-empty country names from countries.xlsx.

    The original filtered with ``datas[i] is not nan`` — an *identity*
    comparison against numpy's nan singleton, which silently fails for
    missing cells pandas represents as plain ``float('nan')``.
    ``Series.dropna()`` handles every missing-value representation.
    """
    countries = pd.read_excel('../category_list/countries.xlsx', 'countries')
    return countries['countries'].dropna().tolist()
def main():
    """Scrape each configured country in turn, waiting until every
    per-category worker thread of a country reports done before moving
    on to the next one."""
    countries = selected_countries()
    print(countries)
    for country in countries:
        try:
            audi = audible(country)
            audi.scrape_category()
            done = False
            while not done:
                time.sleep(5)
                # all([]) is True, so a country that spawned no workers
                # finishes after a single poll — same as the original
                # for/else flag dance, but without the manual loop.
                done = all(audi.work_done)
        except Exception as exc:
            # The original bare `except:` hid every failure behind a
            # misleading "List Not Found" message; show the cause too.
            print(country, '-List Not Found', '-', exc)
|
start_sploit.py | #!/usr/bin/env python3
import argparse
import binascii
import itertools
import json
import logging
import os
import random
import re
import stat
import subprocess
import sys
import time
import threading
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from math import ceil
from urllib.parse import urljoin
from urllib.request import Request, urlopen
# Bail out early on interpreters we do not support.
if sys.version_info < (3, 4):
    logging.critical('Support of Python < 3.4 is not implemented yet')
    sys.exit(1)

# Windows consoles (pre-ANSI) get plain, uncolored output.
os_windows = (os.name == 'nt')
HEADER = '''
____ _ _ _ _____
| _ \ ___ ___| |_ _ __ _ _ ___| |_(_)_ _____ | ___|_ _ _ __ _ __ ___
| | | |/ _ \/ __| __| '__| | | |/ __| __| \ \ / / _ \ | |_ / _` | '__| '_ ` _ `
| |_| | __/\__ \ |_| | | |_| | (__| |_| |\ V / __/ | _| (_| | | | | | | |
|____/ \___||___/\__|_| \__,_|\___|\__|_| \_/ \___| |_| \__,_|_| |_| |_| |_
Note that this software is highly destructive. Keep it away from children.
'''[1:]
class Style(Enum):
    """
    Bash escape sequences, see:
    https://misc.flogisoft.com/bash/tip_colors_and_formatting
    """

    # Values are ANSI SGR parameter codes (used as '\033[<value>m').
    BOLD = 1

    FG_BLACK = 30
    FG_RED = 31
    FG_GREEN = 32
    FG_YELLOW = 33
    FG_BLUE = 34
    FG_MAGENTA = 35
    FG_CYAN = 36
    FG_LIGHT_GRAY = 37
# Colors highlight() picks from at random for emphasized text.
BRIGHT_COLORS = [Style.FG_RED, Style.FG_GREEN, Style.FG_BLUE,
                 Style.FG_MAGENTA, Style.FG_CYAN]
def highlight(text, style=None):
    """Wrap *text* in ANSI escape codes; returns it unchanged on Windows.

    When *style* is omitted, a bold random bright color is chosen.
    """
    if os_windows:
        return text
    if style is None:
        style = [Style.BOLD, random.choice(BRIGHT_COLORS)]
    codes = ';'.join(str(item.value) for item in style)
    return '\033[{}m'.format(codes) + text + '\033[0m'
# Colorize only the level name; message text stays plain for grepping.
log_format = '%(asctime)s {} %(message)s'.format(highlight('%(levelname)s', [Style.FG_YELLOW]))
logging.basicConfig(format=log_format, datefmt='%H:%M:%S', level=logging.DEBUG)
def parse_args():
    """Define and parse the farm client's command-line arguments."""
    parser = argparse.ArgumentParser(description='Run a sploit on all teams in a loop',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('sploit',
                        help="Sploit executable (should take a victim's host as the first argument)")
    parser.add_argument('--server-url', metavar='URL', default='http://farm.kolambda.com:5000',
                        help='Server URL')
    parser.add_argument('--token', metavar='TOKEN', default='',
                        help='Farm authorization token')
    parser.add_argument('--interpreter', metavar='COMMAND',
                        help='Explicitly specify sploit interpreter (use on Windows, which doesn\'t '
                             'understand shebangs)')

    parser.add_argument('--pool-size', metavar='N', type=int, default=50,
                        help='Maximal number of concurrent sploit instances. '
                             'Too little value will make time limits for sploits smaller, '
                             'too big will eat all RAM on your computer')
    parser.add_argument('--attack-period', metavar='N', type=float, default=120,
                        help='Rerun the sploit on all teams each N seconds '
                             'Too little value will make time limits for sploits smaller, '
                             'too big will miss flags from some rounds')

    parser.add_argument('-v', '--verbose-attacks', metavar='N', type=int, default=1,
                        help="Sploits' outputs and found flags will be shown for the N first attacks")

    # These two modes cannot be combined: either one global instance,
    # or a hash-distributed subset of per-team instances.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--not-per-team', action='store_true',
                       help='Run a single instance of the sploit instead of an instance per team')
    group.add_argument('--distribute', metavar='K/N',
                       help='Divide the team list to N parts (by address hash modulo N) '
                            'and run the sploits only on Kth part of it (K >= 1)')

    return parser.parse_args()
def fix_args(args):
    """Validate and normalize the parsed CLI arguments in place.

    Raises ValueError or InvalidSploitError when the sploit file or the
    --distribute specification is invalid.
    """
    check_sploit(args)

    if '://' not in args.server_url:
        # Allow the user to omit the URL scheme.
        args.server_url = 'http://' + args.server_url

    if args.distribute is None:
        return
    match = re.fullmatch(r'(\d+)/(\d+)', args.distribute)
    if match is not None:
        part, total = int(match.group(1)), int(match.group(2))
        if total >= 2 and 1 <= part <= total:
            # Replace the "K/N" string with a (K, N) tuple for later use.
            args.distribute = part, total
            return
    raise ValueError('Wrong syntax for --distribute, use --distribute K/N (N >= 2, 1 <= K <= N)')
# Maps script extensions to the interpreter invoked on Windows,
# where shebang lines are not honored by the OS.
SCRIPT_EXTENSIONS = {
    '.pl': 'perl',
    '.py': 'python',
    '.rb': 'ruby',
}
def check_script_source(source):
    """Return a list of human-readable problems found in a sploit script.

    Checks for a shebang line (on non-Windows) and for output flushing,
    both of which are required for reliable flag capture.
    """
    problems = []
    has_shebang = source[:2] == '#!'
    if not os_windows and not has_shebang:
        problems.append(
            'Please use shebang (e.g. {}) as the first line of your script'.format(
                highlight('#!/usr/bin/env python3', [Style.FG_GREEN])))
    flushes_output = re.search(r'flush[(=]', source) is not None
    if not flushes_output:
        problems.append(
            'Please print the newline and call {} each time after your sploit outputs flags. '
            'In Python 3, you can use {}. '
            'Otherwise, the flags may be lost (if the sploit process is killed) or '
            'sent with a delay.'.format(
                highlight('flush()', [Style.FG_RED]),
                highlight('print(..., flush=True)', [Style.FG_GREEN])))
    return problems
class InvalidSploitError(Exception):
    """Raised when the sploit file fails pre-run validation."""
    pass
def check_sploit(args):
    """Validate the sploit file before the first run.

    Verifies the file exists, runs source-hygiene checks for known script
    types, fills args.interpreter from the extension on Windows, and makes
    sure the file is executable on POSIX (setting the X bit for scripts).

    Raises ValueError for a missing file and InvalidSploitError when the
    file cannot or should not be executed.
    """
    path = args.sploit
    if not os.path.isfile(path):
        raise ValueError('No such file: {}'.format(path))

    extension = os.path.splitext(path)[1].lower()
    is_script = extension in SCRIPT_EXTENSIONS
    if is_script:
        with open(path, 'r', errors='ignore') as f:
            source = f.read()
        errors = check_script_source(source)
        if errors:
            for message in errors:
                logging.error(message)
            raise InvalidSploitError('Sploit won\'t be run because of validation errors')

        if os_windows and args.interpreter is None:
            # Shebangs don't work on Windows, so pick the interpreter by extension.
            args.interpreter = SCRIPT_EXTENSIONS[extension]
            logging.info('Using interpreter `{}`'.format(args.interpreter))

    if not os_windows:
        file_mode = os.stat(path).st_mode
        # TODO: May be check the owner and other X flags properly?
        if not file_mode & stat.S_IXUSR:
            if is_script:
                # Scripts can be fixed transparently; binaries cannot.
                logging.info('Setting the executable bit on `{}`'.format(path))
                os.chmod(path, file_mode | stat.S_IXUSR)
            else:
                raise InvalidSploitError("The provided file doesn't appear to be executable")
if os_windows:
    # By default, Ctrl+C does not work on Windows if we spawn subprocesses.
    # Here we fix that using WinApi. See https://stackoverflow.com/a/43095532

    import signal
    import ctypes
    from ctypes import wintypes

    kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)

    # BOOL WINAPI HandlerRoutine(
    #   _In_ DWORD dwCtrlType
    # );
    PHANDLER_ROUTINE = ctypes.WINFUNCTYPE(wintypes.BOOL, wintypes.DWORD)

    # A NULL handler: installing it makes this process ignore Ctrl+C.
    win_ignore_ctrl_c = PHANDLER_ROUTINE()  # = NULL

    def _errcheck_bool(result, _, args):
        # WinApi signals failure by returning FALSE; surface it as OSError.
        if not result:
            raise ctypes.WinError(ctypes.get_last_error())
        return args

    # BOOL WINAPI SetConsoleCtrlHandler(
    #   _In_opt_ PHANDLER_ROUTINE HandlerRoutine,
    #   _In_ BOOL Add
    # );
    kernel32.SetConsoleCtrlHandler.errcheck = _errcheck_bool
    kernel32.SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, wintypes.BOOL)

    @PHANDLER_ROUTINE
    def win_ctrl_handler(dwCtrlType):
        if dwCtrlType == signal.CTRL_C_EVENT:
            # Ignore further Ctrl+C presses while we shut down.
            kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, True)
            shutdown()
        return False

    kernel32.SetConsoleCtrlHandler(win_ctrl_handler, True)
class APIException(Exception):
    """Raised when the farm server responds with a non-200 HTTP status."""
    pass

# Timeout (seconds) for every HTTP request to the farm server.
SERVER_TIMEOUT = 5
def get_config(args):
    """Fetch the current farm configuration from the server as a dict.

    Raises APIException when the server answers with a non-200 status.
    """
    request = Request(urljoin(args.server_url, '/api/get_config'))
    if args.token:
        request.add_header('X-Token', args.token)
    with urlopen(request, timeout=SERVER_TIMEOUT) as conn:
        if conn.status != 200:
            raise APIException(conn.read())
        body = conn.read().decode()
    return json.loads(body)
def post_flags(args, flags):
    """Submit captured flags to the farm server as a JSON POST.

    *flags* is a list of {'flag': ..., 'team': ...} records; the sploit's
    file name is attached so the server can attribute the capture.
    Raises APIException when the server answers with a non-200 status.
    """
    sploit_name = os.path.basename(args.sploit)
    payload = [{'flag': item['flag'], 'sploit': sploit_name, 'team': item['team']}
               for item in flags]

    request = Request(urljoin(args.server_url, '/api/post_flags'))
    request.add_header('Content-Type', 'application/json')
    if args.token:
        request.add_header('X-Token', args.token)

    encoded = json.dumps(payload).encode()
    with urlopen(request, data=encoded, timeout=SERVER_TIMEOUT) as conn:
        if conn.status != 200:
            raise APIException(conn.read())
# Set from shutdown() to make all periodic loops terminate.
exit_event = threading.Event()


def once_in_a_period(period):
    """Yield 1, 2, 3, ... with at least *period* seconds between iterations.

    Time spent inside the loop body is subtracted from the wait so that
    iterations stay on schedule. Stops as soon as exit_event is set.
    """
    iter_no = 0
    while True:
        iter_no += 1
        started_at = time.time()
        yield iter_no

        elapsed = time.time() - started_at
        if period > elapsed:
            exit_event.wait(period - elapsed)
        if exit_event.is_set():
            return
class FlagStorage:
    """
    Thread-safe storage comprised of a deduplication set and a post queue.

    Any number of threads may call add(), but only one "consumer thread"
    may call pick_flags() and mark_as_sent().
    """

    def __init__(self):
        self._flags_seen = set()
        self._queue = []
        self._lock = threading.RLock()

    def add(self, flags, team_name):
        """Queue every not-yet-seen flag from *flags*, tagged with *team_name*."""
        with self._lock:
            fresh = [item for item in flags if item not in self._flags_seen]
            for item in fresh:
                self._flags_seen.add(item)
                self._queue.append({'flag': item, 'team': team_name})

    def pick_flags(self, count):
        """Peek (without removing) at most *count* queued flag records."""
        with self._lock:
            return self._queue[:count]

    def mark_as_sent(self, count):
        """Drop the first *count* records previously returned by pick_flags()."""
        with self._lock:
            del self._queue[:count]

    @property
    def queue_size(self):
        """Number of flag records still waiting to be posted."""
        with self._lock:
            return len(self._queue)


# Process-wide flag queue shared between sploit threads and the post loop.
flag_storage = FlagStorage()

# How often (seconds) the post loop flushes flags to the server, and the
# maximum batch size per POST request.
POST_PERIOD = 5
POST_FLAG_LIMIT = 10000
# TODO: test that 10k flags won't lead to the hangup of the farm server
def run_post_loop(args):
    """Background thread: periodically flush queued flags to the farm server.

    Runs until exit_event is set. On a failed POST the flags stay queued
    and are retried next period; on an unexpected error the whole client
    shuts down, because flags would silently stop being submitted.
    """
    try:
        for _ in once_in_a_period(POST_PERIOD):
            flags_to_post = flag_storage.pick_flags(POST_FLAG_LIMIT)
            if flags_to_post:
                try:
                    post_flags(args, flags_to_post)
                    # Only dequeue after a confirmed successful POST.
                    flag_storage.mark_as_sent(len(flags_to_post))
                    logging.info('{} flags posted to the server ({} in the queue)'.format(
                        len(flags_to_post), flag_storage.queue_size))
                except Exception as e:
                    logging.error("Can't post flags to the server: {}".format(repr(e)))
                    logging.info("The flags will be posted next time")
    except Exception as e:
        logging.critical('Posting loop died: {}'.format(repr(e)))
        shutdown()
# Serializes writes to stdout so outputs of concurrent sploits don't interleave.
display_output_lock = threading.RLock()


def display_sploit_output(team_name, output_lines):
    """Print the sploit's captured output, prefixing each line with the team name."""
    if not output_lines:
        logging.info('{}: No output from the sploit'.format(team_name))
        return

    prefix = highlight(team_name + ': ')
    body = '\n'.join(prefix + line.rstrip() for line in output_lines)
    with display_output_lock:
        print('\n' + body + '\n')
def process_sploit_output(stream, args, team_name, flag_format, attack_no):
    """Reader thread: consume one sploit's stdout and harvest flags.

    Reads *stream* (bytes) line by line until EOF, matches flags with the
    *flag_format* regex and stores them in the global flag_storage tagged
    with *team_name*. Output is echoed for the first verbose attacks.
    """
    try:
        output_lines = []
        instance_flags = set()
        while True:
            line = stream.readline()
            if not line:
                break
            line = line.decode(errors='replace')
            output_lines.append(line)

            line_flags = set(flag_format.findall(line))
            if line_flags:
                # Store immediately so flags survive even if the sploit is killed.
                flag_storage.add(line_flags, team_name)
                instance_flags |= line_flags

        if attack_no <= args.verbose_attacks and not exit_event.is_set():
            # We don't want to spam the terminal on KeyboardInterrupt
            display_sploit_output(team_name, output_lines)
        if instance_flags:
            logging.info('Got {} flags from "{}": {}'.format(
                len(instance_flags), team_name, instance_flags))
    except Exception as e:
        logging.error('Failed to process sploit output: {}'.format(repr(e)))
class InstanceStorage:
    """
    Storage comprised of a dictionary of all running sploit instances and some statistics.

    Always acquire instance_lock before using this class. Do not release the lock
    between actual spawning/killing a process and calling register_start()/register_stop().
    """

    def __init__(self):
        self._counter = 0      # next instance id to hand out
        self.instances = {}    # instance_id -> subprocess handle
        self.n_completed = 0   # finished instances, killed or not
        self.n_killed = 0      # instances that hit the time limit

    def register_start(self, process):
        """Record a newly spawned *process*; return its assigned instance id."""
        instance_id = self._counter
        self._counter += 1
        self.instances[instance_id] = process
        return instance_id

    def register_stop(self, instance_id, was_killed):
        """Forget a finished instance and update the kill statistics."""
        self.instances.pop(instance_id)
        self.n_completed += 1
        self.n_killed += int(was_killed)


instance_storage = InstanceStorage()
instance_lock = threading.RLock()
def launch_sploit(args, team_name, team_addr, attack_no, flag_format):
    """Spawn one sploit subprocess plus a reader thread for its output.

    The caller (run_sploit) holds instance_lock around this call. Returns
    a (Popen, instance_id) pair.
    """
    # For sploits written in Python, this env variable forces the interpreter to flush
    # stdout and stderr after each newline. Note that this is not default behavior
    # if the sploit's output is redirected to a pipe.
    env = os.environ.copy()
    env['PYTHONUNBUFFERED'] = '1'

    command = [os.path.abspath(args.sploit)]
    if args.interpreter is not None:
        command = [args.interpreter] + command
    if team_addr is not None:
        command.append(team_addr)
    need_close_fds = (not os_windows)

    if os_windows:
        # On Windows, we block Ctrl+C handling, spawn the process, and
        # then recover the handler. This is the only way to make Ctrl+C
        # intercepted by us instead of our child processes.
        kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, True)

    # stderr is merged into stdout; bufsize=1 requests line buffering.
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            bufsize=1, close_fds=need_close_fds, env=env)

    if os_windows:
        kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, False)

    threading.Thread(target=lambda: process_sploit_output(
        proc.stdout, args, team_name, flag_format, attack_no)).start()

    return proc, instance_storage.register_start(proc)
def run_sploit(args, team_name, team_addr, attack_no, max_runtime, flag_format):
    """Thread-pool worker: run one sploit instance against one team.

    Spawns the instance (unless shutdown already started), waits at most
    *max_runtime* seconds, then kills it if necessary and records the
    outcome in instance_storage.
    """
    try:
        with instance_lock:
            if exit_event.is_set():
                # Shutdown started while this task was queued; spawn nothing.
                return
            proc, instance_id = launch_sploit(args, team_name, team_addr, attack_no, flag_format)
    except Exception as e:
        if isinstance(e, FileNotFoundError):
            logging.error('Sploit file or the interpreter for it not found: {}'.format(repr(e)))
            logging.error('Check presence of the sploit file and the shebang (use {} for compatibility)'.format(
                highlight('#!/usr/bin/env ...', [Style.FG_GREEN])))
        else:
            logging.error('Failed to run sploit: {}'.format(repr(e)))

        if attack_no == 1:
            # If the very first attack cannot even start, give up entirely.
            shutdown()
        return

    try:
        try:
            proc.wait(timeout=max_runtime)
            need_kill = False
        except subprocess.TimeoutExpired:
            need_kill = True
            if attack_no <= args.verbose_attacks:
                logging.warning('Sploit for "{}" ({}) ran out of time'.format(team_name, team_addr))

        # Kill and unregister atomically, per InstanceStorage's contract.
        with instance_lock:
            if need_kill:
                proc.kill()
            instance_storage.register_stop(instance_id, need_kill)
    except Exception as e:
        logging.error('Failed to finish sploit: {}'.format(repr(e)))
def show_time_limit_info(args, config, max_runtime, attack_no):
    """Log the per-instance time budget and the running kill-rate statistics."""
    if attack_no == 1:
        # A flag lives FLAG_LIFETIME seconds; leave room for submit/post delays.
        min_attack_period = config['FLAG_LIFETIME'] - config['SUBMIT_PERIOD'] - POST_PERIOD
        if args.attack_period >= min_attack_period:
            logging.warning("--attack-period should be < {:.1f} sec, "
                            "otherwise the sploit will not have time "
                            "to catch flags for each round before their expiration".format(min_attack_period))

    logging.info('Time limit for a sploit instance: {:.1f} sec'.format(max_runtime))
    with instance_lock:
        completed = instance_storage.n_completed
        killed = instance_storage.n_killed
    if completed > 0:
        # TODO: Maybe better for 10 last attacks
        logging.info('Total {:.1f}% of instances ran out of time'.format(
            float(killed) / completed * 100))
# Cap on how many team names are listed in the verbose log line.
PRINTED_TEAM_NAMES = 5


def get_target_teams(args, teams, attack_no):
    """Return the {name: address} mapping of teams to attack this round.

    With --not-per-team a single wildcard entry is returned (one sploit
    instance, no address argument). With --distribute K/N only the Kth
    hash-based share of the teams is kept, so N clients cover all teams
    without overlap. Returns an empty dict (and logs an error) when
    nothing is left to attack.
    """
    if args.not_per_team:
        return {'*': None}

    if args.distribute is not None:
        k, n = args.distribute
        # Stable partition by CRC32 of the address: deterministic across runs.
        teams = {name: addr for name, addr in teams.items()
                 if binascii.crc32(addr.encode()) % n == k - 1}

    if teams:
        if attack_no <= args.verbose_attacks:
            names = sorted(teams.keys())
            if len(names) > PRINTED_TEAM_NAMES:
                names = names[:PRINTED_TEAM_NAMES] + ['...']
            logging.info('Sploit will be run on {} teams: {}'.format(len(teams), ', '.join(names)))
    else:
        # Fix: corrected the grammar of the original message ("There is no teams").
        logging.error('There are no teams to attack for this farm client, fix "TEAMS" value '
                      'in your server config or the usage of --distribute')

    return teams
def main(args):
    """Entry point: validate args, then attack all teams once per period."""
    try:
        fix_args(args)
    except (ValueError, InvalidSploitError) as e:
        logging.critical(str(e))
        return
    print(highlight(HEADER))
    logging.info('Connecting to the farm server at {}'.format(args.server_url))

    threading.Thread(target=lambda: run_post_loop(args)).start()

    config = flag_format = None
    pool = ThreadPoolExecutor(max_workers=args.pool_size)
    for attack_no in once_in_a_period(args.attack_period):
        try:
            config = get_config(args)
            flag_format = re.compile(config['FLAG_FORMAT'])
        except Exception as e:
            logging.error("Can't get config from the server: {}".format(repr(e)))
            if attack_no == 1:
                return
            # Keep attacking with the config fetched on a previous round.
            logging.info('Using the old config')

        teams = get_target_teams(args, config['TEAMS'], attack_no)
        if not teams:
            if attack_no == 1:
                return
            continue

        print()
        logging.info('Launching an attack #{}'.format(attack_no))

        # Teams are processed in pool-size batches; split the period among them.
        max_runtime = args.attack_period / ceil(len(teams) / args.pool_size)
        show_time_limit_info(args, config, max_runtime, attack_no)

        for team_name, team_addr in teams.items():
            pool.submit(run_sploit, args, team_name, team_addr, attack_no, max_runtime, flag_format)
def shutdown():
    """Stop the posting loop and kill every running sploit instance."""
    # Wakes up once_in_a_period() waiters, which stops run_post_loop and main.
    exit_event.set()
    # Killing the children also unblocks process_sploit_output and run_sploit.
    with instance_lock:
        for process in list(instance_storage.instances.values()):
            process.kill()
if __name__ == '__main__':
    try:
        main(parse_args())
    except KeyboardInterrupt:
        logging.info('Got Ctrl+C, shutting down')
    finally:
        # Always clean up: stop the post loop and kill child sploits.
        shutdown()
|
utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Common utilities for auto_scheduler. """
from typing import Hashable
import multiprocessing
import multiprocessing.pool
import queue
import signal
import threading
import os
import numpy as np
try:
import psutil
except ImportError:
psutil = None
from tvm import rpc
from tvm.tir import expr
from tvm.tir.transform import Simplify
from tvm.ir.transform import Sequential
from ..te import Tensor, placeholder
def get_func_name(func):
"""Get name of a function.
Parameters
----------
func: Function
The input function.
Returns
-------
name: str
The function name.
"""
return func.func_name if hasattr(func, "func_name") else func.__qualname__
def get_const_int(exp):
    """Verifies expr is integer and get the constant value.

    Parameters
    ----------
    exp : Union[tvm.tir.expr, int]
        The input expression.

    Returns
    -------
    out_value : int
        The output.
    """
    if isinstance(exp, int):
        return exp
    if isinstance(exp, expr.IntImm):
        return exp.value
    # Try folding the expression into a constant before giving up.
    simplified = Sequential([Simplify()])(exp)
    if not isinstance(simplified, expr.IntImm):
        raise ValueError("Expect value to be constant int")
    return simplified.value
def get_const_tuple(in_tuple):
    """Verifies input tuple is IntImm, returns tuple of int.

    Parameters
    ----------
    in_tuple : Tuple[tvm.tir.expr]
        The input.

    Returns
    -------
    out_tuple : Tuple[int]
        The output.
    """
    # Delegate element-wise constant extraction to get_const_int.
    return tuple(map(get_const_int, in_tuple))
def list_to_tuple(x):
    """Convert a list to a tuple recursively (nested lists become nested tuples)."""
    assert isinstance(x, list)
    converted = []
    for item in x:
        converted.append(list_to_tuple(item) if isinstance(item, list) else item)
    return tuple(converted)
def serialize_args(args):
    """
    Serialize arguments of a function to a hashable and jsonable tuple.

    Currently this is mainly used for tvm.tensor.Tensor
    """
    serialized = []
    for arg in args:
        if isinstance(arg, Tensor):
            # A tensor is summarized by a ("TENSOR", shape, dtype) triple.
            arg = ("TENSOR", get_const_tuple(arg.shape), arg.dtype)
        elif isinstance(arg, list):
            arg = list_to_tuple(arg)

        assert isinstance(arg, Hashable), str(arg) + " is not hashable"
        serialized.append(arg)

    return tuple(serialized)
def deserialize_args(args):
    """The inverse function of :code:`serialize_args`"""
    restored = []
    for arg in args:
        is_tensor = isinstance(arg, (tuple, list)) and arg[0] == "TENSOR"
        if is_tensor:
            # Rebuild a placeholder tensor from its (shape, dtype) summary.
            restored.append(placeholder(shape=arg[1], dtype=arg[2]))
        else:
            restored.append(arg)
    return restored
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
    """Kill all child processes of *parent_pid* recursively.

    Parameters
    ----------
    parent_pid : int
        The pid of the parent process whose descendants are signalled.
    sig : int
        The signal sent to each child (SIGTERM by default).

    Raises
    ------
    ImportError
        If psutil is not installed.
    """
    if not psutil:
        raise ImportError("psutil not found, try `pip install psutil` to fix this")
    try:
        parent = psutil.Process(parent_pid)
    except psutil.NoSuchProcess:
        # Parent already gone: nothing to clean up.
        return
    children = parent.children(recursive=True)
    for process in children:
        try:
            process.send_signal(sig)
        except psutil.NoSuchProcess:
            # Bug fix: a child that had already exited used to abort the whole
            # sweep (`return`), leaving later children running. Skip it instead.
            continue
def _func_wrapper(que, func, args, kwargs):
"""Call function and return the result over the queue."""
if kwargs:
que.put(func(*args, **kwargs))
else:
que.put(func(*args))
def call_func_with_timeout(timeout, func, args=(), kwargs=None):
    """Call a function with timeout.

    Runs *func* in a separate process and waits at most *timeout* seconds.
    Returns the function's result, or a TimeoutError instance when no
    result arrived in time. NOTE(review): a TimeoutError is also returned
    when *func* raises, since _func_wrapper puts nothing on the queue in
    that case — callers cannot distinguish the two.
    """
    que = multiprocessing.Queue(2)
    process = multiprocessing.Process(target=_func_wrapper, args=(que, func, args, kwargs))
    process.start()
    process.join(timeout)

    try:
        res = que.get(block=False)
    except queue.Empty:
        res = TimeoutError()

    # clean queue and process
    kill_child_processes(process.pid)
    process.terminate()
    process.join()
    que.close()
    que.join_thread()
    del process
    del que

    return res
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
    """Request a remote session.

    Parameters
    ----------
    device_key : str
        The device key of registered device in tracker.
    host : Optional[str]
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST".
    port : Optional[int]
        The port of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT".
    priority : int = 1
        The priority of this request, larger is more prior.
    timeout : int = 60
        The timeout of this session in second.

    Returns
    -------
    remote : RPCSession
        The connected remote RPCSession.
    """
    # connect to the tracker; raises KeyError when neither the argument
    # nor the corresponding environment variable is set
    host = host or os.environ["TVM_TRACKER_HOST"]
    port = port or int(os.environ["TVM_TRACKER_PORT"])

    tracker = rpc.connect_tracker(host, port)
    remote = tracker.request(device_key, priority=priority, session_timeout=timeout)
    return remote
def check_remote(device_key, host=None, port=None, priority=100, timeout=10):
    """
    Check the availability of a remote device.

    Parameters
    ----------
    device_key: str
        device key of registered device in tracker.
    host: Optional[str]
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST".
    port: Optional[int]
        The port address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT".
    priority: int = 100
        The priority of this request, larger is more prior.
    timeout: int = 10
        The timeout of this check in seconds.

    Returns
    -------
    available: bool
        True if can find available device.
    """

    def _check():
        request_remote(device_key, host, port, priority)

    # Fix: run the probe as a daemon thread. Previously a hanging tracker
    # request left a non-daemon thread alive, which blocked interpreter
    # exit even though this function had already returned False.
    t = threading.Thread(target=_check, daemon=True)
    t.start()
    t.join(timeout)
    # The device is considered available iff the request finished in time.
    return not t.is_alive()
def array_mean(arr):
    """Compute mean of the elments in a TVM Array<PrimExpr>

    Parameters
    ----------
    arr: Array
        A TVM Array<PrimExpr>

    Returns
    -------
    mean: float
        The mean of the elements in the array
    """
    total = sum(item.value for item in arr)
    return total / len(arr)
def to_str_round(x, decimal=6):
    """Convert an object to str and round float numbers

    Parameters
    ----------
    x: Union[str, list, int, float, np.ndarray]
        The input object
    decimal: int
        The precision of decimal fraction

    Returns
    -------
    ret: str
        The string format of these objects
    """
    if isinstance(x, str):
        return x
    if isinstance(x, (list, tuple, np.ndarray)):
        return "[" + ", ".join([to_str_round(y, decimal=decimal) for y in x]) + "]"
    if isinstance(x, dict):
        # Bug fix: propagate `decimal` into nested values — it was silently
        # dropped, so dict entries always rounded to the default 6 places.
        return str({k: to_str_round(v, decimal=decimal) for k, v in x.items()})
    if isinstance(x, int):
        return str(x)
    if isinstance(x, (np.float32, np.float64, float)):
        format_str = "%%.%df" % decimal
        return format_str % x
    raise ValueError("Invalid value: " + str(x) + "\ttype: " + str(type(x)))
|
dbdlnolang.py | '''
Encoding image analyzing errors: Add the numbers below to 8 to encode all types of errors (so status=9...23 is reserved to describe the errors)
- general exception: 1
- bad format: 2
- image too big: 4
- image too small: 8
- any combination of above
'''
import gc
import os
import ssl
import sys
import time
import trio
import uuid
import ujson
import shutil
import tarfile
import pandas as pd
from glob import glob
from uuid import uuid1
from io import BytesIO
from datetime import datetime
from sqlalchemy import create_engine
from configparser import ConfigParser
from PIL import Image, ImageFile, UnidentifiedImageError
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
from multiprocessing import Process, cpu_count
sys.path.append('./crawlingathome-worker/')
import asks
asks.init("trio")
ImageFile.LOAD_TRUNCATED_IMAGES = True # https://stackoverflow.com/a/47958486
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
def config(filename='database.ini', section='cah_production'):
    """Read database connection parameters from an ini file section.

    Returns a dict mapping option names to values; raises when the
    requested section is absent from the file.
    """
    # create a parser and read the config file
    parser = ConfigParser()
    parser.read(filename)

    if not parser.has_section(section):
        raise Exception('Section {0} not found in the {1} file'.format(section, filename))
    return {name: value for name, value in parser.items(section)}
class Tracer(trio.abc.Instrument):
    """Trio instrumentation hook that aggregates download/processing stats.

    Tasks report their outcome through task.custom_sleep_data tuples of
    (status, download_seconds, processing_seconds), where status 0 marks a
    successful download and 1/3 mark an exception.
    """

    def __init__(self):
        self.exceptions = 0         # tasks that ended with an error
        self.requests = 0           # NOTE(review): never incremented below
        self.downloads = 0          # successfully downloaded images
        self.imgproc_duration = 0   # cumulative image-processing seconds
        self.download_duration = 0  # cumulative download seconds
        self.error_duration = 0     # cumulative seconds lost to errors

    def task_exited(self, task):
        # Aggregate the per-task timing tuple each time a trio task finishes.
        if task.custom_sleep_data is not None:
            if task.custom_sleep_data[0] in [1, 3]:  # this is exception
                self.exceptions += 1
                self.error_duration += task.custom_sleep_data[2]
            if task.custom_sleep_data[0] == 0:  # this is image downloaded
                self.download_duration += task.custom_sleep_data[1]
                self.imgproc_duration += task.custom_sleep_data[2]
                self.downloads += 1

    def after_run(self):
        # Print the aggregated statistics once the trio run completes.
        # epsilon guards every division against a zero denominator.
        rate = round(self.exceptions / (self.exceptions + self.downloads + sys.float_info.epsilon), 2)
        avg_download = round(self.download_duration / (self.downloads + sys.float_info.epsilon), 2)
        avg_process = round(self.imgproc_duration / (self.downloads + sys.float_info.epsilon), 2)
        avg_error = round(self.error_duration / (self.exceptions + sys.float_info.epsilon), 2)
        print(f"[instrumentation] While scraping there were {self.exceptions} errors within {self.downloads + self.exceptions} candidates (error rate = {round(rate * 100,2)} %). {self.downloads} images were downloaded.")
        print(f"[instrumentation] Cumulative image processing duration {round(self.imgproc_duration, 2)} s.")
        print(f"[instrumentation] Average downloading time {avg_download} s/img, image processing time {avg_process} s/img, exceptions processing time {avg_error} s/link")
def log(e):
    """Append the exception's class name and message to errors.txt."""
    entry = "{} {}\n".format(e.__class__.__name__, e)
    with open("errors.txt", "a") as handle:
        handle.write(entry)
def process_img_content(response, alt_text, license, sample_id, language, i):
    """
    Function to process downloaded image. Use use PIL from pillow-simd
    (faster than open cv that in return is faster than original pillow)

    Error encoding (see module docstring): error_code starts at 8 and
    rejection reasons are added on top (1 general exception, 2 bad format,
    4 too big, 8 too small); an untouched 8 is rewritten to status 2.

    input: web request response, ALT text, license and sample id
    output: [sample_id, path, url, alt_text, width, height, license, language, status]
    """
    img_output_folder = f"./{i}/save/images/"
    error_code = 8
    # temp 2 lines: default a missing language tag to English
    if language == "" or language is None:
        language = "en"

    def _resize(im: Image):
        # Scale so the short side becomes 224 px, then center-crop to 224x224.
        width, height = im.size
        ratio = min(width, height) / 224
        new_width = int(round(width/ratio, 0))
        new_height = int(round(height/ratio, 0))
        im = im.resize((new_width, new_height), resample=Image.BICUBIC)
        if new_width > 224 or new_height > 224:
            left = (new_width - 224)/2
            top = (new_height - 224)/2
            right = (new_width + 224)/2
            bottom = (new_height + 224)/2
            # Crop the center of the image
            im = im.crop((left, top, right, bottom))
        return im

    try:
        # reject too small images
        if len(response.content) < 5000:
            error_code += 8
        img_data = BytesIO(response.content)
        with Image.open(img_data) as im:
            width, height = im.size
            # reject if too large (might be a DOS decompression bomb)
            if width * height > 89478484:
                error_code += 4
            im_format = im.format
            out_fname = f"{img_output_folder}{str(sample_id)}.{im_format.lower()}"
            # reject if format is not in this list
            if im_format not in ["JPEG", "JPG", "PNG", "WEBP"]:
                error_code += 2
            if min(width, height) > 224:
                im = _resize(im)
            # convert all images to RGB (necessary for CLIP, also CLIP is doing it again so do we need it here?)
            if im.mode != "RGB":
                im = im.convert("RGB")
            if error_code == 8:
                im.save(out_fname)  # do not retain images we do not need
    except (KeyError, UnidentifiedImageError):
        # Unreadable image: record a general error with zeroed dimensions.
        out_fname = ""
        width = 0
        height = 0
        error_code += 1
    if error_code == 8:
        error_code = 2  # mark succesful lines with status = 2
    return [str(sample_id), out_fname, response.url, alt_text, width, height, license, language, error_code]
async def request_image(parsed_df, i):
    """
    This function initiates many parallel async connections to try download the images from provided links

    input: dataset of validated links, the worker folder index
    output: list of lists with succesfully downloaded images and their parameters. this list is dumped on disk as json file
    """
    tmp_data = []
    # Cap the number of concurrently scheduled download tasks.
    limit = trio.CapacityLimiter(1000)

    # change the number of parallel connections based on CPU speed, network capabilities, etc.
    # the number of 192 is optimized for 1 vCPU droplet at Hetzner Cloud (code CX11)
    session = asks.Session(connections=64, ssl_context=ssl_ctx)

    # Random desktop Chrome/Linux user agent per run.
    software_names = [SoftwareName.CHROME.value]
    operating_systems = [OperatingSystem.LINUX.value]
    user_agent_rotator = UserAgent(software_names=software_names, operating_systems=operating_systems, limit=2000)
    user_agent = user_agent_rotator.get_random_user_agent()

    # try to make the bot website friendly
    session.headers = {
        "User-Agent": user_agent,
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate",
        "Referer": "https://google.com",
        "DNT": "1",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    }

    async def _request(row, i):
        # Download one URL and hand the response to process_img_content.
        start = time.time()
        sample_id = row[0]
        url = row[1]
        alt_text = row[2]
        license = row[3]
        language = row[4]
        # the following 2 lines are related to Trio Instrument to capture events from multiple threads
        task = trio.lowlevel.current_task()
        try:
            # tune timeout and connection_timeout to grab more or less files. shorter timeouts will exclude bad performing websites
            response = await session.get(url, timeout=10, connection_timeout=20)
            dltime = round(time.time()-start, 2)
            start = time.time()
            proces = process_img_content(
                response, alt_text, license, sample_id, language, i
            )
            proctime = round(time.time()-start, 2)
            task.custom_sleep_data = (0, dltime, proctime)  # for success do not count errors
            if proces is not None:
                tmp_data.append(proces)
        except Exception as e:
            log(e)
            task.custom_sleep_data = (1, 0, round(time.time()-start, 2))  # when exception is hit, count it

    async with trio.open_nursery() as n:
        for index, row in parsed_df.iterrows():
            async with limit:
                n.start_soon(_request, row, i)

    # trio makes sure at this point all async tasks were executed
    with open(f"./{i}/.tmp/{uuid1()}.json", "w") as f:
        ujson.dump(tmp_data, f)
    gc.collect()
    return
def dl_wat(parsed_df, i):  # replace valid data and start sampleid with parsed_df
    """
    This function initiates download attempt of validated parsed links
    It launches multithreaded tasks by using trio module

    input: dataset of validated links, the worker folder index
    output: dataframe of downloaded images and their parameters
    """
    # Download every image available
    processed_samples = []
    #trio.run(request_image, valid_data, first_sample_id, instruments=[TrioProgress(len(valid_data), False)] )
    trio.run( request_image, parsed_df, i, instruments=[Tracer()] )

    # request_image dumps partial results as json files; merge them all here.
    for tmpf in glob(f"./{i}/.tmp/*.json"):
        processed_samples.extend(ujson.load(open(tmpf)))
    # NOTE(review): HEIGHT/WIDTH labels are swapped relative to the value order
    # produced by process_img_content — apparently deliberate (see completeJob2's
    # comment about compensating for a bug in a previous release).
    return pd.DataFrame(
        processed_samples,
        columns=["SAMPLE_ID", "PATH", "URL", "TEXT", "HEIGHT", "WIDTH", "LICENSE", "LANGUAGE", "STATUS"],
    )
def upload(source: str, clientType: str, target: str):
    """Archive *source* as .tar.gz, rsync it to *target*, then delete local copies.

    Returns the rsync exit status (0 means success). The archive and the
    source directory are removed regardless of the rsync outcome.
    clientType is unused in this function's body.
    """
    with tarfile.open(f"{source}.tar.gz", "w:gz") as tar:
        tar.add(source, arcname=os.path.basename(source))
    result = os.system(f"rsync -av {source}.tar.gz {target}")
    if os.path.exists(f"{source}.tar.gz"):
        os.remove(f"{source}.tar.gz")
    if os.path.exists(f"{source}"):
        shutil.rmtree(f"{source}", ignore_errors=True)
    return result
def newJob(engine):
    """Atomically claim a batch of pending samples and return them as a DataFrame.

    Marks up to 10000 randomly sampled rows (status 0 -> 1) using
    FOR UPDATE SKIP LOCKED so concurrent workers never claim the same
    samples, then fetches the claimed rows' fields.
    """
    # strict selection of distinct domains
    #select_stmt1 = "UPDATE dataset SET status = 1 WHERE sampleid IN (SELECT DISTINCT ON (domain) sampleid FROM (SELECT domain, sampleid FROM dataset TABLESAMPLE SYSTEM (0.05) WHERE status = 0 LIMIT 1000000 FOR UPDATE SKIP LOCKED) as \"U\" LIMIT 10000) AND status = 0 RETURNING sampleid"
    # selection on domains based on distribution of URLs per domain
    select_stmt1 = "UPDATE dataset_nolang SET status = 1 WHERE sampleid IN (SELECT sampleid FROM dataset_nolang TABLESAMPLE SYSTEM (0.05) WHERE status = 0 LIMIT 10000 FOR UPDATE SKIP LOCKED) AND status = 0 RETURNING sampleid"
    conn = engine.raw_connection()
    cur = conn.cursor()
    cur.execute(select_stmt1)
    result = cur.fetchall()
    conn.commit()
    cur.close()
    # NOTE(review): the loop variable shadows the builtin `tuple`.
    values = ",".join([str(tuple[0]) for tuple in result])
    select_stmt2 = "SELECT sampleid, url, text, license, language FROM dataset_nolang WHERE sampleid in ({})".format(values)
    #select_stmt2 = "UPDATE dataset_en SET status = 1 WHERE sampleid IN (SELECT sampleid FROM dataset_en TABLESAMPLE SYSTEM (0.1) WHERE status = 0 LIMIT 10000 FOR UPDATE SKIP LOCKED) AND status = 0 RETURNING sampleid, url, text, license, language"
    df = pd.read_sql_query(select_stmt2, conn)
    conn.close()
    return df
def completeJob2(engine, prefix, parsed_df, dlparse_df):
    """Write job results back to the database and register the finished job.

    Updates status/width/height for every downloaded sample, inserts the
    job id into jobs_nolang, and finally marks any still-claimed samples
    (status 1) of this batch with the general-error status 9.
    """
    # prepare data for EN
    values2 = ",".join(parsed_df["sampleid"].astype(str))
    update_stmt1 = ""
    for i, row in dlparse_df.iterrows():
        # NOTE: width and height are intentionally swapped to compensate for
        # the bug in the previous laion release — the csv stays scrambled but
        # the database receives the correct values.
        update_stmt1 += "UPDATE dataset_nolang SET status={}, width={}, height={} where sampleid = {};".format(row["STATUS"], row["HEIGHT"], row["WIDTH"], row["SAMPLE_ID"])
    insert_stmt = "INSERT INTO jobs_nolang (jobid) VALUES ('{}')".format(prefix)
    # Bug fix: the original tested `len(dlparse_df.index > 0)` — the length of
    # a boolean array — which only worked by accident. Test the row count.
    if len(dlparse_df.index) > 0:
        conn = engine.raw_connection()
        cur = conn.cursor()
        cur.execute(update_stmt1)
        cur.execute(insert_stmt)
        conn.commit()
        cur.close()
        conn.close()
    # in case there are samples unaccounted for, we try to mark them with general error status
    update_stmt2 = "UPDATE dataset_nolang SET status = 9 where status = 1 AND sampleid in ({})".format(values2)
    conn = engine.raw_connection()
    cur = conn.cursor()
    cur.execute(update_stmt2)
    conn.commit()
    cur.close()
    conn.close()
    return
def worker(engine, params, i):
    """Endless CPU-node worker loop.

    Repeatedly: claim a batch of candidate URLs from the database, download
    the images, stage the surviving pairs for a GPU worker via rsync, then
    mark the job complete. Crashes are logged and retried after 60s.

    Args:
        engine: SQLAlchemy engine for the tracking database.
        params: parsed config dict (passed through from main; unused here).
        i: integer worker index used to namespace the working folders.
    """
    # per-worker working folders so parallel workers never collide
    tmp_folder = f"./{i}/.tmp/"
    output_folder = f"./{i}/save/"
    img_output_folder = output_folder + "images/"
    while True:
        try:
            start = time.time()
            start0 = start
            # claim a fresh batch of samples (status 0 -> 1) from the DB
            parsed_df = newJob(engine)
            prefix = uuid.uuid4().hex  # unique job id for this batch
            result = 0
            # clear working folders for a new job
            if os.path.exists(output_folder):
                shutil.rmtree(output_folder, ignore_errors=True)
            if os.path.exists(tmp_folder):
                shutil.rmtree(tmp_folder, ignore_errors=True)
            os.makedirs(output_folder)
            os.makedirs(img_output_folder)
            os.makedirs(tmp_folder)
            # compute output file names base
            out_fname = f"3_staged_workflow_job_{prefix}_full_wat"
            print(f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] Job acquired in {round(time.time()-start,2)} sec")
            start = time.time()
            print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] This job has {len(parsed_df)} candidates")
            # attempt to download validated links and save to disk for stats and blocking lists
            dlparse_df = dl_wat(parsed_df, i)
            dlparse_df_save = dlparse_df[dlparse_df["STATUS"]==2] # remove rejected items from gpu jobs
            dlparse_df_save.to_csv(output_folder + out_fname + ".csv", index=False, sep="|")
            # CPU stage done — move results into a job-named folder and hand
            # the data to a GPU worker over rsync
            os.mkdir(prefix)
            os.system(f"mv ./{i}/save/* {prefix}/")
            result += upload(prefix, "CPU", "archiveteam@176.9.4.150::gpujobsnolang") #todo find the IP and endpoint
            if result == 0:
                # only mark samples done when the rsync upload succeeded
                completeJob2(engine, prefix, parsed_df, dlparse_df)
            print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] pairs retained {len(dlparse_df_save)} in {round(time.time() - start, 2)}")
            print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] scraping efficiency {len(dlparse_df_save)/(time.time() - start)} img/sec")
            print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] crawling efficiency {len(parsed_df)/(time.time() - start)} links/sec")
            last = round(time.time() - start0)
            print(f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] Job completed in {last} seconds")
        except Exception as e:
            # best-effort worker: log, back off, and try a new job
            print (e)
            print (f"{datetime.now().strftime('%H:%M:%S')} Worker {i} crashed")
            time.sleep(60)
if __name__ == "__main__":
    # Spawn one daemon worker process per CPU core; the DB pool is sized to
    # match so every worker can hold a connection.
    print (f"starting session")
    procs = cpu_count()
    params = config()
    engine = create_engine(f'postgresql://{params["user"]}:{params["password"]}@{params["host"]}:5432/{params["database"]}', pool_size=procs, max_overflow=int(procs*1.5), pool_recycle=60, pool_pre_ping=True )
    for i in range(procs):
        Process(target=worker, args=[engine, params, i], daemon=True).start()
    try:
        # keep the main process alive; daemon workers die with it on exit
        while True:
            time.sleep(30)
    except KeyboardInterrupt:
        sys.exit()
|
fabfile.py | from fabric import api as fab
from functools import wraps
import os, re
#########################################################################################################
# Init / Settings
# Set the working directory to the build/ directory. This is necessary if you
# run "fab ..." in a subdirectory or with "fab ... -f build/fabfile.py"
BUILD_PATH = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
os.chdir(BUILD_PATH)
fab.env.use_ssh_config = True
fab.env.forward_agent = True
#########################################################################################################
# Utils
def _env_path():
    """Absolute path of the project virtualenv (``../env`` next to build/)."""
    env_dir = os.path.join(BUILD_PATH, '..', 'env')
    return os.path.realpath(env_dir)
def _env_bin(executable):
    """Path of *executable* inside the virtualenv's ``bin`` directory."""
    bin_dir = os.path.join(_env_path(), 'bin')
    return os.path.join(bin_dir, executable)
def _env_choose_bin(*executables):
    """Return the first of *executables* that exists in the virtualenv and is
    executable; raise ValueError when none qualifies."""
    for name in executables:
        candidate = _env_bin(name)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    raise ValueError(
        'There is none of the given executables available in the '
        'virtualenv: {0}.'.format(', '.join(executables)))
def _env_python_bin():
    """Python interpreter inside the virtualenv (python2 preferred)."""
    return _env_choose_bin('python2', 'python')
def _env_pip_bin():
    """pip executable inside the virtualenv (pip2 preferred)."""
    return _env_choose_bin('pip2', 'pip')
def _web_path():
    """Absolute path of the ``web`` directory next to build/."""
    web_dir = os.path.join(BUILD_PATH, '..', 'web')
    return os.path.realpath(web_dir)
def _docs_path():
    """Absolute path of the ``docs`` directory next to build/."""
    docs_dir = os.path.join(BUILD_PATH, '..', 'docs')
    return os.path.realpath(docs_dir)
def _goto_web(func):
    """Decorator: run *func* with the local working directory set to web/."""
    @wraps(func)
    def goto_func(*args, **kwargs):
        # fab.lcd changes the cwd only for local commands inside the block
        with fab.lcd(_web_path()):
            return func(*args, **kwargs)
    return goto_func
def _goto_docs(func):
    """Decorator: run *func* with the local working directory set to docs/."""
    @wraps(func)
    def goto_func(*args, **kwargs):
        with fab.lcd(_docs_path()):
            return func(*args, **kwargs)
    return goto_func
def _project_name():
    """Basename of the Django project directory."""
    return os.path.basename(_project_path())
def _project_path():
    """Locate the Django project inside web/.

    Returns the first subdirectory containing ``manage.py`` (older Django
    layouts) or ``wsgi.py`` (newer layouts); raises RuntimeError otherwise.
    """
    web_root = _web_path()
    for entry in os.listdir(web_root):
        candidate = os.path.join(web_root, entry)
        if not os.path.isdir(candidate):
            continue
        for marker in ('manage.py', 'wsgi.py'):
            if os.path.exists(os.path.join(candidate, marker)):
                return candidate
    raise RuntimeError('Could not find project path')
def _create_path(path):
if not os.path.exists(path):
os.makedirs(path)
def _static_path():
    """Absolute path of the shared ``static`` directory under web/."""
    return os.path.realpath(os.path.join(_web_path(), 'static'))
def _scss_path():
    """Absolute path of the shared ``scss`` directory under static/."""
    return os.path.realpath(os.path.join(_static_path(), 'scss'))
def _app_static_paths():
    """Yield every directory named ``static`` below the Django project."""
    for root, subdirs, _files in os.walk(_project_path()):
        for sub in subdirs:
            if sub == 'static':
                yield os.path.join(root, sub)
def _app_scss_paths():
    """Yield every directory named ``scss`` below any app static/ folder."""
    for static_root in _app_static_paths():
        for root, subdirs, _files in os.walk(static_root):
            for sub in subdirs:
                if sub == 'scss':
                    yield os.path.join(root, sub)
def _merge_dict(defaults, options=None):
if options is None:
return defaults
result = defaults.copy()
result.update(options)
return result
@_goto_web
def _run_django_manage_command(command, args=None, kwargs=None):
    """Run ``manage.py <command>`` inside web/ using the virtualenv Python.

    kwargs become command-line flags: single-letter keys map to ``-k``,
    longer keys to ``--key``; values equal to True (or the strings
    'True'/'true'/'y'/'yes') become bare flags, everything else becomes
    ``-k v`` / ``--key=value``. args may be a list/tuple or a single value.

    NOTE: uses dict.iteritems(), so this fabfile targets Python 2.
    """
    params = []
    if kwargs:
        for k, v in kwargs.iteritems():
            # normalise truthy string spellings into a bare flag
            if v in ('True', 'true', 'y', 'yes'):
                v = True
            if v is True:
                if len(k) == 1:
                    params.append('-%s' % k)
                else:
                    params.append('--%s' % k)
            else:
                if len(k) == 1:
                    params.append('-%s %s' % (k, v))
                else:
                    params.append('--%s=%s' % (k, v))
    if isinstance(args, (list, tuple)):
        params += args
    elif args is not None:
        params.append(args)
    fab.local('%s manage.py %s %s' % (
        _env_python_bin(),
        command,
        ' '.join(params),
    ))
def _filtered_files(paths, pattern):
if not isinstance(paths, (list, tuple)):
paths = [paths]
for path in paths:
for dir, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(dir, filename)
if pattern.match(filepath):
yield filepath
def _execute_watch(paths, func, **event_handler_kwargs):
    """Block forever, calling *func(event)* on any filesystem event under
    *paths* (one path or a list). Extra kwargs configure the watchdog
    RegexMatchingEventHandler (e.g. regexes=, ignore_directories=).
    Stops cleanly on KeyboardInterrupt.
    """
    # imported lazily so watchdog is only required by the watch tasks
    from watchdog.observers import Observer
    from watchdog.events import RegexMatchingEventHandler
    import time
    class FuncEventHandler(RegexMatchingEventHandler):
        def on_any_event(self, event):
            func(event)
    if not isinstance(paths, (list, tuple)):
        paths = [paths]
    event_handler = FuncEventHandler(**event_handler_kwargs)
    observer = Observer()
    for path in paths:
        observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        # idle loop; the observer thread delivers events
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def _execute_bg(func):
    """Run *func* in a separate background process (not joined)."""
    from multiprocessing import Process
    background = Process(target=func)
    background.start()
#########################################################################################################
# Tasks
# Setup / Install
@fab.task
def env():
    """Create the project virtualenv using virtualenv-3.4."""
    fab.local('virtualenv-3.4 %s' % _env_path())
@fab.task
def install():
    """Install/upgrade Python requirements into the virtualenv."""
    fab.local('%s install -Ur PYTHON_REQUIREMENTS' % _env_pip_bin())
@fab.task
def freeze():
    """Re-pin PYTHON_REQUIREMENTS from the installed package versions."""
    # write to a temp file first so a failed freeze can't clobber the list
    fab.local('%s freeze -r PYTHON_REQUIREMENTS > PYTHON_REQUIREMENTS.new' % _env_pip_bin())
    fab.local('mv -f PYTHON_REQUIREMENTS.new PYTHON_REQUIREMENTS')
@fab.task
def install_js():
    """Install JavaScript dependencies via npm."""
    fab.local('npm install')
@fab.task
def setup():
    """One-shot project setup: virtualenv, Python deps, then JS deps."""
    fab.execute(env)
    fab.execute(install)
    fab.execute(install_js)
# Asset management (CSS / JS)
@fab.task
def css():
    """Compile every non-partial SCSS file (name not starting with '_') to CSS."""
    for filepath in _filtered_files([_scss_path()] + list(_app_scss_paths()), re.compile('.*/[^_/][^/]*\.scss$')):
        # NOTE(review): str.replace swaps EVERY 'scss' occurrence in the path,
        # not just the directory segment — confirm no other path component
        # can contain 'scss'.
        output_filepath = filepath.replace('scss', 'css')
        _create_path(os.path.dirname(output_filepath))
        fab.local('sassc -lm %s %s' % (filepath, output_filepath))
@fab.task
@_goto_docs
def docs():
    """Build the Sphinx HTML docs when at least one .rst source exists.

    The original looped over every matching .rst file and ran ``make html``
    once per file, rebuilding identical output repeatedly; one build covers
    them all.
    """
    rst_files = _filtered_files(_docs_path(), re.compile(r'.*/[^_/][^/]*\.rst$'))
    if next(rst_files, None) is not None:
        fab.local('make html')
@fab.task
@_goto_web
def watch():
    """Recompile CSS whenever a .scss file changes (blocks until Ctrl-C)."""
    def handle_event(event):
        # warn_only: a broken stylesheet must not kill the watcher
        with fab.settings(warn_only=True):
            fab.execute(css)
    # compile once up front, then watch for changes
    fab.execute(css)
    _execute_watch(
        [_scss_path()] + list(_app_scss_paths()),
        handle_event,
        regexes=['^.*\.(scss)$'],
        ignore_directories=True,
    )
@fab.task
def gulp():
    """Run the gulp asset pipeline."""
    fab.local('gulp')
@fab.task
@_goto_web
def watch_docs():
    """Rebuild the docs whenever a .rst file changes (blocks until Ctrl-C).

    NOTE(review): decorated with _goto_web although it watches docs/ — the
    docs task carries its own _goto_docs, so this works, but confirm the
    decorator choice is intentional.
    """
    def handle_event(event):
        with fab.settings(warn_only=True):
            fab.execute(docs)
    # build once up front, then watch for changes
    fab.execute(docs)
    _execute_watch(
        [_docs_path()],
        handle_event,
        regexes=['^.*\.(rst)$'],
        ignore_directories=True,
    )
@fab.task
def static(*args, **kwargs):
    """Compile CSS, then run Django collectstatic."""
    fab.execute(css)
    _run_django_manage_command('collectstatic', args, kwargs)
# Django commands
@fab.task
def manage(command='help', *args, **kwargs):
    """Run an arbitrary manage.py command (defaults to 'help')."""
    _run_django_manage_command(command, args, kwargs)
@fab.task
def shell(*args, **kwargs):
    """Open a Django shell_plus session."""
    _run_django_manage_command('shell_plus', args, kwargs)
@fab.task
def syncdb(*args, **kwargs):
    """Run Django syncdb."""
    _run_django_manage_command('syncdb', args, kwargs)
@fab.task
def migrate(*args, **kwargs):
    """Run Django migrations."""
    _run_django_manage_command('migrate', args, kwargs)
@fab.task
def run(*args, **kwargs):
    """Run the Django dev server (defaults to binding 0.0.0.0:8000)."""
    if not args:
        args = ['0.0.0.0:8000']
    _run_django_manage_command('runserver', args, kwargs)
@fab.task
def rundev(*args, **kwargs):
    """Run the dev server with the SCSS watcher in a background process."""
    _execute_bg(lambda: fab.execute(watch))
    fab.execute(run, *args, **kwargs)
@fab.task
def makemessages(*args, **kwargs):
    """Run Django makemessages for all locales, skipping admin.py files.

    Cleaned up: the original carried several blocks of commented-out ignore
    patterns (models.py, backend.py, utils/...) that were dead code.
    """
    args = list(args)
    args.append('-a')          # process all locales
    args.append('-i')
    args.append("'admin.py'")  # ignore admin modules
    _run_django_manage_command('makemessages', args, kwargs)
@fab.task
def compilemessages(*args, **kwargs):
    """Compile gettext .po files into .mo files."""
    _run_django_manage_command('compilemessages', args, kwargs)
# Deployment
def _deploy_git_factory():
    """Build the fabdeploit Git class used for release deployments.

    The release commit is filtered to the deployable top-level paths plus the
    compiled CSS files, so generated assets ship without living in history.
    """
    import fabdeploit
    class GitFilter(fabdeploit.GitFilter):
        def filter(self):
            # keep only the deployable top-level paths
            for obj in self.filtered_tree:
                if obj.path not in ('web', 'build', 'serverboot', 'serverconfig'):
                    self.remove(obj.name)
            # compile CSS so the generated files exist before being added
            fab.execute(css)
            # TODO: Automatically find all CSS files necessary to deployment
            for filename in (
                'web/static/css/main.css',
                'web/static/css/email.css',
                'web/static/css/override_backend.css',
            ):
                if os.path.exists(os.path.join(self.repo.working_tree_dir, filename)):
                    self.add(filename)
                else:
                    raise RuntimeError('File %s does not exist?' % filename)
    class Git(fabdeploit.Git):
        local_repository_path = os.path.dirname(BUILD_PATH)
        # remote_repository_path = None  -- supplied per environment task
        release_author = 'Team23 GmbH & Co. KG <info@team23.de>'
        # release_branch = None          -- supplied per environment task
        release_commit_filter_class = GitFilter
    return Git
def _deploy_virtualenv_factory(_git):
    """Build a fabdeploit Virtualenv2 class bound to *_git*'s remote paths."""
    import fabdeploit
    class Virtualenv(fabdeploit.Virtualenv2):
        virtualenv_path = '%s/env' % _git.remote_repository_path
        requirements_file = '%s/build/PYTHON_REQUIREMENTS' % _git.remote_repository_path
    return Virtualenv
def _deploy_django_factory(_git):
    """Build a fabdeploit Django class pointing at the remote manage.py."""
    import fabdeploit
    class Django(fabdeploit.Django):
        manage_path = '%s/web/manage.py' % _git.remote_repository_path
    return Django
def _deploy_base_env():
    """Finish environment setup common to production/staging.

    Requires fab.env.git to be set by the calling environment task.
    """
    fab.require('git')
    fab.env.use_ssh_config = True
    fab.env.virtualenv = _deploy_virtualenv_factory(fab.env.git)()
    fab.env.django = _deploy_django_factory(fab.env.git)(fab.env.virtualenv)
@fab.task
def production():
    """Target the production environment on host s1."""
    fab.env.git = _deploy_git_factory()(
        remote_repository_path='/opt/penfold/production',
        release_branch='production',
    )
    fab.env.hosts = ['s1']
    fab.env.deploy_initscript = '/opt/webserver/buildout.webserver/supervisorctl'
    _deploy_base_env()
@fab.task
def staging():
    """Target the staging environment on host s1."""
    fab.env.git = _deploy_git_factory()(
        remote_repository_path='/home/penfold/staging',
        release_branch='staging',
    )
    fab.env.hosts = ['s1']
    fab.env.deploy_initscript = '/opt/webserver/buildout.webserver/supervisorctl'
    _deploy_base_env()
@fab.task
def deploy_push_files():
    """Create the filtered release commit and push it to the remote."""
    fab.require('git')
    fab.env.git.pull()
    fab.env.git.create_release_commit()
    fab.env.git.push()
@fab.task
def deploy_apply_files():
    """Check out the pushed release on the remote."""
    fab.require('git')
    fab.env.git.switch_release()
@fab.task
def deploy_virtualenv_files():
    """Create/update the remote virtualenv and tag its state to the release."""
    fab.require('virtualenv')
    fab.env.virtualenv.init()
    fab.env.virtualenv.update()
    if fab.env.git.release_commit:
        fab.env.virtualenv.git.commit(tag='release/%s' % fab.env.git.release_commit.hexsha)
@fab.task
def deploy_files():
    """Push, apply, and update the virtualenv in one step."""
    fab.execute(deploy_push_files)
    fab.execute(deploy_apply_files)
    fab.execute(deploy_virtualenv_files)
@fab.task
def deploy_static():
    """Collect static files and build assets on the remote."""
    fab.require('django')
    fab.env.django.collectstatic()
    fab.env.django.run('assets', 'build')
@fab.task
def deploy_migrate():
    """Run syncdb + migrate on the remote."""
    fab.require('django')
    fab.env.django.syncdb()
    fab.env.django.migrate()
@fab.task
def deploy_stop():
    """Stop the application server via its supervisor init script."""
    fab.run('%s stop' % fab.env.deploy_initscript)
@fab.task
def deploy_start():
    """Start the application server via its supervisor init script."""
    fab.run('%s start' % fab.env.deploy_initscript)
@fab.task
def deploy_restart():
    """Restart the application server via its supervisor init script."""
    fab.run('%s restart' % fab.env.deploy_initscript)
@fab.task
def deploy_status():
    """Show application server status via its supervisor init script."""
    fab.run('%s status' % fab.env.deploy_initscript)
@fab.task
def deploy(*args):
    """Full deployment with minimal downtime.

    Optional string args: 'novirtualenv', 'nostatic', 'nomigrate' skip the
    corresponding step.
    """
    fab.require('git')
    # prepare
    fab.execute(deploy_push_files)
    # The 'git status' is done so the virtualenv.create_commit later will not need to
    # scan all files first. Scanning files may be bad for performance and we want to
    # keep the downtime to a minimum.
    if 'virtualenv' in fab.env and fab.env.virtualenv.virtualenv_path:
        with fab.cd(fab.env.virtualenv.virtualenv_path):
            fab.run('git status')
    with fab.cd(fab.env.git.remote_repository_path):
        fab.run('git status')
    fab.execute(deploy_stop)
    # run upgrade while the server is down
    fab.execute(deploy_apply_files)
    if not 'novirtualenv' in args:
        fab.execute(deploy_virtualenv_files)
    elif 'virtualenv' in fab.env:
        # still tag the (unchanged) virtualenv against this release
        fab.env.virtualenv.git.commit(tag='release/%s' % fab.env.git.release_commit.hexsha)
    if not 'nostatic' in args:
        fab.execute(deploy_static)
    if not 'nomigrate' in args:
        fab.execute(deploy_migrate)
    # start server again
    fab.execute(deploy_start)
@fab.task
def deploy_setup(*args):
    """First-time deployment: push, apply, build env and static files."""
    fab.execute(deploy_push_files)
    fab.execute(deploy_apply_files)
    fab.execute(deploy_virtualenv_files)
    fab.execute(deploy_static)
    # NOTE: Python 2 print statements -- this fabfile targets Python 2.
    print "-" * 70
    print "Next: Setup the local_settings.py and initialize the database"
    print "-" * 70
|
callbacks_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    methods_to_count = [
        'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
        'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
        'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
        'on_test_begin', 'on_test_end', 'on_train_batch_begin',
        'on_train_batch_end', 'on_train_begin', 'on_train_end'
    ]
    # Replace every hook with a counting wrapper around the inherited method.
    for method_name in methods_to_count:
      setattr(self, method_name,
              self.wrap_with_counts(method_name, getattr(self, method_name)))

  def wrap_with_counts(self, method_name, method):
    """Return *method* wrapped so each call bumps its entry in method_counts."""

    def _call_and_count(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
  """Return (sequence, None): a 5-batch Sequence yielding (2, 10)/(2, 1) ones."""

  class MySequence(keras.utils.data_utils.Sequence):

    def __len__(self):
      return 5

    def __getitem__(self, _):
      return np.ones((2, 10)), np.ones((2, 1))

  return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  """Verifies how often each callback hook fires in fit/evaluate/predict."""

  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))

  def _get_model(self):
    """Small binary classifier compiled with AdamOptimizer."""
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2,
        epochs=5,
        callbacks=[counter])
    # 5 batches per epoch x 5 epochs = 25 train batches; validation runs
    # after every epoch (5 test begin/end, 2 val batches each).
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    x, y = data
    model = self._get_model()
    counter = Counter()
    model.evaluate(x, y, batch_size=2, callbacks=[counter])
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    x = data[0]
    model = self._get_model()
    counter = Counter()
    model.predict(x, batch_size=2, callbacks=[counter])
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })

  def test_callback_list_methods(self):
    """CallbackList forwards the batch-level hooks to wrapped callbacks."""
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])

    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)

    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
  def _get_model(self, input_shape=None):
    """Tiny 2-layer softmax model compiled with mse/rmsprop and a named
    CategoricalAccuracy metric ('my_acc') used by the progbar tests."""
    layers = [
        keras.layers.Dense(3, activation='relu'),
        keras.layers.Dense(2, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
        run_eagerly=testing_utils.should_run_eagerly())
    return model
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging(self):
    """fit() progress bar prints loss and the custom metric name."""
    model = self._get_model(input_shape=(3,))

    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types(exclude_models='functional')
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_deferred_model_build(self):
    """Progress bar also works when the model is built lazily during fit."""
    model = self._get_model()
    self.assertFalse(model.built)

    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  def test_ModelCheckpoint(self):
    """Exercises ModelCheckpoint across monitor/mode/save_best_only/period."""
    if h5py is None:
      return  # Skip test if models cannot be saved.

    layers = [
        keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
        keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'checkpoint')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    # case 1: save every epoch, mode inferred automatically
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'

    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # case 2: explicit 'min' mode
    mode = 'min'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # case 3: explicit 'max' mode monitoring accuracy
    mode = 'max'
    monitor = 'val_acc'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # case 4: only keep the best checkpoint
    save_best_only = True
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # Case: metric not available.
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor='unknown',
            save_best_only=True)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    # File won't be written.
    assert not os.path.exists(filepath)

    # case 5: save every `period` epochs with templated filename
    save_best_only = False
    period = 2
    mode = 'auto'
    filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            period=period)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=4,
        verbose=1)
    # only epochs 2 and 4 are checkpointed with period=2
    assert os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=4))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))

    # Invalid use: this will raise a warning but not an Exception.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode='unknown')
  def test_EarlyStopping(self):
    """EarlyStopping trains without error for every mode/monitor combination,
    including an unknown mode/metric (which only warns)."""
    with self.cached_session():
      np.random.seed(123)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

      cases = [
          ('max', 'val_acc'),
          ('min', 'val_loss'),
          ('auto', 'val_acc'),
          ('auto', 'loss'),
          ('unknown', 'unknown')
      ]
      for mode, monitor in cases:
        patience = 0
        cbks = [
            keras.callbacks.EarlyStopping(
                patience=patience, monitor=monitor, mode=mode)
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
  def test_EarlyStopping_reuse(self):
    """A single EarlyStopping instance can be reused across fit() calls."""
    with self.cached_session():
      np.random.seed(1337)
      patience = 3
      data = np.random.random((100, 1))
      labels = np.where(data > 0.5, 1, 0)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      weights = model.get_weights()

      stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience

      # This should allow training to go for at least `patience` epochs
      model.set_weights(weights)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_with_baseline(self):
    """With a baseline: stop immediately when it isn't beaten (patience 0),
    or allow at least `patience` epochs otherwise."""
    with self.cached_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=1, num_classes=1, input_dim=1)
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])

      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1

      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
    """restore_best_weights leaves the model with the best epoch's weights."""

    class DummyModel(object):
      """Stand-in model that stores 'weights' as the epoch number."""

      def __init__(self):
        self.stop_training = False
        self.weights = -1

      def get_weights(self):
        return self.weights

      def set_weights(self, weights):
        self.weights = weights

      def set_weight_to_epoch(self, epoch):
        self.weights = epoch

    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=2,
                                               restore_best_weights=True)
    early_stop.model = DummyModel()
    losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in the epoch 2 (loss = 0.1000).
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
      epochs_trained += 1
      early_stop.model.set_weight_to_epoch(epoch=epoch)
      early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
      if early_stop.model.stop_training:
        break
    # The best configuration is in epoch 2 (loss = 0.1000),
    # and while patience = 2, we're restoring the best weights,
    # so we end up at the epoch with the best weights, i.e. epoch 2
    self.assertEqual(early_stop.model.get_weights(), 2)
  def test_RemoteMonitor(self):
    """RemoteMonitor only warns (no exception) when its endpoint is down."""
    if requests is None:
      return

    monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address in unreachable:
    monitor.on_epoch_end(0, logs={'loss': 0.})
  def test_LearningRateScheduler(self):
    """LearningRateScheduler supports both schedule(epoch) and
    schedule(epoch, lr) signatures and updates the optimizer lr."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])

      # one-argument schedule: lr = 1 / (1 + epoch)
      cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      assert (
          float(keras.backend.get_value(
              model.optimizer.lr)) - 0.2) < keras.backend.epsilon()

      # two-argument schedule: halve the current lr each epoch
      cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      assert (
          float(keras.backend.get_value(
              model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau keeps lr with min_delta=0 over 2 epochs and reduces
    it by `factor` when min_delta is impossibly large."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        # fixed seeds so both runs start from identical conditions
        random_seed.set_random_seed(1234)
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=keras.optimizers.SGD(lr=0.1))
        return model

      # TODO(psv): Make sure the callback works correctly when min_delta is
      # set as 0. Test fails when the order of this callback and assertion is
      # interchanged.
      model = make_model()
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)

      model = make_model()
      # This should reduce the LR after the first epoch (due to high epsilon).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=2)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
  def test_ReduceLROnPlateau_patience(self):
    """The lr only drops once `patience` epochs pass without improvement."""

    class DummyOptimizer(object):

      def __init__(self):
        self.lr = keras.backend.variable(1.0)

    class DummyModel(object):

      def __init__(self):
        self.optimizer = DummyOptimizer()

    reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss', patience=2)
    reduce_on_plateau.model = DummyModel()

    # improvement, then two non-improving epochs -> reduce on the last one
    losses = [0.0860, 0.1096, 0.1040]
    lrs = []

    for epoch in range(len(losses)):
      reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
      lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))

    # The learning rates should be 1.0 except the last one
    for lr in lrs[:-1]:
      self.assertEqual(lr, 1.0)
    self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
  """The deprecated `epsilon` kwarg must map onto `min_delta` and warn."""
  with test.mock.patch.object(logging, 'warning') as warn_mock:
    cb = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
    self.assertRegexpMatches(
        str(warn_mock.call_args), '`epsilon` argument is deprecated')
    # `epsilon` must not survive as an attribute; its value lands in
    # `min_delta` instead.
    self.assertFalse(hasattr(cb, 'epsilon'))
    self.assertTrue(hasattr(cb, 'min_delta'))
    self.assertEqual(cb.min_delta, 1e-13)
def test_CSVLogger(self):
  """Exercises CSVLogger: custom separator, append mode, and object reuse."""
  with self.cached_session():
    np.random.seed(1337)
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'log.tsv')
    sep = '\t'
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def make_model():
      # Fixed seed so each freshly built model is initialized identically.
      np.random.seed(1337)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizers.SGD(lr=0.1),
          metrics=['accuracy'])
      return model

    # case 1, create new file with defined separator
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    with open(filepath) as csvfile:
      # The Sniffer infers the dialect from the file contents; the
      # configured separator must survive the round trip.
      dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)

    # case 3, reuse of CSVLogger object
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=2,
        verbose=0)

    with open(filepath) as csvfile:
      list_lines = csvfile.readlines()
      # Every row must contain exactly 4 separators, and the file must
      # hold a header plus the 4 epochs logged across the two fits above.
      for line in list_lines:
        assert line.count(sep) == 4
      assert len(list_lines) == 5
      # Appending must not rewrite the header, so 'epoch' appears once.
      output = ' '.join(list_lines)
      assert len(re.findall('epoch', output)) == 1
    os.remove(filepath)
def test_stop_training_csv(self):
  # Test that using the CSVLogger callback with the TerminateOnNaN callback
  # does not result in invalid CSVs.
  np.random.seed(1337)
  tmpdir = self.get_temp_dir()
  self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

  with self.cached_session():
    fp = os.path.join(tmpdir, 'test.csv')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)

    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
    model = keras.models.Sequential()
    for _ in range(5):
      model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
      # Serves real batches first, then switches to all-NaN batches once
      # `tot` exceeds the threshold, which triggers TerminateOnNaN.
      i = 0
      max_batch_index = len(x_train) // BATCH_SIZE
      tot = 0  # number of batches yielded so far
      while 1:
        if tot > 3 * len(x_train):
          yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                 np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
        else:
          yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
        i += 1
        tot += 1
        i %= max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(x_train) // BATCH_SIZE,
                                  validation_data=(x_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    # Training ended because the loss blew up, not by completing 20 epochs.
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
      for x in csv.reader(f):
        # In windows, due to \r\n line ends we may end up reading empty lines
        # after each line. Skip empty lines.
        if x:
          values.append(x)
    assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
  """Training must stop during the first epoch once the loss overflows."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    # Huge constant weights make the MSE loss overflow to inf immediately.
    big_init = keras.initializers.Constant(value=1e5)
    model = keras.models.Sequential()
    for _ in range(5):
      model.add(
          keras.layers.Dense(
              2,
              input_dim=INPUT_DIM,
              activation='relu',
              kernel_initializer=big_init))
    model.add(keras.layers.Dense(NUM_CLASSES))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')

    history = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=[keras.callbacks.TerminateOnNaN()],
        epochs=20)

    losses = history.history['loss']
    # TerminateOnNaN halts after the first epoch, so exactly one entry.
    self.assertEqual(len(losses), 1)
    self.assertEqual(losses[0], np.inf)
@unittest.skipIf(
    os.name == 'nt',
    'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
  """A LambdaCallback's on_train_end hook must fire after training."""
  with self.cached_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])

    # Start an arbitrary thread that should run during model
    # training and be released (via the event) after training completes.
    e = threading.Event()

    def target():
      e.wait()

    t = threading.Thread(target=target)
    t.start()
    # on_train_end sets the event, which lets `target` return.
    cleanup_callback = keras.callbacks.LambdaCallback(
        on_train_end=lambda logs: e.set())

    cbks = [cleanup_callback]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    t.join()
    assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
  """RemoteMonitor(send_as_json=True) must run a fit without raising.

  The HTTP layer is mocked, so this only checks the callback's payload
  path, not actual network delivery.
  """
  if requests is None:
    self.skipTest('`requests` required to run this test')
  with self.cached_session():
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.np_utils.to_categorical(y_test)
    y_train = keras.utils.np_utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])
    cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]

    # Patch requests.post so no real network traffic is generated.
    with test.mock.patch.object(requests, 'post'):
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1)
# A summary that was emitted during a test. Fields:
#   logdir: str. The logdir of the FileWriter to which the summary was
#     written.
#   tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains an summary of unexpected kind.
  """
  out = _SummaryFile()
  buckets = {
      'simple_value': out.scalars,
      'image': out.images,
      'histo': out.histograms,
      'tensor': out.tensors,
  }
  for dirpath, _, filenames in os.walk(logdir):
    event_paths = (
        os.path.join(dirpath, name)
        for name in filenames
        if name.startswith('events.out.'))
    for path in event_paths:
      for event in summary_iterator.summary_iterator(path):
        # Skip non-summary events (e.g., `graph_def` events).
        if not event.summary:
          continue
        for value in event.summary.value:
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          bucket = buckets.get(kind)
          if bucket is None:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          bucket.add(_ObservedSummary(logdir=dirpath, tag=value.tag))
  return out
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
  """Tests which summaries the V2 TensorBoard callback writes and where."""

  def setUp(self):
    super(TestTensorBoardV2, self).setUp()
    # Expected layout: <logdir>/train and <logdir>/validation subdirs.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_model(self):
    """Builds and compiles a small conv model for (10, 10, 1) inputs."""
    layers = [
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1)
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    return model

  def test_TensorBoard_default_logdir(self):
    """Regression test for cross-platform pathsep in default logdir."""
    os.chdir(self.get_temp_dir())

    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard()  # no logdir specified

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(logdir='.')
    # With no logdir, summaries land under ./logs/{train,validation}.
    train_dir = os.path.join('.', 'logs', 'train')
    validation_dir = os.path.join('.', 'logs', 'validation')
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_basic(self):
    """Default settings write only per-epoch loss scalars."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_across_invocations(self):
    """Regression test for summary writer resource use-after-free.

    See: <https://github.com/tensorflow/tensorflow/issues/25707>
    """
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    # Reusing the same callback across two fits must not crash.
    for _ in (1, 2):
      model.fit(
          x,
          y,
          batch_size=2,
          epochs=2,
          validation_data=(x, y),
          callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_batch_metrics(self):
    """update_freq=1 additionally writes per-batch loss scalars."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )

  def test_TensorBoard_weight_histograms(self):
    """histogram_freq=1 writes bias/kernel histograms alongside scalars."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    # Layer-name prefixes vary by model type, so compare with them stripped.
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )

  def test_TensorBoard_weight_images(self):
    """write_images=True additionally writes image summaries of weights."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, write_images=True)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.images),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
        },
    )

  def _strip_layer_names(self, summaries):
    """Deduplicate summary names modulo layer prefix.

    This removes the first slash-component of each tag name: for
    instance, "foo/bar/baz" becomes "bar/baz".

    Args:
      summaries: A `set` of `_ObservedSummary` values.

    Returns:
      A new `set` of `_ObservedSummary` values with layer prefixes
      removed.
    """
    result = set()
    for summary in summaries:
      if '/' not in summary.tag:
        raise ValueError('tag has no layer name: %r' % summary.tag)
      new_tag = summary.tag.split('/', 1)[1]
      result.add(summary._replace(tag=new_tag))
    return result

  def test_TensorBoard_invalid_argument(self):
    with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
      # `wwrite_images` is deliberately misspelled to trigger the error.
      keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
class TestTensorBoardV2WriteModelTest(test.TestCase):
  """Tests that TensorBoard(write_graph=True) writes the Keras model.

  Fix: `test_TensoriBoard_writeModel` was misspelled; renamed to
  `test_TensorBoard_writeModel` to match the sibling tests (still
  discovered by the `test_` prefix, so no runner behavior is lost).
  """

  def setUp(self):
    super(TestTensorBoardV2WriteModelTest, self).setUp()
    # Expected layout: <logdir>/train and <logdir>/validation subdirs.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def fitModelAndAssertKerasModelWritten(self, model):
    """Fits `model` with write_graph=True and asserts a 'keras' tensor summary."""
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, write_graph=True)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag='keras'),
        },
    )

  def test_TensorBoard_writeSequentialModel_noInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeSequentialModel_withInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeModel(self):
    # Functional-API variant of the sequential tests above.
    inputs = keras.layers.Input([10, 10, 1])
    x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(1)(x)
    model = keras.models.Model(inputs=inputs, outputs=[x])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)
# Run all test cases in this module under the TensorFlow test runner.
if __name__ == '__main__':
  test.main()
|
client.py | from contextlib import ExitStack
import threading
from typing import List
from ibapi import client
from ibapi import account_summary_tags
from ibapi import contract
from ibapi import order
from rx import operators as _
from rx.subject import ReplaySubject
from ibrx.mess import message_wrapper
from ibrx.mess.message import IbApiMessageType
from ibrx.mess.messages import account_summary
from ibrx.mess.messages import historical_data
from ibrx.mess.messages import open_orders
from ibrx.mess.messages import position
from ibrx.types.historical_data import HistoricalDataOptions
class IbApiClient(object):
    """Thin blocking wrapper around ibapi's EClient using Rx streams.

    All EWrapper callbacks are funneled into a ReplaySubject; each request
    method blocks (via `.run()`) until its response messages arrive.

    Fix: the `endDateTime` strftime format was '%Y%m%d %H:%M%S', which
    omits the colon between minutes and seconds; the TWS API expects
    'yyyymmdd HH:mm:ss', i.e. '%Y%m%d %H:%M:%S'.
    """

    def __init__(self, ip_addr: str, port: int, client_id: int):
        # ReplaySubject so messages emitted before a subscriber attaches
        # are still delivered to that subscriber.
        self._messages = ReplaySubject()
        self._message_wrapper = message_wrapper.IbApiMessageWrapper(
            self._messages)
        self._eclient = client.EClient(self._message_wrapper)
        self._eclient.connect(ip_addr, port, client_id)
        # EClient.run() is the socket message loop; give it its own thread.
        self._thread = threading.Thread(target=self._eclient.run)
        self._thread.start()

    def _next_valid_id(self) -> int:
        """Blocks until IB returns the next usable request/order id."""
        self._eclient.reqIds(-1)  # Argument is ignored
        return self._messages.pipe(
            _.first(lambda m: m.type == IbApiMessageType.NEXT_VALID_ID),
            _.map(lambda m: m.payload[0])).run()

    def get_account_summary(self) -> account_summary.AccountSummary:
        """Requests and returns the account summary for all tags."""
        request_id = self._next_valid_id()
        self._eclient.reqAccountSummary(
            request_id, "All", account_summary_tags.AccountSummaryTags.AllTags)
        # ExitStack cancels the subscription once the blocking collect
        # below returns (or raises).
        with ExitStack() as stack:
            stack.callback(
                lambda: self._eclient.cancelAccountSummary(request_id))
            obs = account_summary.collect(self._messages, request_id)
            return obs.run()

    def get_open_orders(self) -> List[open_orders.OpenOrder]:
        # reqAllOpenOrders is used instead of reqOpenOrders to include manual
        # orders. Also, reqAllOpenOrders does not initiate a subscription.
        self._eclient.reqAllOpenOrders()
        obs = open_orders.collect(self._messages)
        return obs.run()

    def get_positions(self) -> List[position.Position]:
        """Requests all positions, cancelling the subscription afterwards."""
        self._eclient.reqPositions()
        with ExitStack() as stack:
            stack.callback(self._eclient.cancelPositions)
            obs = position.collect(self._messages)
            return obs.run()

    def place_order(self, contract: contract.Contract, order: order.Order):
        """Submits `order` for `contract` under a fresh order id.

        Note: the parameters shadow the `contract`/`order` module imports
        inside this method body (the annotations are evaluated earlier,
        at class-creation time, so they still resolve to the modules).
        """
        self._eclient.placeOrder(self._next_valid_id(), contract, order)

    def cancel_order(self, order_id: int):
        """Cancels the order with the given id."""
        self._eclient.cancelOrder(order_id)

    def get_historical_data(self, contract: contract.Contract,
                            data_options: HistoricalDataOptions):
        """Fetches historical bars synchronously.

        Raises:
            ValueError: if `data_options.stream` is set; streaming is not
                supported by this blocking call.
        """
        HistoricalDataOptions.validate(data_options)
        if data_options.stream:
            raise ValueError(
                'get_historical_data should not be called with |options.stream| = True'
            )
        request_id = self._next_valid_id()
        # TWS expects 'yyyymmdd HH:mm:ss'; empty string means "now".
        end_datetime_str = data_options.end_datetime.strftime(
            '%Y%m%d %H:%M:%S') if data_options.end_datetime else ''
        self._eclient.reqHistoricalData(
            request_id,
            contract,
            endDateTime=end_datetime_str,
            durationStr=data_options.duration.as_string(),
            barSizeSetting=data_options.bar_size.as_string(),
            whatToShow=data_options.type.name,
            useRTH=data_options.use_rth,
            formatDate=data_options.format_time.value,
            keepUpToDate=False,
            chartOptions=None)
        obs = historical_data.collect(self._messages, request_id,
                                      data_options.type)
        return obs.run()
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cairo
import collections
import contextlib
import ctypes
import numpy
import os
import threading
import time
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('GObject', '2.0')
gi.require_version('GLib', '2.0')
gi.require_version('Gst', '1.0')
gi.require_version('GstAllocators', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstGL', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gdk, GObject, GLib, Gst, GstAllocators, GstBase, GstGL, GstVideo
from OpenGL.arrays.arraydatatype import ArrayDatatype
from OpenGL.GLES3 import (
glActiveTexture, glBindBuffer, glBindTexture, glBindVertexArray, glBlendEquation, glBlendFunc,
glBufferData, glDeleteBuffers, glDeleteVertexArrays, glDisable, glDrawElements, glEnable,
glEnableVertexAttribArray, glGenBuffers, glGenVertexArrays, glGetUniformLocation,
glUniformMatrix4fv, glVertexAttribPointer, glViewport)
from OpenGL.GLES3 import (
GL_ARRAY_BUFFER, GL_BLEND, GL_ELEMENT_ARRAY_BUFFER, GL_FALSE, GL_FLOAT, GL_FUNC_ADD,
GL_ONE_MINUS_SRC_ALPHA, GL_SRC_ALPHA, GL_STATIC_DRAW, GL_TEXTURE0, GL_TEXTURE_2D,
GL_TRIANGLES, GL_UNSIGNED_SHORT, GL_VERTEX_SHADER)
# Gst.Buffer.map(Gst.MapFlags.WRITE) is broken, this is a workaround. See
# http://lifestyletransfer.com/how-to-make-gstreamer-buffer-writable-in-python/
# https://gitlab.gnome.org/GNOME/gobject-introspection/issues/69
class GstMapInfo(ctypes.Structure):
    """ctypes mirror of the C `GstMapInfo` struct.

    Field order and types must match gstreamer's ABI exactly; instances
    are passed by pointer to gst_buffer_map/gst_buffer_unmap below.
    """
    _fields_ = [('memory', ctypes.c_void_p),                # GstMemory *memory
                ('flags', ctypes.c_int),                    # GstMapFlags flags
                ('data', ctypes.POINTER(ctypes.c_byte)),    # guint8 *data
                ('size', ctypes.c_size_t),                  # gsize size
                ('maxsize', ctypes.c_size_t),               # gsize maxsize
                ('user_data', ctypes.c_void_p * 4),         # gpointer user_data[4]
                ('_gst_reserved', ctypes.c_void_p * 4)]     # GST_PADDING
# ctypes imports for missing or broken introspection APIs.
# Each block loads a shared library and declares argtypes/restype for the
# handful of functions called directly via ctypes in this file.

# gstreamer core: raw buffer mapping (the introspected binding is broken).
libgst = ctypes.CDLL('libgstreamer-1.0.so.0')
GST_MAP_INFO_POINTER = ctypes.POINTER(GstMapInfo)
libgst.gst_buffer_map.argtypes = [ctypes.c_void_p, GST_MAP_INFO_POINTER, ctypes.c_int]
libgst.gst_buffer_map.restype = ctypes.c_int
libgst.gst_buffer_unmap.argtypes = [ctypes.c_void_p, GST_MAP_INFO_POINTER]
libgst.gst_buffer_unmap.restype = None
libgst.gst_mini_object_is_writable.argtypes = [ctypes.c_void_p]
libgst.gst_mini_object_is_writable.restype = ctypes.c_int
libgst.gst_context_writable_structure.restype = ctypes.c_void_p
libgst.gst_context_writable_structure.argtypes = [ctypes.c_void_p]
libgst.gst_structure_set.restype = ctypes.c_void_p
libgst.gst_structure_set.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
                                     ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]

# gst-gl: texture id lookup and CPU-side sync waiting.
libgstgl = ctypes.CDLL('libgstgl-1.0.so.0')
libgstgl.gst_gl_memory_get_texture_id.argtypes = [ctypes.c_void_p]
libgstgl.gst_gl_memory_get_texture_id.restype = ctypes.c_uint
libgstgl.gst_gl_sync_meta_wait_cpu.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
libgstgl.gst_gl_sync_meta_wait_cpu.restype = None

# cairo: image surfaces wrapping externally owned pixel data.
libcairo = ctypes.CDLL('libcairo.so.2')
libcairo.cairo_image_surface_create_for_data.restype = ctypes.c_void_p
libcairo.cairo_image_surface_create_for_data.argtypes = [ctypes.c_void_p,
                                                         ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
libcairo.cairo_surface_flush.restype = None
libcairo.cairo_surface_flush.argtypes = [ctypes.c_void_p]
libcairo.cairo_surface_destroy.restype = None
libcairo.cairo_surface_destroy.argtypes = [ctypes.c_void_p]
libcairo.cairo_format_stride_for_width.restype = ctypes.c_int
libcairo.cairo_format_stride_for_width.argtypes = [ctypes.c_int, ctypes.c_int]
libcairo.cairo_create.restype = ctypes.c_void_p
libcairo.cairo_create.argtypes = [ctypes.c_void_p]
libcairo.cairo_destroy.restype = None
libcairo.cairo_destroy.argtypes = [ctypes.c_void_p]

# librsvg: SVG parsing and rendering onto a cairo context.
librsvg = ctypes.CDLL('librsvg-2.so.2')
librsvg.rsvg_handle_new_from_data.restype = ctypes.c_void_p
librsvg.rsvg_handle_new_from_data.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]
librsvg.rsvg_handle_render_cairo.restype = ctypes.c_bool
librsvg.rsvg_handle_render_cairo.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
librsvg.rsvg_handle_close.restype = ctypes.c_bool
librsvg.rsvg_handle_close.argtypes = [ctypes.c_void_p, ctypes.c_void_p]

# gobject: to unref the rsvg handle created above.
libgobject = ctypes.CDLL('libgobject-2.0.so.0')
libgobject.g_object_unref.restype = None
libgobject.g_object_unref.argtypes = [ctypes.c_void_p]

# gdk: wayland surface/display accessors not exposed via introspection.
libgdk = ctypes.CDLL('libgdk-3.so.0')
libgdk.gdk_wayland_window_get_wl_surface.restype = ctypes.c_void_p
libgdk.gdk_wayland_window_get_wl_surface.argtypes = [ctypes.c_void_p]
libgdk.gdk_wayland_display_get_wl_display.restype = ctypes.c_void_p
libgdk.gdk_wayland_display_get_wl_display.argtypes = [ctypes.c_void_p]
@contextlib.contextmanager
def _gst_buffer_map(buffer, flags):
    """Map a Gst.Buffer via ctypes and yield its bytes as a ctypes array.

    Workaround for the broken Gst.Buffer.map() Python binding: drops down
    to libgstreamer directly, using hash() to recover the raw pointer.
    Raises ValueError for a WRITE mapping of a non-writable buffer and
    RuntimeError if the map call itself fails.
    """
    buffer_ptr = hash(buffer)
    if flags & Gst.MapFlags.WRITE and not libgst.gst_mini_object_is_writable(buffer_ptr):
        raise ValueError('Buffer not writable')
    info = GstMapInfo()
    if not libgst.gst_buffer_map(buffer_ptr, info, flags):
        raise RuntimeError('gst_buffer_map failed')
    try:
        byte_array_type = ctypes.c_byte * info.size
        yield ctypes.cast(info.data, ctypes.POINTER(byte_array_type)).contents
    finally:
        libgst.gst_buffer_unmap(buffer_ptr, info)
def _get_gl_texture_id(buf):
    """Return the GL texture id backing `buf`'s first memory (0 if no buf)."""
    if buf:
        memory = buf.peek_memory(0)
        assert GstGL.is_gl_memory(memory)
        return libgstgl.gst_gl_memory_get_texture_id(hash(memory))
    return 0
VERTEX_SHADER_SRC = '''
uniform mat4 u_transformation;
attribute vec4 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = u_transformation * a_position;
v_texcoord = a_texcoord;
}
'''
POSITIONS = numpy.array([
1.0, 1.0,
-1.0, 1.0,
-1.0, -1.0,
1.0, -1.0,
], dtype=numpy.float32)
TEXCOORDS = numpy.array([
1.0, 0.0,
0.0, 0.0,
0.0, 1.0,
1.0, 1.0,
], dtype=numpy.float32)
INDICES = numpy.array([
0, 1, 2, 0, 2, 3
], dtype=numpy.uint16)
IDENTITY_MATRIX = numpy.array([
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
], dtype=numpy.float16)
HFLIP_MATRIX = numpy.array([
[-1.0, 0.0, 0.0, 0.0],
[ 0.0, 1.0, 0.0, 0.0],
[ 0.0, 0.0, 1.0, 0.0],
[ 0.0, 0.0, 0.0, 1.0],
], dtype=numpy.float16)
VFLIP_MATRIX = numpy.array([
[1.0, 0.0, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
], dtype=numpy.float16)
NUM_BUFFERS = 2
class DmaOverlayBuffer():
    """A dmabuf-backed overlay surface shared between cairo (CPU) and GL.

    The same dmabuf is visible twice: as a cairo ARGB32 image surface for
    SVG rasterization on the CPU, and as a GL texture (via GLUpload) for
    compositing. The buffer's GL sync meta serializes CPU and GPU access.
    """

    def __init__(self, pool, glupload):
        self.glcontext = glupload.context
        self.dmabuf = pool.acquire_buffer()[1]  # placeholder, see below
        # (kept original unpacking below)
        res, self.dmabuf = pool.acquire_buffer()
        assert res == Gst.FlowReturn.OK
        assert GstAllocators.is_dmabuf_memory(self.dmabuf.peek_memory(0))
        # Map once to learn the CPU pointer/size and zero the pixels. The
        # raw pointer is kept after unmap — assumes the dmabuf mapping
        # remains valid for the buffer's lifetime (TODO confirm).
        with _gst_buffer_map(self.dmabuf, Gst.MapFlags.WRITE) as mapped:
            self.ptr = ctypes.addressof(mapped)
            self.len = ctypes.sizeof(mapped)
            self.clear()
        meta = GstVideo.buffer_get_video_meta(self.dmabuf)
        assert meta
        # Wrap the mapped pixels in a cairo surface for SVG rendering.
        self.surface = libcairo.cairo_image_surface_create_for_data(
            self.ptr,
            int(cairo.FORMAT_ARGB32),
            meta.width,
            meta.height,
            meta.stride[0])
        self.cairo = libcairo.cairo_create(self.surface)
        # Upload the same dmabuf as a GL texture and attach a sync meta.
        res, self.gl_buffer = glupload.perform_with_buffer(self.dmabuf)
        assert res == GstGL.GLUploadReturn.DONE
        memory = self.gl_buffer.peek_memory(0)
        assert GstGL.is_gl_memory(memory)
        self.texture_id = libgstgl.gst_gl_memory_get_texture_id(hash(memory))
        self.sync = GstGL.buffer_add_gl_sync_meta(self.glcontext, self.gl_buffer)

    def __del__(self):
        # NOTE(review): the surface is destroyed before the cairo context
        # that references it; presumably safe via cairo refcounting —
        # confirm. Attributes may also be missing if __init__ failed early.
        if self.surface:
            libcairo.cairo_surface_destroy(self.surface)
        if self.cairo:
            libcairo.cairo_destroy(self.cairo)

    def render_svg(self, svg):
        """Rasterizes `svg` (string; falsy clears the overlay) into the buffer."""
        # Wait for in-flight GPU work before touching pixels on the CPU.
        self.wait_sync()
        self.clear()
        if svg:
            data = svg.encode()
            handle = librsvg.rsvg_handle_new_from_data(data, len(data), 0)
            if handle:
                librsvg.rsvg_handle_render_cairo(handle, self.cairo)
                librsvg.rsvg_handle_close(handle, 0)
                libgobject.g_object_unref(handle)
        libcairo.cairo_surface_flush(self.surface)

    def clear(self):
        # All-zero ARGB pixels == fully transparent overlay.
        ctypes.memset(self.ptr, 0, self.len)

    def set_sync_point(self):
        """Records a sync point in the GL command stream via the sync meta."""
        self.sync.set_sync_point(self.glcontext)

    def wait_sync(self):
        """Blocks the CPU until the last recorded GL sync point completes."""
        libgstgl.gst_gl_sync_meta_wait_cpu(hash(self.sync), hash(self.glcontext))
class GlSvgOverlaySink(Gst.Bin, GstVideo.VideoOverlay):
    """glimagesink wrapped in a Gst.Bin that composites SVG overlays on top."""

    # Element metadata: long-name, klass, description, author.
    __gstmetadata__ = ('GlSvgOverlaySink',
                       'Sink/Video',
                       'glimagesink with SVG overlays',
                       'Coral <coral-support@google.com>')
    # Single always-present sink pad accepting whatever GLUpload can ingest.
    __gsttemplates__ = (Gst.PadTemplate.new('sink',
                        Gst.PadDirection.SINK,
                        Gst.PadPresence.ALWAYS,
                        GstGL.GLUpload.get_input_template_caps()
                        ))
    # 'svg' and 'rotate-method' are handled locally in do_set_property;
    # the remaining properties are proxied to the wrapped glimagesink.
    __gproperties__ = {
        'svg': (str,
                'SVG data',
                'SVG overlay data',
                '',
                GObject.ParamFlags.WRITABLE
               ),
        'rotate-method': (str,
                'Rotate method',
                'Rotate method according to glimagesink',
                'none',
                GObject.ParamFlags.WRITABLE
               ),
        'qos': (bool,
                'Generate Quality-of-Service events upstream',
                'Generate Quality-of-Service events upstream',
                True,
                GObject.ParamFlags.READWRITE
               ),
        'sync': (bool,
                'Sync on the clock',
                'Sync on the clock',
                True,
                GObject.ParamFlags.READWRITE
               ),
        'max-lateness': (int,
                'Maximum frame lateness (ns)',
                'Maximum number of nanoseconds that a buffer can be late before it is dropped (-1 unlimited)',
                -1,
                GLib.MAXINT,
                20000000,
                GObject.ParamFlags.READWRITE
               ),
        'fullscreen': (bool,
                'Fullscreen',
                'Requests that internally created windows are fullscreen',
                False,
                GObject.ParamFlags.READWRITE
               ),
    }
    # Emitted after each overlay draw completes.
    __gsignals__ = {
        'drawn': (GObject.SignalFlags.RUN_LAST, None, ())
    }
def __init__(self):
    Gst.Bin.__init__(self)
    # GL resources; populated later in init_gl on the GL thread.
    self.shader = None
    self.vao = 0
    self.positions_buffer = 0
    self.texcoords_buffer = 0
    self.vbo_indices = 0
    self.u_transformation = 0
    self.glcontext = None
    # Wrap a stock glimagesink and expose its sink pad as this bin's pad.
    self.glimagesink = Gst.ElementFactory.make('glimagesink')
    self.add(self.glimagesink)
    self.add_pad(Gst.GhostPad('sink', self.glimagesink.get_static_pad('sink')))
    self.glimagesink.connect('client-draw', self.on_draw)
    self.glimagesink.connect('client-reshape', self.on_reshape)
    # Intercept upstream events and downstream allocation queries.
    self.glimagesink.get_static_pad('sink').add_probe(
        Gst.PadProbeType.EVENT_UPSTREAM, self.on_glimagesink_event)
    self.get_static_pad('sink').add_probe(
        Gst.PadProbeType.QUERY_DOWNSTREAM, self.on_downstream_query)
    # Overlay-render state, shared with the render thread under self.cond.
    self.render_thread = None
    self.cond = threading.Condition()
    self.rendering = False
    self.svg = None
    # NUM_BUFFERS overlay surfaces used round-robin via self.index.
    self.buffers = [None] * NUM_BUFFERS
    self.index = 0
    self.matrix = IDENTITY_MATRIX
    # Optional FPS statistics, enabled via the PRINT_FPS env var.
    self.print_fps = int(os.environ.get('PRINT_FPS', '0'))
    self.incoming_frames = 0
    self.incoming_overlays = 0
    self.rendered_overlays = 0
    self.draws = 0
    self.fps_start = 0
    if self.print_fps:
        self.glimagesink.get_static_pad('sink').add_probe(
            Gst.PadProbeType.BUFFER, self.on_incoming_frame)
def do_expose(self):
    """GstVideo.VideoOverlay: forward expose requests to glimagesink."""
    self.glimagesink.expose()

def do_handle_events(self, handle_events):
    """GstVideo.VideoOverlay: delegate the event-handling toggle."""
    self.glimagesink.handle_events(handle_events)

def do_set_render_rectangle(self, x, y, width, height):
    """GstVideo.VideoOverlay: delegate render-rectangle placement."""
    return self.glimagesink.set_render_rectangle(x, y, width, height)

def do_set_window_handle(self, handle):
    """GstVideo.VideoOverlay: delegate the native window handle."""
    self.glimagesink.set_window_handle(handle)
def on_glimagesink_event(self, pad, info):
    """Pad probe: drop upstream RECONFIGURE events from glimagesink."""
    event = info.get_event()
    if event.type == Gst.EventType.RECONFIGURE:
        return Gst.PadProbeReturn.DROP
    return Gst.PadProbeReturn.OK
def on_downstream_query(self, pad, info):
    """Pad probe: answer ALLOCATION queries via glimagesink, minus
    the overlay/affine metas this element does not support."""
    query = info.get_query()
    if query.type == Gst.QueryType.ALLOCATION:
        # Ask glimagesink, but remove the metas we don't support.
        # Need to fiddle with refcount as Python bindings are buggy.
        # refcount is really 1, but Python took another ref making
        # it 2 and hence query is 'not writable'.
        assert query.mini_object.refcount == 2
        try:
            query.mini_object.refcount = 1
            if self.glimagesink.get_static_pad('sink').query(query):
                # Iterate in reverse so removal doesn't shift indices.
                for i in reversed(range(0, query.get_n_allocation_metas())):
                    gtype, params = query.parse_nth_allocation_meta(i)
                    if (gtype.name == 'GstVideoAffineTransformationAPI' or
                        gtype.name == 'GstVideoOverlayCompositionMetaAPI'):
                        query.remove_nth_allocation_meta(i)
                return Gst.PadProbeReturn.HANDLED
        finally:
            # Restore the artificial refcount before returning to Python.
            query.mini_object.refcount = 2
    return Gst.PadProbeReturn.OK
# NOTE(review): this is an identical duplicate of on_glimagesink_event
# defined earlier in this class; this later definition silently shadows
# the earlier one. One of the two copies should be removed.
def on_glimagesink_event(self, pad, info):
    """Pad probe: drop upstream RECONFIGURE events from glimagesink."""
    event = info.get_event()
    if event.type == Gst.EventType.RECONFIGURE:
        return Gst.PadProbeReturn.DROP
    return Gst.PadProbeReturn.OK
def on_incoming_frame(self, pad, info):
    """Buffer probe (PRINT_FPS only): counts frames reaching glimagesink."""
    self.incoming_frames += 1
    return Gst.PadProbeReturn.OK
def post_error(self, string, debug=''):
    """Logs `string` and posts an error message on the bus.

    NOTE(review): the error is built from the resource-error quark but
    with the Gst.CoreError.FAILED code — confirm the domain/code pairing
    is intended.
    """
    Gst.error(string)
    gerror = GLib.Error.new_literal(Gst.ResourceError.quark(), string, Gst.CoreError.FAILED)
    message = Gst.Message.new_error(self, gerror, debug)
    return self.post_message(message)
def do_change_state(self, transition):
    # Element state-machine hook: set up / tear down GL resources and the
    # SVG render thread around the base Gst.Bin state change.
    if transition == Gst.StateChange.READY_TO_NULL:
        # Free GL objects on the GL thread before dropping the context.
        self.glcontext.thread_add(self.deinit_gl)
        self.glcontext = None
    elif transition == Gst.StateChange.PAUSED_TO_READY:
        # Stop the render thread before its buffers are freed.
        with self.cond:
            self.rendering = False
            self.cond.notify_all()
        self.render_thread.join()
        self.glcontext.thread_add(self.free_buffers)
    result = Gst.Bin.do_change_state(self, transition)
    if transition == Gst.StateChange.NULL_TO_READY:
        # glimagesink owns the GL context; grab it after the bin is READY.
        self.glcontext = self.glimagesink.get_property('context')
        if self.glcontext:
            self.glcontext.thread_add(self.init_gl)
        else:
            self.post_error('failed to get gl context')
            result = Gst.StateChangeReturn.FAILURE
    elif transition == Gst.StateChange.READY_TO_PAUSED:
        self.try_create_buffers()
        self.rendering = True
        self.render_thread = threading.Thread(target=self.render_loop)
        self.render_thread.start()
    elif transition == Gst.StateChange.PAUSED_TO_PLAYING:
        # Video size may only be known by now; retry buffer allocation.
        self.try_create_buffers()
    return result
def do_set_property(self, prop, value):
    if prop.name == 'svg':
        # Queue new SVG markup for the render thread to rasterize.
        if not self.get_back_buffer():
            Gst.warning('Not ready to draw overlays, dropping data')
            return
        with self.cond:
            self.incoming_overlays += 1
            self.svg = value or ''
            self.cond.notify_all()
    elif prop.name == 'rotate-method':
        # Accept numeric strings as well as enum nicks.
        value = int(value) if value.isnumeric() else value
        self.glimagesink.set_property(prop.name, value)
        # Read back the effective value and mirror it into our draw matrix
        # (0 = identity, 4 = horizontal flip, 5 = vertical flip).
        value = int(self.glimagesink.get_property(prop.name))
        if value == 0:
            self.matrix = IDENTITY_MATRIX
        elif value == 4:
            self.matrix = HFLIP_MATRIX
        elif value == 5:
            self.matrix = VFLIP_MATRIX
        else:
            Gst.warning('Unsupported rotate-method')
            self.matrix = IDENTITY_MATRIX
    else:
        # Every other property is proxied straight to glimagesink.
        self.glimagesink.set_property(prop.name, value)
def do_get_property(self, prop):
    # All readable properties are proxied from the wrapped glimagesink.
    return self.glimagesink.get_property(prop.name)
def init_gl(self, glcontext):
    # Runs on the GL thread (via thread_add): compile the textured-quad
    # shader and build the VAO/VBOs used by on_draw.
    assert not self.shader
    assert glcontext == self.glcontext
    frag_stage = GstGL.GLSLStage.new_default_fragment(self.glcontext)
    vert_stage = GstGL.GLSLStage.new_with_string(self.glcontext,
        GL_VERTEX_SHADER,
        GstGL.GLSLVersion.NONE,
        GstGL.GLSLProfile.COMPATIBILITY | GstGL.GLSLProfile.ES,
        VERTEX_SHADER_SRC)
    self.shader = GstGL.GLShader.new(self.glcontext)
    self.shader.compile_attach_stage(vert_stage)
    self.shader.compile_attach_stage(frag_stage)
    self.shader.link()
    # wrappedOperation bypasses PyOpenGL's error-checking wrapper.
    self.u_transformation = glGetUniformLocation.wrappedOperation(
        self.shader.get_program_handle(), 'u_transformation')
    a_position = self.shader.get_attribute_location('a_position')
    a_texcoord = self.shader.get_attribute_location('a_texcoord')
    self.vao = glGenVertexArrays(1)
    glBindVertexArray(self.vao)
    # Upload static quad geometry: positions, texture coords, indices.
    self.positions_buffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, self.positions_buffer)
    glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(POSITIONS), POSITIONS, GL_STATIC_DRAW)
    self.texcoords_buffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, self.texcoords_buffer)
    glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(TEXCOORDS), TEXCOORDS, GL_STATIC_DRAW)
    self.vbo_indices = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, self.vbo_indices)
    glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(INDICES), INDICES, GL_STATIC_DRAW)
    # Record the attribute layout and element buffer in the VAO state.
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vbo_indices);
    glBindBuffer(GL_ARRAY_BUFFER, self.positions_buffer);
    glVertexAttribPointer.wrappedOperation(a_position, 2, GL_FLOAT, GL_FALSE, 0, None)
    glBindBuffer(GL_ARRAY_BUFFER, self.texcoords_buffer);
    glVertexAttribPointer.wrappedOperation(a_texcoord, 2, GL_FLOAT, GL_FALSE, 0, None)
    glEnableVertexAttribArray(a_position)
    glEnableVertexAttribArray(a_texcoord)
    # Unbind everything so later GL users start from a clean state.
    glBindVertexArray(0)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
def deinit_gl(self, glcontext):
    # Runs on the GL thread (via thread_add): drop shader references and
    # delete the VAO/VBOs created in init_gl.
    assert glcontext == self.glcontext
    self.overlay_shader = None
    self.shader = None
    glDeleteVertexArrays(1, [self.vao])
    self.vao = None
    glDeleteBuffers(1, [self.positions_buffer])
    self.positions_buffer = None
    glDeleteBuffers(1, [self.texcoords_buffer])
    self.texcoords_buffer = None
    glDeleteBuffers(1, [self.vbo_indices])
    self.vbo_indices = None
def on_reshape(self, sink, context, width, height):
    """Preserve the video aspect ratio by letter/pillar-boxing the viewport.

    Called by glimagesink when the output window is resized; centers the
    video inside the (width, height) window and updates the GL viewport.
    """
    overlay = self.glimagesink.get_by_interface(GstVideo.VideoOverlay)
    # Default: fill the whole window (unknown video size, or ratios match).
    vp_x, vp_y, vp_w, vp_h = 0, 0, width, height
    if overlay.width and overlay.height:
        src_ratio = overlay.width / overlay.height
        dst_ratio = width / height
        if src_ratio > dst_ratio:
            # Video is wider than the window: letterbox top/bottom.
            vp_h = width / src_ratio
            vp_y = (height - vp_h) / 2
        elif src_ratio < dst_ratio:
            # Video is narrower than the window: pillarbox left/right.
            vp_w = height * src_ratio
            vp_x = (width - vp_w) / 2
    glViewport(int(vp_x), int(vp_y), int(vp_w), int(vp_h))
    return True
# TODO: affine gltransformation support
def on_draw(self, sink, context, sample):
    """Draw the current video frame, then alpha-blend the SVG overlay on top.

    Runs on the GL thread via glimagesink's client-draw callback. Also
    accumulates and periodically prints FPS statistics when print_fps is set.

    Fixes over the previous version: the duplicated
    `assert context == self.glcontext` is removed, and the four fps locals
    that were computed but never used now feed the print statement instead
    of recomputing the same divisions inline (output is unchanged).
    """
    assert context == self.glcontext
    self.draws += 1
    frame_texture = _get_gl_texture_id(sample.get_buffer())
    overlay_buffer = self.get_front_buffer()
    overlay_texture = overlay_buffer.texture_id if overlay_buffer else 0
    glDisable(GL_BLEND)
    glBindVertexArray(self.vao)
    glActiveTexture(GL_TEXTURE0)
    glBindTexture(GL_TEXTURE_2D, frame_texture)
    self.shader.use()
    self.shader.set_uniform_1i('frame', 0)
    # Video frame: apply the rotate/flip matrix chosen via 'rotate-method'.
    glUniformMatrix4fv(self.u_transformation, 1, GL_FALSE, self.matrix)
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, None)
    if overlay_texture:
        # Overlay: straight alpha blend over the frame, never rotated.
        glBindTexture(GL_TEXTURE_2D, overlay_texture)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glBlendEquation(GL_FUNC_ADD)
        glUniformMatrix4fv(self.u_transformation, 1, GL_FALSE, IDENTITY_MATRIX)
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, None)
    glActiveTexture(GL_TEXTURE0)
    glBindTexture(GL_TEXTURE_2D, 0)
    glDisable(GL_BLEND)
    context.clear_shader()
    glBindVertexArray(0)
    if overlay_buffer:
        # Mark the buffer so the producer knows when the GPU is done with it.
        overlay_buffer.set_sync_point()
    self.emit('drawn')
    if not self.fps_start:
        self.fps_start = time.monotonic()
    elapsed = time.monotonic() - self.fps_start
    if self.print_fps and elapsed > self.print_fps:
        incoming_fps = self.incoming_frames / elapsed
        incoming_overlay_fps = self.incoming_overlays / elapsed
        render_fps = self.rendered_overlays / elapsed
        draw_fps = self.draws / elapsed
        print('glsvgoverlaysink: in frames {} ({:.2f} fps) svg {} ({:.2f} fps), rendered {} ({:.2f} fps), draw {} ({:.2f} fps)'.format(
            self.incoming_frames, incoming_fps,
            self.incoming_overlays, incoming_overlay_fps,
            self.rendered_overlays, render_fps,
            self.draws, draw_fps))
        self.incoming_frames = 0
        self.incoming_overlays = 0
        self.rendered_overlays = 0
        self.draws = 0
        self.fps_start = time.monotonic()
    return True
def render_loop(self):
    # Background thread: rasterize queued SVG strings into the back buffer
    # and swap it to the front for on_draw. Exits when do_change_state
    # clears self.rendering and signals the condition.
    while True:
        with self.cond:
            rendering = self.rendering
            # Consume the pending SVG (if any) under the lock.
            svg = self.svg
            self.svg = None
        if not rendering:
            break
        if svg is None:
            # Nothing queued: block until do_set_property or shutdown
            # notifies the condition.
            # NOTE(review): an SVG set between releasing and re-acquiring
            # the lock here is only picked up on the next notify.
            with self.cond:
                self.cond.wait()
            continue
        buf = self.get_back_buffer()
        buf.render_svg(svg)
        self.rendered_overlays += 1
        self.swap_buffers()
def try_create_buffers(self):
    # Allocate NUM_BUFFERS GL-uploadable overlay buffers once the video
    # size is known. No-op until then, and once already allocated.
    if self.buffers[0]:
        return
    sink = self.glimagesink.get_by_interface(GstVideo.VideoOverlay)
    width = sink.width
    height = sink.height
    if not width or not height:
        return
    # Negotiate a BGRA system-memory -> RGBA GL texture upload path.
    render_caps = 'video/x-raw, width={}, height={}, format=BGRA'
    gl_caps = 'video/x-raw(memory:GLMemory), width={}, height={}, format=RGBA, texture-target=2D'
    # NOTE(review): min_stride is computed but never used.
    min_stride = libcairo.cairo_format_stride_for_width(int(cairo.FORMAT_ARGB32), width)
    render_caps = Gst.Caps.from_string(render_caps.format(width, height))
    gl_caps = Gst.Caps.from_string(gl_caps.format(width, height))
    glupload = GstGL.GLUpload.new(self.glcontext)
    glupload.set_caps(render_caps, gl_caps)
    # Let the uploader propose a buffer pool matching the render caps.
    query = Gst.Query.new_allocation(render_caps, True)
    glupload.propose_allocation(None, query)
    assert query.get_n_allocation_pools()
    pool, size, min_bufs, max_bufs = query.parse_nth_allocation_pool(0)
    assert pool.set_active(True)
    for i in range(0, NUM_BUFFERS):
        self.buffers[i] = DmaOverlayBuffer(pool, glupload)
    assert pool.set_active(False)
def free_buffers(self, glcontext):
    # Runs on the GL thread: drop all overlay buffer references.
    self.buffers = [None] * NUM_BUFFERS
def get_front_buffer(self):
    # The most recently completed overlay buffer (the one on_draw samples).
    return self.buffers[(self.index - 1) % len(self.buffers)]
def get_back_buffer(self):
    # The buffer the render thread draws the next SVG into.
    return self.buffers[self.index]
def swap_buffers(self):
    # Advance the ring index: the just-rendered back buffer becomes front.
    self.index = (self.index + 1) % len(self.buffers)
def get_default_wayland_display_context(self):
    # Build a GstContext carrying the default GDK Wayland display pointer.
    # Raw ctypes calls (via hash() to obtain the native pointer) are used
    # because these functions are not exposed through introspection.
    wl_display = libgdk.gdk_wayland_display_get_wl_display(hash(Gdk.Display.get_default()))
    context = Gst.Context.new('GstWaylandDisplayHandleContextType', True)
    structure = libgst.gst_context_writable_structure(hash(context))
    libgst.gst_structure_set(structure, ctypes.c_char_p('display'.encode()),
                             hash(GObject.TYPE_POINTER), wl_display, 0)
    return context
def get_gl_display_context(self):
    # Expose our GL display as a GstContext, or None before NULL_TO_READY.
    if not self.glcontext:
        return None
    context = Gst.Context.new(GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
    GstGL.context_set_gl_display(context, self.glcontext.get_display())
    return context
def get_wayland_window_handle(self, widget):
    # Native wl_surface pointer for a realized GTK widget (ctypes call).
    return libgdk.gdk_wayland_window_get_wl_surface(hash(widget.get_window()))
def get_sharable_local_context(self):
    # Create a new GL context sharing with ours and wrap it in a
    # 'gst.gl.local_context' GstContext, so external code (e.g. a GTK
    # widget) can render with shared GL resources. Returns None before
    # the element has obtained its GL context.
    if not self.glcontext:
        return None
    _, new_glcontext = self.glcontext.get_display().create_context(self.glcontext)
    gst_context = Gst.Context.new('gst.gl.local_context', True)
    structure = libgst.gst_context_writable_structure(hash(gst_context))
    libgst.gst_structure_set(structure, ctypes.c_char_p('context'.encode()),
                             hash(GObject.GType.from_name('GstGLContext')), hash(new_glcontext), 0)
    return gst_context
__gstelementfactory__ = ('glsvgoverlaysink', Gst.Rank.NONE, GlSvgOverlaySink)
|
ArduinoSerial_PyQt_Demo_AllOne.py | # -*- coding: utf-8 -*-
"""
This recipe opens a simple window in PyQt to poll the serial port for
data and print it out. Uses threads and a queue.
This recipe depends on Python3.5,PyQt4 and pySerial.
Tested on Qt4.7 / Win 10 / Arduino Uno
PS: This code is meant to demonstrate a concept only, not for actual use.
"""
import sys
import time
import threading
import random
import queue
from PyQt4 import QtGui, QtCore as qt
import serial
SERIALPORT = 'COM3'
class GuiPart(QtGui.QMainWindow):
    """Main window that displays messages pulled from a thread-safe queue.

    Fix: the empty-queue handler previously caught ``Queue.Empty`` — but the
    module is imported as lowercase ``queue`` (Python 3), so that name is
    undefined and would raise NameError instead of catching the exception.
    """

    def __init__(self, queue, endcommand, *args):
        """queue: Queue the worker thread feeds; endcommand: shutdown callback."""
        QtGui.QMainWindow.__init__(self, *args)
        self.setWindowTitle('Arduino Serial Demo')
        self.queue = queue
        # We show the result of the thread in the gui, instead of the console
        self.editor = QtGui.QTextEdit(self)
        self.setCentralWidget(self.editor)
        self.endcommand = endcommand

    def closeEvent(self, ev):
        # Closing the window signals the rest of the application to stop.
        self.endcommand()

    def processIncoming(self):
        """
        Handle all the messages currently in the queue (if any).
        """
        while self.queue.qsize():
            try:
                msg = self.queue.get(0)
                # Check contents of message and do what it says
                # As a test, we simply print it
                self.editor.insertPlainText(str(msg))
            except queue.Empty:
                # qsize() raced with another consumer; nothing left to read.
                pass
class ThreadedClient:
    """
    Launch the main part of the GUI and the worker thread. periodicCall and
    endApplication could reside in the GUI part, but putting them here
    means that you have all the thread controls in a single place.

    Fix: the serial port is now closed in a ``finally`` block, so it is
    released even if reading raises; the previous in-loop
    ``if self.running == 0: ser.close()`` check is no longer needed.
    """

    def __init__(self):
        # Create the queue handing serial data from the worker to the GUI.
        self.queue = queue.Queue()
        # Set up the GUI part
        self.gui = GuiPart(self.queue, self.endApplication)
        self.gui.show()
        # A timer to periodically call periodicCall :-)
        self.timer = qt.QTimer()
        qt.QObject.connect(self.timer,
                           qt.SIGNAL("timeout()"),
                           self.periodicCall)
        # Start the timer -- this replaces the initial call to periodicCall
        self.timer.start(100)
        # Set up the thread to do asynchronous I/O
        # More can be made if necessary
        self.running = 1
        self.thread1 = threading.Thread(target=self.workerThread1)
        self.thread1.start()

    def periodicCall(self):
        """
        Check every 100 ms if there is something new in the queue.
        """
        self.gui.processIncoming()
        if not self.running:
            # NOTE(review): relies on the module-level `root` QApplication.
            root.quit()

    def endApplication(self):
        # Flag polled by both the timer callback and the worker thread.
        self.running = 0

    def workerThread1(self):
        """
        This is where we handle the asynchronous I/O: poll the serial port
        and forward complete lines to the GUI queue.
        """
        ser = serial.Serial(SERIALPORT, 9600)
        try:
            while self.running:
                # readline() blocks until a full line arrives from the board.
                msg = ser.readline().decode()
                if msg:
                    self.queue.put(msg)
        finally:
            # Always release the serial port, even if reading raised.
            ser.close()
# Application entry point: create the Qt application, start the GUI and
# worker thread, and block in the Qt event loop until quit() is called.
root = QtGui.QApplication(sys.argv)
client = ThreadedClient()
sys.exit(root.exec_())
|
faster_parser_model_apply_inputs.py | # -*- coding: utf-8 -*-
"""
@author: mesar
"""
import pandas as pd
import json
from datetime import datetime
import numpy as np
import csv
from pathlib import Path
from progressbar import progressbar as pbar
import time
import sys
def parallel_parsing(i, key, number_of_clients, vehicle_capacity, package_data_list, route_data_list, travel_times_list):
    """Parse one route's raw JSON dicts into the solver's on-disk inputs.

    Writes three artifacts for route index ``i`` (route id ``key``):
    a ``route_<i>.txt`` solver input file, a ``travel_times_route_<i>.csv``
    matrix, and a ``df_<i>.csv`` per-stop DataFrame.
    """
    route_info = {}
    # Build the travel-time matrix as rows: header row of stop keys, then
    # one row per origin stop with times to every destination.
    travel_time_matrix = []
    row_values = ['key']
    for k in travel_times_list.keys():
        row_values.append(k)
    travel_time_matrix.append(row_values)
    for k in travel_times_list.keys():
        row_values = []
        row_values.append(k)
        for j in travel_times_list[k].keys():
            row_values.append(travel_times_list[k][j])
        travel_time_matrix.append(row_values)
    """
    for k in actual_sequences_list[key][0].keys():
        for j in actual_sequences_list[key][0][k].keys():
            route_info[j] = {}
            route_info[j]['order'] = actual_sequences_list[key][0][k][j]
    """
    # Per-stop metadata: coordinates, zone, type, and route-level fields.
    latlongs = []
    zone_ids = []
    for k in route_data_list['stops'].keys():
        latlongs.append((route_data_list['stops'][k]['lat'], route_data_list['stops'][k]['lng']))
        # NOTE(review): departure_time is recomputed identically for every
        # stop; it only depends on route-level fields.
        depart_time_1 = str(route_data_list['date_YYYY_MM_DD']) + " " + str(route_data_list['departure_time_utc'])
        departure_time = datetime.strptime(depart_time_1, '%Y-%m-%d %H:%M:%S')
        departure_time_seconds = (departure_time.hour * 3600) + (departure_time.minute * 60) + departure_time.second
        route_info[k] = {}
        route_info[k]['latitude'] = route_data_list['stops'][k]['lat']
        route_info[k]['longitude'] = route_data_list['stops'][k]['lng']
        route_info[k]['zone_id'] = route_data_list['stops'][k]['zone_id']
        # -1 marks a missing zone id.
        id_zona = -1 if route_data_list['stops'][k]['zone_id'] == "NaN" else route_data_list['stops'][k]['zone_id']
        zone_ids.append(id_zona)
        route_info[k]['type'] = route_data_list['stops'][k]['type']
        #route_info[k]['score'] = route_data_list['route_score']
        route_info[k]['departure_time'] = route_data_list['departure_time_utc']
        route_info[k]['departure_date'] = route_data_list['date_YYYY_MM_DD']
        route_info[k]['route_id'] = key
        route_info[k]['max_capacity'] = vehicle_capacity
    # Aggregate the packages of each stop: time windows (seconds relative
    # to departure; -1 when absent), service time, volume, package counts,
    # and detection of stops that share a package id ("double visits").
    time_windows = []
    counter = 0
    planned_service_times = []
    dimensions = []
    package_id_and_client = {}
    double_visits = {}
    for k in package_data_list.keys():
        time_window1 = -1
        time_window2 = -1
        sum_dimensions = 0
        number_packages = 0
        max_depth = 0.0
        max_width = 0.0
        max_height = 0.0
        planned_service_time_client = 0.0
        #package_status = ""
        for j in package_data_list[k].keys():
            if j in package_id_and_client:
                # Same package id seen at two stops: link both stops.
                double_visits[k] = [package_id_and_client[j]]
                double_visits[package_id_and_client[j]] = [k]
            else:
                package_id_and_client[j] = k
            #new_package_status = 'D' if str(package_data_list[k][j]['scan_status']) == 'DELIVERED' else 'A'
            #package_status = package_status +'_' + new_package_status
            date_value1 = str(package_data_list[k][j]['time_window']['start_time_utc'])
            date_value2 = str(package_data_list[k][j]['time_window']['end_time_utc'])
            planned_service_time_client += package_data_list[k][j]['planned_service_time_seconds']
            if(date_value1 != 'nan' and date_value2 != 'nan'):
                real_date_1 = datetime.strptime(date_value1, '%Y-%m-%d %H:%M:%S')
                real_date_2 = datetime.strptime(date_value2, '%Y-%m-%d %H:%M:%S')
                date_1 = datetime.strptime(date_value1, '%Y-%m-%d %H:%M:%S') - departure_time
                date_2 = datetime.strptime(date_value2, '%Y-%m-%d %H:%M:%S') - departure_time
                if (real_date_1 <= departure_time):
                    # Window already open at departure: clamp start to 0.
                    time_window1 = 0
                    time_window2 = date_2.seconds
                else:
                    time_window1 = date_1.seconds
                    time_window2 = date_2.seconds
                #time_window1 = (date_1.hour * 3600) + (date_1.minute * 60) + date_1.second
                #time_window2 = (date_2.hour * 3600) + (date_2.minute * 60) + date_2.second
                #if(date_1.day != date_2.day):
                    #time_window2 += (24*3600)
            else:
                time_window1 = -1
                time_window2 = -1
                real_date_1 = -1
                real_date_2 = -1
            try:
                #if(time_windows[counter][0] == -1 and time_windows[counter][1] == -1):
                    #time_windows[counter] = (time_window1, time_window2)
                route_info[k]['time_window_start_seconds'] = time_window1
                route_info[k]['time_window_end_seconds'] = time_window2
                route_info[k]['time_window_start'] = real_date_1
                route_info[k]['time_window_end'] = real_date_2
            except(BaseException):
                #time_windows.append((time_window1, time_window2))
                route_info[k]['time_window_start_seconds'] = time_window1
                route_info[k]['time_window_end_seconds'] = time_window2
                route_info[k]['time_window_start'] = real_date_1
                route_info[k]['time_window_end'] = real_date_2
            # Package dimensions: -1 marks missing values, which are then
            # excluded from the per-stop maxima.
            depth = float(-1 if package_data_list[k][j]['dimensions']['depth_cm'] == 'NaN' else package_data_list[k][j]['dimensions']['depth_cm'])
            height = float(-1 if package_data_list[k][j]['dimensions']['height_cm'] == 'NaN' else package_data_list[k][j]['dimensions']['height_cm'])
            width = float(-1 if package_data_list[k][j]['dimensions']['width_cm'] == 'NaN' else package_data_list[k][j]['dimensions']['width_cm'])
            max_depth = depth if ((depth >= max_depth) and (depth != -1)) else max_depth
            max_height = height if ((height >= max_height) and (height != -1)) else max_height
            max_width = width if ((width >= max_width) and (width != -1)) else max_width
            sum_dimensions += (depth * height * width)
            number_packages += 1
        planned_service_times.append(planned_service_time_client)
        dimensions.append(sum_dimensions)
        route_info[k]['service_time'] = planned_service_time_client
        route_info[k]['dimensions'] = sum_dimensions
        route_info[k]['number_packages'] = number_packages
        #route_info[k]['package_status'] = package_status
        route_info[k]['max_depth'] = max_depth
        route_info[k]['max_height'] = max_height
        route_info[k]['max_width'] = max_width
        #route_info[k]['double_visit'] = double_visits[k] if k in double_visits else -1
        time_windows.append((time_window1, time_window2))
        counter += 1
    # Assign a visit order; the depot ("Station") is always order 0.
    order_counter = 1
    for k in route_info.keys():
        route_info[k]['order'] = order_counter
        if route_info[k]['type'] == "Station":
            depot_key = k
            route_info[k]['order'] = 0
        else:
            order_counter = order_counter + 1
    # Locate the depot's row index in the travel-time matrix.
    for z in range(len(travel_time_matrix)):
        if travel_time_matrix[z][0] == depot_key:
            depot_number = z
    #f = open("../../parsed_files/"+key+".txt", "w")
    # Emit the solver input file: scalars, then section-per-section lists.
    f = open("../data/model_apply_outputs/parsed_files_val/route_"+str(i)+".txt", "w")
    f.write("number_of_clients\n")
    f.write(str(number_of_clients) + "\n")
    f.write("vehicle_capacity\n")
    f.write(str(vehicle_capacity) + "\n")
    f.write("depot_number\n")
    f.write(str(depot_number) + "\n")
    f.write("route_id\n")
    f.write(str(key) + "\n")
    f.write("travel_times\n")
    for k in travel_time_matrix:
        for j in k:
            f.write(str(j) + " ")
        f.write("\n")
    f.write("time_windows\n")
    for k in time_windows:
        f.write(str(k[0]) + " " + str(k[1]) + "\n")
    f.write("service_time\n")
    for k in planned_service_times:
        f.write(str(k) + "\n")
    f.write("dimensions\n")
    for k in dimensions:
        f.write(str(k) + "\n")
    f.write("latitude_longitude\n")
    for k in latlongs:
        f.write(str(k[0]) + " " + str(k[1]) + "\n")
    f.write("zone_id\n")
    for k in zone_ids:
        f.write(str(k) + "\n")
    f.close()
    # Travel-time matrix also saved as CSV for downstream tooling.
    with open("../data/model_apply_outputs/travel_times_files_val/travel_times_route_"+str(i)+".csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(travel_time_matrix)
    df = pd.DataFrame.from_dict(route_info, orient='index')
    travel_times = pd.DataFrame(travel_time_matrix[1:], columns=travel_time_matrix[0] )
    travel_times.set_index("key", inplace=True)
    df.reset_index(inplace=True)
    df.rename(columns={'index':'customer_id'}, inplace=True)
    #df.sort_values(by=['order'], inplace=True, ignore_index=True)
    #Calculate route times with HARD TIME WINDOWS
    # Fill the double_visit column (row 0 is the depot; "nan" when none).
    capacity_used = 0
    df.at[0, 'double_visit'] = "nan"
    #df.at[0, 'order'] = 0
    df['double_visit']=df['double_visit'].astype('str')
    for j in range(1, len(df)):
        capacity_used += df.at[j, 'dimensions']
        df.at[j, 'double_visit'] = double_visits[df.at[j, 'customer_id']][0] if df.at[j, 'customer_id'] in double_visits else "nan"
        #df.at[j, 'order'] = j
        #if df.at[j, 'customer_id'] in double_visits:
        #    print(i, df.at[j, 'customer_id'])
    df.set_index('customer_id', inplace=True)
    df.to_csv("../data/model_apply_outputs/df_routes_files_val/df_"+str(i)+".csv")
    #print(mp.current_process().name)
    #output.put([mp.current_process().name])
def parse_json(i, output, json_path):
    """Load a JSON-lines file into a DataFrame and push ``(i, df)`` onto `output`.

    `i` tags the result so the consumer can tell which file finished;
    `output` is any queue-like object with a ``put`` method.
    """
    with open(json_path) as handle:
        records = [json.loads(line) for line in handle]
        frame = pd.DataFrame(records)
    output.put((i, frame))
def read_json(number_of_records):
    """Load the three model_apply input JSON files and parse every route.

    Sequentially calls ``parallel_parsing`` per route; the multiprocessing
    variant is kept below as commented-out scaffolding.
    ``number_of_records`` is currently unused.
    """
    parsing_time_start = time.time()
    with open('../data/model_apply_inputs/new_package_data.json') as f:
        package_data_list = pd.DataFrame([json.loads(l) for l in f.readlines()])
    #parsing_time = time.time() - parsing_time_start
    with open('../data/model_apply_inputs/new_route_data.json') as f:
        route_data_list = pd.DataFrame([json.loads(l) for l in f.readlines()])
    #parsing_time = time.time() - parsing_time_start
    with open('../data/model_apply_inputs/new_travel_times.json') as f:
        travel_times_list = pd.DataFrame([json.loads(l) for l in f.readlines()])
    parsing_time = time.time() - parsing_time_start
    print("Parsing time " + str(parsing_time))
    """
    #Initialize queue
    output = mp.Queue()
    #lock = mp.Lock()
    # Create parallel activities
    processes = []
    parsing_time_start = time.time()
    #all_json_paths = ['../data/model_apply_inputs/new_package_data.json', '../data/model_apply_inputs/new_route_data.json', '../data/model_apply_inputs/new_travel_times.json']
    all_json_paths = ['../data/model_build_inputs/package_data.json', '../data/model_build_inputs/package_data.json', '../data/model_build_inputs/package_data.json']
    for j in range(3):
        json_path = all_json_paths[j]
        p = mp.Process(target=parse_json, args=(j, output, json_path))
        processes.append(p)
        p.start()
    for p in processes:
        (j, json_df) = output.get(block=True, timeout=None)
        if j == 0:
            package_data_list = json_df
        elif j == 1:
            route_data_list = json_df
        elif j == 2:
            travel_times_list = json_df
        print("Finished json " + str(j))
    for p in processes:
        p.join()
    parsing_time = time.time() - parsing_time_start
    print("Parsing time " + str(parsing_time))
    """
    # One column per route id; iterate them by position.
    total_routes = route_data_list.shape[1]
    #m_total_routes = [*range(0, total_routes)]
    for i in pbar(range(total_routes)):
        key = package_data_list.keys()[i]
        #key = 'RouteID_fffd257c-3041-4736-be7a-5efea8af1173'
        number_of_clients = len(package_data_list[key][0].keys())
        vehicle_capacity = route_data_list[key][0]['executor_capacity_cm3']
        parallel_parsing(i, key, number_of_clients, vehicle_capacity, package_data_list[key][0], route_data_list[key][0], travel_times_list[key][0])
        #p = mp.Process(target=parallel_parsing, args=(i, key, number_of_clients, vehicle_capacity, package_data_list[key][0], route_data_list[key][0], travel_times_list[key][0]))
        #processes.append(p)
        #p.start()
    """
    for p in processes:
        #print("results")
        [finished_iteration] = output.get(block=True, timeout=None)
        print("Finished iteration " + str(finished_iteration))
    for p in processes:
        p.join()
    """
if __name__ == '__main__':
    # First CLI argument selects the randomized-output directory suffix.
    randomization = int(sys.argv[1])
    # Ensure every output directory exists before parsing starts.
    output_dirs = [
        "../data/model_apply_outputs/parsed_files_val",
        "../data/model_apply_outputs/df_routes_files_val",
        "../data/model_apply_outputs/travel_times_files_val",
        "../data/model_apply_outputs/grasp_routes_prob",
        "../data/model_apply_outputs/grasp_routes_prob_random_" + str(randomization),
    ]
    for directory in output_dirs:
        Path(directory).mkdir(parents=True, exist_ok=True)
    print("Parsing files")
    total_time_start = time.time()
    read_json(0)
    total_time = time.time() - total_time_start
    print("Total time " + str(total_time))
SMTPListener.py | import logging
import sys
import os
import threading
import SocketServer
import ssl
import socket
from . import *
class SMTPListener(object):
    """Fake SMTP service listener (Python 2 codebase).

    Fixes: ``start()`` previously resolved absolute key/cert paths and then
    passed the bare relative filenames to ``ssl.wrap_socket``; it now uses
    the resolved paths. The "Could not locate" log messages also printed
    the variable *after* it had been set to None — they now name the file.
    """

    def taste(self, data, dport):
        """Score how likely `data` on port `dport` is SMTP traffic.

        +1 if the port is a standard SMTP/submission/SMTPS port, +2 for
        every known SMTP command the payload starts with.
        """
        # Once the TCP connection has been established, the server initiates
        # the conversation with '220' message. However, if the client connects
        # to a nonstandard port there is no way for the proxy to know that
        # SMTP is the protocol until the client sends a message.
        commands = ['HELO', 'EHLO', 'MAIL FROM', 'RCPT TO', 'TURN', 'ATRN',
                    'SIZE', 'ETRN', 'PIPELINING', 'CHUNKING', 'DATA', 'DSN',
                    'RSET', 'VRFY', 'HELP', 'QUIT', 'X-EXPS GSSAPI',
                    'X-EXPS=LOGIN', 'X-EXCH50', 'X-LINK2STATE']
        ports = [25, 587, 465]
        confidence = 1 if dport in ports else 0
        for command in commands:
            if data.lstrip().startswith(command):
                confidence += 2
                continue
        return confidence

    def __init__(
            self,
            config,
            name='SMTPListener',
            logging_level=logging.INFO,
    ):
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging_level)

        self.config = config
        self.name = name
        self.local_ip = config.get('ipaddr')
        self.server = None
        self.name = 'SMTP'
        self.port = self.config.get('port', 25)

        self.logger.info('Starting...')
        self.logger.debug('Initialized with config:')
        for key, value in config.iteritems():
            self.logger.debug('  %10s: %s', key, value)

    def start(self):
        """Bind the threaded TCP server (optionally SSL-wrapped) and serve."""
        self.logger.debug('Starting...')

        self.server = ThreadedTCPServer((self.local_ip, int(self.config['port'])), ThreadedTCPRequestHandler)

        if self.config.get('usessl') == 'Yes':
            self.logger.debug('Using SSL socket')

            keyfile_path = ListenerBase.abs_config_path('listeners/ssl_utils/privkey.pem')
            if keyfile_path is None:
                self.logger.error('Could not locate listeners/ssl_utils/privkey.pem')
                sys.exit(1)

            certfile_path = ListenerBase.abs_config_path('listeners/ssl_utils/server.pem')
            if certfile_path is None:
                self.logger.error('Could not locate listeners/ssl_utils/server.pem')
                sys.exit(1)

            # Use the resolved absolute paths (previously the hard-coded
            # relative names 'privkey.pem'/'server.pem' were passed instead).
            self.server.socket = ssl.wrap_socket(self.server.socket, keyfile=keyfile_path, certfile=certfile_path, server_side=True, ciphers='RSA')

        self.server.logger = self.logger
        self.server.config = self.config

        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self):
        """Shut down the server loop and release the listening socket."""
        self.logger.info('Stopping...')
        if self.server:
            self.server.shutdown()
            self.server.server_close()
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    # Minimal fake-SMTP conversation handler (Python 2: note the
    # `except Exception, e` syntax at the bottom).

    def handle(self):
        # Timeout connection to prevent hanging
        self.request.settimeout(int(self.server.config.get('timeout', 5)))

        try:
            # Server speaks first: send the configured (or default) banner.
            self.request.sendall("%s\r\n" % self.server.config.get('banner',"220 FakeNet SMTP Service Ready"))

            while True:
                data = self.request.recv(4096)

                for line in data.split("\n"):
                    self.server.logger.debug(line)

                # Commands are matched on their first four bytes only.
                command = data[:4].upper()

                if command == '':
                    # Peer closed the connection.
                    break

                elif command in ['HELO','EHLO']:
                    self.request.sendall("250 evil.com\r\n")

                elif command in ['MAIL', 'RCPT', 'NOOP', 'RSET']:
                    self.request.sendall("250 OK\r\n")

                elif command == 'QUIT':
                    # NOTE(review): no break here — the loop only ends when
                    # the client actually closes the connection.
                    self.request.sendall("221 evil.com bye\r\n")

                elif command == "DATA":
                    self.request.sendall("354 start mail input, end with <CRLF>.<CRLF>\r\n")

                    # Collect the message body until the <CRLF>.<CRLF>
                    # terminator or the peer disconnects.
                    mail_data = ""
                    while True:
                        mail_data_chunk = self.request.recv(4096)

                        if not mail_data_chunk:
                            break

                        mail_data += mail_data_chunk

                        if "\r\n.\r\n" in mail_data:
                            break

                    self.server.logger.info('Received mail data.')
                    for line in mail_data.split("\n"):
                        self.server.logger.info(line)

                    self.request.sendall("250 OK\r\n")

                else:
                    self.request.sendall("503 Command not supported\r\n")

        except socket.timeout:
            self.server.logger.warning('Connection timeout')

        except socket.error as msg:
            self.server.logger.error('Error: %s', msg.strerror or msg)

        except Exception, e:
            self.server.logger.error('Error: %s', e)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # TCP server that spawns one handler thread per client connection.
    pass
###############################################################################
# Testing code
def test(config):
    """Smoke test: send one email through the local listener over SMTPS."""
    import smtplib
    logger = logging.getLogger('SMTPListenerTest')

    server = smtplib.SMTP_SSL('localhost', config.get('port', 25))

    message = "From: test@test.com\r\nTo: test@test.com\r\n\r\nTest message\r\n"

    logger.info('Testing email request.')
    logger.info('-'*80)
    server.set_debuglevel(1)
    server.sendmail('test@test.com','test@test.com', message)
    server.quit()
    logger.info('-'*80)
def main():
    """Run a standalone SSL SMTP listener until Ctrl-C, then self-test it."""
    logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
    config = {'port': '25', 'usessl': 'Yes', 'timeout': 10 }

    listener = SMTPListener(config)
    listener.start()

    ###########################################################################
    # Run processing
    import time

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass

    ###########################################################################
    # Run tests
    test(config)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
run_squad_ColabTCPTrans_Quick.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os, types
import random
import modeling
import optimization
import tokenization
import six
import copy
import tensorflow as tf
import numpy as np
import scipy.sparse as sp
# do excel
from openpyxl import Workbook
import uuid
# do
import code
import prettytable
from decimal import *
import decimal
# High precision for Decimal score arithmetic later in the script.
getcontext().prec = 50

# Willy Define: ad-hoc 0/1 debug switches consulted throughout this script.
example_in_set_eval_examples = 1
example_in_write_predictions = 0
predict_result_index = 0
checkState_in_AtenResult = 0
checkState_in_AtenResult2 = 0
checkState_in_GetAnswer = 0
checkState_add_retriever = 0

willy_check_code = "willy test on 201910051332"

from drqa import retriever

# Globals populated when the document retriever / SQLite doc DB is used.
DOC2IDX = None
documents = []
db_class = retriever.get_class('sqlite')
# Command-line flag definitions (tf.flags). Fix: the max_seq_length help
# text previously read "WordPido_interactiveece" — a paste accident inside
# "WordPiece" — now corrected. Help strings are documentation only; flag
# names and defaults are unchanged.
flags = tf.flags

FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters
flags.DEFINE_string("train_file", None,
                    "SQuAD json for training. E.g., train-v1.1.json")

flags.DEFINE_string(
    "predict_file", None,
    "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 384,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_integer(
    "doc_stride", 128,
    "When splitting up a long document into chunks, how much stride to "
    "take between chunks.")

flags.DEFINE_integer(
    "max_query_length", 64,
    "The maximum number of tokens for the question. Questions longer than "
    "this will be truncated to this length.")

flags.DEFINE_bool("do_train", False, "Whether to run training.")

flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("predict_batch_size", 8,
                     "Total batch size for predictions.")

flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")

flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_integer(
    "n_best_size", 20,
    "The total number of n-best predictions to generate in the "
    "nbest_predictions.json output file.")

flags.DEFINE_integer(
    "max_answer_length", 30,
    "The maximum length of an answer that can be generated. This is needed "
    "because the start and end predictions are not conditioned on one another.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")

flags.DEFINE_bool(
    "verbose_logging", False,
    "If true, all of the warnings related to data processing will be printed. "
    "A number of warnings are expected for a normal SQuAD evaluation.")

flags.DEFINE_bool(
    "version_2_with_negative", False,
    "If true, the SQuAD examples contain some that do not have an answer.")

flags.DEFINE_float(
    "null_score_diff_threshold", 0.0,
    "If null_score - best_non_null is greater than the threshold predict null.")

# Willy-added retriever / interaction flags below.
flags.DEFINE_bool(
    "do_retriever", False,
    "If True, use retriever to help reader to filte good doc - add by willy.")

flags.DEFINE_string(
    "retriever_model", None,
    "retriever model path - add by willy.")

flags.DEFINE_float(
    "retriever_weight", 0.0,
    "retriever weight - add by willy.")

flags.DEFINE_integer("retriever_ranker", 1,"Rank with retriever.")

flags.DEFINE_string("document_type","SQuAD", "There are three document types: (1)paragraphs in SQuAD (2)SQlite (DataBase) (3) Text - add by willy." )

flags.DEFINE_string("question_type","SQuAD", "There are three question types: (1) SQuAD (2)one_question (3) interactive." )

flags.DEFINE_string("question", None, "give question to predict - Willy Test.")

flags.DEFINE_string("db_file", None, "give path with data base file to set SQlite State - Willy Test.")

flags.DEFINE_string("question_table", None, "set table path - Willy Test.")

flags.DEFINE_string("excel_name", None ,"set excel name -Willy Test.")

flags.DEFINE_integer("show_all_choice", 0, "show all choice-Willy Test.")

flags.DEFINE_float(
    "choice_score", 0.15,
    "choice score. - add by willy.")

flags.DEFINE_float(
    "threshold_prob_ans_merge", 0.5,
    "threshold prob ans_merge - add by willy.")

flags.DEFINE_string("Host_TCPServer", '127.0.0.1' ,"Set TCP Host-Willy Test.")

flags.DEFINE_integer("PORT_TCPServer", 1234, "Set TCP Port-Willy Test.")
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes `decimal.Decimal` values as floats.

    Anything that is not a Decimal is delegated to the base encoder,
    which raises TypeError for unsupported types as usual.
    """

    def default(self, obj):
        if not isinstance(obj, decimal.Decimal):
            return super(DecimalEncoder, self).default(obj)
        return float(obj)
class SquadExample(object):
    """A single training/test example for simple sequence classification.

    For examples without an answer, the start and end position are -1.

    Attributes mirror the constructor arguments; `doc_id` is a custom
    addition used by the retriever path (set_squad_examples).
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_id=None,  # willy add; BUG FIX: default added because
                               # read_squad_examples constructs SquadExample
                               # without doc_id, which raised TypeError.
                 doc_tokens=None,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 is_impossible=False):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_id = doc_id  # willy add
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
        s += ", question_text: %s" % (
            tokenization.printable_text(self.question_text))
        s += ", doc_id:[%s]" % (tokenization.printable_text(self.doc_id))  # willy add
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        # BUG FIX: all three guards below tested `self.start_position`
        # (copy-paste); each now tests the field it actually prints.
        if self.start_position:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position:
            s += ", end_position: %d" % (self.end_position)
        if self.is_impossible:
            s += ", is_impossible: %r" % (self.is_impossible)
        return s
class InputFeatures(object):
    """A single set of features of data.

    One InputFeatures corresponds to one (example, doc span) pair emitted
    by convert_examples_to_features; it is what actually gets serialized
    and fed to the model.
    """

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 input_mask,
                 segment_ids,
                 start_position=None,
                 end_position=None,
                 is_impossible=None):
        self.unique_id = unique_id                        # globally unique feature id
        self.example_index = example_index                # index into the examples list
        self.doc_span_index = doc_span_index              # which sliding-window span of the doc
        self.tokens = tokens                              # [CLS] query [SEP] doc-span [SEP]
        self.token_to_orig_map = token_to_orig_map        # feature token idx -> original whitespace-token idx
        self.token_is_max_context = token_is_max_context  # feature token idx -> bool (best span for that token)
        self.input_ids = input_ids                        # wordpiece vocab ids, zero-padded to max_seq_length
        self.input_mask = input_mask                      # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids                    # 0 = query side, 1 = document side
        self.start_position = start_position              # answer start within `tokens` (training only)
        self.end_position = end_position                  # answer end within `tokens` (training only)
        self.is_impossible = is_impossible                # SQuAD v2 unanswerable flag
def TakeThird(val):
    """Return the element at index 2 of *val*.

    Used as a sort key: the per-question prediction tuples keep their
    doc_score in the third slot.
    """
    third_item = val[2]
    return third_item
def set_squad_examples(input_file, question):
    """Build one placeholder SquadExample per retrieved paragraph.

    Reads the retrieved documents from "Output1.txt" (one paragraph per
    non-empty line), whitespace-tokenizes each paragraph, and pairs every
    paragraph with the single *question*. All examples are unanswered
    (start/end = -1) because they exist only for prediction.

    Args:
      input_file: path to a SQuAD-format json file. Its contents are not
        used below; the read is kept so a missing/corrupt file still
        fails fast, as in the original code.
      question: the question text shared by all produced examples.

    Returns:
      A list of SquadExample, one per paragraph of Output1.txt.
    """
    # FIX: both files are now opened with context managers; the original
    # never closed the tf.gfile reader.
    with tf.gfile.Open(input_file, "r") as reader:
        input_data = json.load(reader)["data"]  # noqa: F841 (intentionally unused)

    def is_whitespace(c):
        # Standard whitespace plus U+202F (narrow no-break space).
        return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F

    with open("Output1.txt", "r") as doc_file:
        document = doc_file.read()
    # One paragraph per non-empty line.
    paragraphs = [p for p in document.split('\n') if p]

    # Whitespace-tokenize each paragraph: a new token starts after any
    # whitespace run; other characters extend the current token.
    doc_tokensList = []
    for paragraph_text in paragraphs:
        doc_tokens = []
        prev_is_whitespace = True
        for c in paragraph_text:
            if is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
        doc_tokensList.append(doc_tokens)

    # One unanswered example per document, all sharing the same question.
    # DOC2IDX is a module-level mapping defined elsewhere in this file --
    # presumably paragraph index -> retriever doc id; confirm.
    examples = []
    for i, doc_tokens in enumerate(doc_tokensList):
        examples.append(
            SquadExample(
                qas_id=str(uuid.uuid1()),
                question_text=question,
                doc_id=DOC2IDX[i],
                doc_tokens=doc_tokens,
                orig_answer_text="",
                start_position=-1,
                end_position=-1,
                is_impossible=False))
    return examples
def read_squad_examples(input_file, is_training):
    """Read a SQuAD json file into a list of SquadExample.

    Args:
      input_file: path to a SQuAD-format json file.
      is_training: if True, gold answer spans are extracted and validated;
        if False, positions/answer text are left as None.

    Returns:
      A list of SquadExample, one per (paragraph, question) pair.
    """
    with tf.gfile.Open(input_file, "r") as reader:
        input_data = json.load(reader)["data"]

    def is_whitespace(c):
        # Standard whitespace plus U+202F (narrow no-break space).
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False

    examples = []
    for entry in input_data:
        for paragraph in entry["paragraphs"]:
            paragraph_text = paragraph["context"]
            # Whitespace-tokenize the context, also recording for every
            # character which token it falls in, so character-based answer
            # offsets can be mapped to token positions below.
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
                char_to_word_offset.append(len(doc_tokens) - 1)

            for qa in paragraph["qas"]:
                qas_id = qa["id"]
                question_text = qa["question"]
                start_position = None
                end_position = None
                orig_answer_text = None
                is_impossible = False
                if is_training:
                    if FLAGS.version_2_with_negative:
                        is_impossible = qa["is_impossible"]
                    if (len(qa["answers"]) != 1) and (not is_impossible):
                        raise ValueError(
                            "For training, each question should have exactly 1 answer.")
                    if not is_impossible:
                        answer = qa["answers"][0]
                        orig_answer_text = answer["text"]
                        answer_offset = answer["answer_start"]
                        answer_length = len(orig_answer_text)
                        start_position = char_to_word_offset[answer_offset]
                        end_position = char_to_word_offset[answer_offset + answer_length - 1]
                        # Only add answers where the text can be exactly recovered from the
                        # document. If this CAN'T happen it's likely due to weird Unicode
                        # stuff so we will just skip the example.
                        #
                        # Note that this means for training mode, every example is NOT
                        # guaranteed to be preserved.
                        actual_text = " ".join(
                            doc_tokens[start_position:(end_position + 1)])
                        cleaned_answer_text = " ".join(
                            tokenization.whitespace_tokenize(orig_answer_text))
                        if actual_text.find(cleaned_answer_text) == -1:
                            tf.logging.warning("Could not find answer: '%s' vs. '%s'",
                                               actual_text, cleaned_answer_text)
                            continue
                    else:
                        start_position = -1
                        end_position = -1
                        orig_answer_text = ""

                # NOTE(review): unlike set_squad_examples, no doc_id is
                # passed here, although SquadExample.__init__ declares one
                # (custom addition) -- verify SquadExample tolerates a
                # missing doc_id, otherwise this call raises TypeError.
                example = SquadExample(
                    qas_id=qas_id,
                    question_text=question_text,
                    doc_tokens=doc_tokens,
                    orig_answer_text=orig_answer_text,
                    start_position=start_position,
                    end_position=end_position,
                    is_impossible=is_impossible)
                examples.append(example)

    return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 output_fn):
    """Loads a data file into a list of `InputBatch`s.

    Each example is WordPiece-tokenized and split into overlapping doc
    spans of at most `max_seq_length` tokens; one InputFeatures per span
    is built and handed to `output_fn` (callback style -- nothing is
    returned).

    Args:
      examples: list of SquadExample.
      tokenizer: WordPiece tokenizer (project `tokenization` module).
      max_seq_length: total token budget including [CLS] and two [SEP]s.
      doc_stride: stride between successive sliding-window doc spans.
      max_query_length: question is truncated to this many tokens.
      is_training: if True, per-span answer positions are computed.
      output_fn: callable taking a single InputFeatures.
    """
    unique_id = 1000000000  # globally unique across all examples/spans

    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)

        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]

        # Split every whitespace token into WordPieces, keeping both
        # directions of the index mapping.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)

        tok_start_position = None
        tok_end_position = None
        if is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if is_training and not example.is_impossible:
            # Project the whitespace-token answer span onto sub-tokens,
            # then tighten it to the best-matching sub-span.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example.orig_answer_text)

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)

        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Layout: [CLS] query [SEP] doc-span [SEP]; segment id 0 covers
            # the query side, 1 the document side.
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)

            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]

                # Record whether this span gives the token its maximum
                # context, so overlapping spans can be disambiguated when
                # predictions are merged.
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            start_position = None
            end_position = None
            if is_training and not example.is_impossible:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start and
                        tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    start_position = 0
                    end_position = 0
                else:
                    # Shift from doc coordinates to feature coordinates
                    # ([CLS] + query + [SEP] precede the doc tokens).
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset

            if is_training and example.is_impossible:
                start_position = 0
                end_position = 0

            if example_index < 10:
                # Log the first few features for manual inspection.
                tf.logging.info("*** Example ***")
                tf.logging.info("unique_id: %s" % (unique_id))
                tf.logging.info("example_index: %s" % (example_index))
                tf.logging.info("doc_span_index: %s" % (doc_span_index))
                tf.logging.info("tokens: %s" % " ".join(
                    [tokenization.printable_text(x) for x in tokens]))
                tf.logging.info("token_to_orig_map: %s" % " ".join(
                    ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
                tf.logging.info("token_is_max_context: %s" % " ".join([
                    "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
                ]))
                tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                tf.logging.info(
                    "input_mask: %s" % " ".join([str(x) for x in input_mask]))
                tf.logging.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                if is_training and example.is_impossible:
                    tf.logging.info("impossible example")
                if is_training and not example.is_impossible:
                    answer_text = " ".join(tokens[start_position:(end_position + 1)])
                    tf.logging.info("start_position: %d" % (start_position))
                    tf.logging.info("end_position: %d" % (end_position))
                    tf.logging.info(
                        "answer: %s" % (tokenization.printable_text(answer_text)))

            feature = InputFeatures(
                unique_id=unique_id,
                example_index=example_index,
                doc_span_index=doc_span_index,
                tokens=tokens,
                token_to_orig_map=token_to_orig_map,
                token_is_max_context=token_is_max_context,
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                start_position=start_position,
                end_position=end_position,
                is_impossible=example.is_impossible)

            # Run callback
            output_fn(feature)

            unique_id += 1
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 use_one_hot_embeddings):
    """Creates a classification model.

    Builds BERT and adds the SQuAD span-prediction head: a single
    [2, hidden] projection applied to every token position, yielding a
    start score and an end score per token.

    Returns:
      (start_logits, end_logits) -- per-token score tensors.
    """
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)

    final_hidden = model.get_sequence_output()

    final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
    batch_size = final_hidden_shape[0]
    seq_length = final_hidden_shape[1]
    hidden_size = final_hidden_shape[2]

    output_weights = tf.get_variable(
        "cls/squad/output_weights", [2, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    output_bias = tf.get_variable(
        "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())

    # Flatten to [batch*seq, hidden] so one matmul scores every position.
    final_hidden_matrix = tf.reshape(final_hidden,
                                     [batch_size * seq_length, hidden_size])
    logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)

    logits = tf.reshape(logits, [batch_size, seq_length, 2])
    logits = tf.transpose(logits, [2, 0, 1])

    # Channel 0 holds start scores, channel 1 end scores.
    unstacked_logits = tf.unstack(logits, axis=0)

    (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])

    return (start_logits, end_logits)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator.

    The closure captures the model/optimizer hyper-parameters; the
    estimator supplies `features`, `labels`, `mode` and `params` at run
    time. Only TRAIN and PREDICT modes are supported.
    """

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

        unique_ids = features["unique_ids"]
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        (start_logits, end_logits) = create_model(
            bert_config=bert_config,
            is_training=is_training,
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            use_one_hot_embeddings=use_one_hot_embeddings)

        tvars = tf.trainable_variables()

        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Warm-start from the pre-trained BERT checkpoint; on TPU the
            # restore must happen inside a Scaffold factory.
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:

                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            seq_length = modeling.get_shape_list(input_ids)[1]

            def compute_loss(logits, positions):
                # Cross-entropy of the gold position against per-token logits.
                one_hot_positions = tf.one_hot(
                    positions, depth=seq_length, dtype=tf.float32)
                log_probs = tf.nn.log_softmax(logits, axis=-1)
                loss = -tf.reduce_mean(
                    tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
                return loss

            start_positions = features["start_positions"]
            end_positions = features["end_positions"]

            start_loss = compute_loss(start_logits, start_positions)
            end_loss = compute_loss(end_logits, end_positions)

            # Average of the start- and end-position losses.
            total_loss = (start_loss + end_loss) / 2.0

            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {
                "unique_ids": unique_ids,
                "start_logits": start_logits,
                "end_logits": end_logits,
            }
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
        else:
            raise ValueError(
                "Only TRAIN and PREDICT modes are supported: %s" % (mode))

        return output_spec

    return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    # Schema of the serialized tf.train.Examples; training records
    # additionally carry the gold start/end positions.
    name_to_features = {
        "unique_ids": tf.FixedLenFeature([], tf.int64),
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
    }

    if is_training:
        name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
        name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)

    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)

        # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
        # So cast all int64 to int32.
        for name in list(example.keys()):
            t = example[name]
            if t.dtype == tf.int64:
                t = tf.to_int32(t)
            example[name] = t

        return example

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]

        # For training, we want a lot of parallel reading and shuffling.
        # For eval, we want no shuffling and parallel reading doesn't matter.
        d = tf.data.TFRecordDataset(input_file)
        if is_training:
            d = d.repeat()
            d = d.shuffle(buffer_size=100)

        d = d.apply(
            tf.data.experimental.map_and_batch(
                lambda record: _decode_record(record, name_to_features),
                batch_size=batch_size,
                drop_remainder=drop_remainder))

        return d

    return input_fn
# Raw per-feature model output; `unique_id` ties the logits back to the
# InputFeatures they were computed for.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case,client,ranker
):
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
tf.logging.info("length of all_results: %d" % (len(all_results)))
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
# Willy Addd collections -> for results
#-------------------------------------------------------------------------------
_AllPredictions = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictions",
["question", "PredictListOneQues"])
_AllPredictResultsInOneQuestion = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictResultsInOneQuestion",
["doc_text", "doc_id", "doc_score", "PredictListOneDoc"])
_AllPredictResultsInOneDocument = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictResultsInOneDocument",
["answer", "prob", "start", "end"])
_FinalResult = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult",
["question", "text", "text_id", "ans", "prob"])
_FinalResult2 = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult2",
["question", "text", "ans", "prob"])
_FinalResult3 = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult3",
["question", "text", "ans", "ans_prob", "TFIDF", "Score", "choice"])
_FinalResultAll = collections.namedtuple( # pylint: disable=invalid-name
"FinalResultAll",
["question", "text1", "ans1", "ans_prob1", "TFIDF1", "Score1", "text2", "ans2", "ans_prob2", "TFIDF2", "Score2", "choice"])
_TempAllpredict_Layer1 = collections.namedtuple( # pylint: disable=invalid-name
"TempAllpredict_Layer1",
["question" , "TempAllpredictList_Layer2"])
_TempAllpredict_Layer2 = collections.namedtuple( # pylint: disable=invalid-name
"TempAllpredict_Layer2",
["doc_id","doc_text","best_ans","best_prob"])
#-------------------------------------------------------------------------------
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
all_predicts = []
all_predictsInOneQues = []
quesList = []
Aten_result_list = []
Aten_result3_list = []
TempAllpredictLayer1_list = []
TempAllpredictLayer2_list = []
best_answer=""
best_prob=0.0
ans_is_null = True
ranker = retriever.get_class('tfidf')(tfidf_path=FLAGS.retriever_model)
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
if example_in_write_predictions == 1:
print ("example idx:%d" %example_index)
print("question in example from predict")
print(example.question_text)
print("doc_tokens in example from predict")
print(example.doc_tokens)
print('-'*60)
print('\n')
doc_names, doc_scores = ranker.closest_docs( example.question_text, 10 )
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min mull score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't inlude the empty option in the n-best, inlcude it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
#參考
probs = _compute_softmax(total_scores)
nbest_json = []
for i, entry in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
#----------------------------------------------
# presupposition : Question is in order
#"question", "PredictResults"
if example.question_text not in quesList :
if len(quesList)!=0 :
#1. Save to all predicts
#print('all_predictsInOneQues-')
#print(all_predictsInOneQues)
temp = copy.deepcopy(all_predictsInOneQues)
#print('temp')
#print(temp)
all_predicts.append(
_AllPredictions(
question=quesList[-1],
PredictListOneQues=temp
)
)
#2.TODO : Find the result (move to outside)
#3. reset all_predictsInOneQues
all_predictsInOneQues.clear()
#. Add to questList
quesList.append(example.question_text)
#----------------------------------------------
# save answer dataset
#----------------------------------------------
all_predictsInOneDoc = []
#print('go to (1)')
for i, entry in enumerate(nbest):
if predict_result_index == 1:
print(entry)
if i==2:
if predict_result_index == 1:
print('In state 2')
break
tp_answer = entry.text
if i==0 :
if tp_answer.isspace() or not tp_answer:
if predict_result_index == 1:
print('In state 0,tp_ans: %s' %tp_answer)
continue
if i == 1 and len(all_predictsInOneDoc)!=0:
if predict_result_index == 1:
print('In state 1,tp_ans: %s' %tp_answer)
break
if predict_result_index == 1:
print('In state set pridict. tp_ans: %s' %tp_answer )
all_predictsInOneDoc.append(
_AllPredictResultsInOneDocument(
answer=entry.text,
prob=Decimal(probs[i]),
start = entry.start_logit,
end = entry.end_logit
)
)
#print('go to (2)')
#----------------------------------------------
# End of save answer dataset
if predict_result_index == 1:
for i, entry in enumerate(all_predictsInOneDoc):
print('index:%d' %i)
print("answer: %s" %(entry.answer))
print("prob: %s" %(entry.prob))
print("start: %s" %(entry.start))
print("end: %s" %(entry.end))
print('\n')
print('-'*15)
print('\n')
# append predicts to OneQues
#print('go to (3)')
#----------------------------------------------
tp_docscore = 0.0
if example.doc_id in doc_names :
tp_docindex = doc_names.index(example.doc_id)
tp_docscore = doc_scores [tp_docindex]
#print('go to (4)')
#print('go to (5)')
#print('all_predictsInOneQues-in set')
#print(all_predictsInOneQues)
all_predictsInOneQues.append(
_AllPredictResultsInOneQuestion(
doc_text=example.doc_tokens,
doc_id=example.doc_id,
doc_score=tp_docscore,
PredictListOneDoc=all_predictsInOneDoc
)
)
#print('go to (6)')
#print('all_predictsInOneQues-in set')
#print(all_predictsInOneQues)
#----------------------------------------------
# if example is examples last data
if example == all_examples[-1] :
all_predicts.append(
_AllPredictions(question=example.question_text,PredictListOneQues=all_predictsInOneQues))
#----------------------------------------------
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
if best_non_null_entry == None :
score_diff = FLAGS.null_score_diff_threshold + 1.0
else:
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
#TODO: Find the best answer from Aten collections
#----------------------------------------------
const_AtenQuest_index = [3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,
4,3,6,5,5,4,5,5,5,4,
5,5,3,5,4,5,5,5,5,5,
1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1]
const_AtenIntent_index = [1,1,1,0,1,1,1,1,1,1,
1,1,1,1,1,1,1,0,1,1,
1,1,1,1,1,0,1,1,1,0,
1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1]
excel_NOtGoodAns_index = [0,0,0,3,0,0,0,0,0,0,
0,0,0,0,0,0,0,3,0,0,
0,0,3,2,0,4,3,0,2,4,
0,0,2,0,3,1,0,2,0,4,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0]
excel_count = 0
excel_index = 1
excel_Answer_count = const_AtenQuest_index[excel_index-1]
excel_Intent_count = const_AtenIntent_index[excel_index-1]
excel_NOtGoodAns_count = excel_NOtGoodAns_index[excel_index-1]
wb = Workbook()
ws = wb.active
retriever_weight = FLAGS.retriever_weight
#print('len of all_predicts:%d' %len(all_predicts))
print('\n')
print('\n')
intent_count = 1
for i, entry_predicts in enumerate(all_predicts):
tp_ques = entry_predicts.question
QuesList = entry_predicts.PredictListOneQues
#print("ques: %s" %(tp_ques))
# set score only with bert , TF-IDF used to be choice doc.
#----------------------------------------------
QuesList.sort(key=TakeThird, reverse=True)
#print('len with QuesList:%d' %len(QuesList))
tp_text1 = QuesList[0].doc_text
text1=""
for word in tp_text1:
text1= text1 + " " + word
ans1=""
ans1_prob = 0.0
TFIDF1 = QuesList[0].doc_score
Score1 = 0.0
entry_OneDoc = QuesList [0].PredictListOneDoc
if len(entry_OneDoc) != 0 :
ans1 = entry_OneDoc[0].answer
ans1_prob = entry_OneDoc[0].prob
for k, entry_OneAns in enumerate(entry_OneDoc):
#print('index:%d' %k)
tp_ans1_prob = Decimal(entry_OneAns.prob)
if tp_ans1_prob > ans1_prob:
ans1_prob = tp_ans1_prob
ans1 = entry_OneAns.answer
#print('Ans_ans:%s' %(entry_OneAns.answer))
#print('Ans_prob:%e , start:%e , end:%e' %(entry_OneAns.prob , entry_OneAns.start , entry_OneAns.end))
Score1 = ans1_prob
#----------------------------------------------
# set score with bert and TF-IDF
#----------------------------------------------
text2=""
ans2=""
ans2_prob = 0.0
TFIDF2 = 0.0
Score2 = 0.0
for j , entry_OneDoc in enumerate(QuesList):
tp_TFIDF2 = entry_OneDoc.doc_score
tp_text2=""
for word in entry_OneDoc.doc_text:
tp_text2 = tp_text2 + " " + word
DocList = []
DocList = entry_OneDoc.PredictListOneDoc
for k, entry_OneAns in enumerate(DocList):
tp_ans2_prob = Decimal(entry_OneAns.prob)
tp_Score2 = Decimal(retriever_weight)*Decimal(tp_TFIDF2) + Decimal(1.0-retriever_weight)*Decimal(tp_ans2_prob)
if tp_Score2>Score2:
text2=tp_text2
ans2=entry_OneAns.answer
ans2_prob=tp_ans2_prob
TFIDF2=tp_TFIDF2
Score2 =tp_Score2
#----------------------------------------------
fin_text = text1
fin_ans = ans1
fin_ans_prob = ans1_prob
fin_TFIDF = TFIDF1
fin_Score = Score1
choice_value = 0
if TFIDF1<FLAGS.choice_score:
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
elif ans2_prob>ans1_prob*2:
if ans2_prob > FLAGS.threshold_prob_ans_merge:
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
if FLAGS.show_all_choice == 0:
Aten_result3_list.append(
_FinalResult3(
question = tp_ques,
text = fin_text,
ans = fin_ans,
ans_prob = fin_ans_prob,
TFIDF = fin_TFIDF,
Score = fin_Score,
choice = choice_value
)
)
else :
Aten_result3_list.append(
_FinalResultAll(
question = tp_ques,
text1 = text1,
ans1 = ans1,
ans_prob1 = ans1_prob,
TFIDF1 = TFIDF1,
Score1 = Score1,
text2 = text2,
ans2 = ans2,
ans_prob2 = ans2_prob,
TFIDF2 = TFIDF2,
Score2 = Score2,
choice = choice_value
)
)
print('ques: %s' %tp_ques)
if FLAGS.show_all_choice==1:
print('-'*5)
print('Only Bert (TF-IDF used to be choice document):')
print('text: %s' %text1)
print('ans: %s' %ans1)
print('ans_prob: %s' %ans1_prob)
print('TFIDF: %s' %TFIDF1)
print('Score: %s' %Score1)
print('')
print('-'*5)
print('Merge TF-IDF:')
print('text: %s' %text2)
print('ans: %s' %ans2)
print('ans_prob: %s' %ans2_prob)
print('TFIDF: %s' %TFIDF2)
print('Score: %s' %Score2)
print('-'*5)
print('My Choice ans(%d):' %choice_value)
print('text: %s' %fin_text)
print('ans: %s' %fin_ans)
print('ans_prob: %s' %fin_ans_prob)
print('TFIDF: %s' %fin_TFIDF)
print('Score: %s' %fin_Score)
# ack message to Colab Client
temp_answer = 'Dr_Answer'+fin_ans
client.send(temp_answer.encode('utf8'))
temp_answer = 'Dr_QA' + fin_text
client.send(temp_answer.encode('utf8'))
print('-'*5)
'''
if excel_Answer_count == excel_count+1 :
print('-'*15)
print('\n')
if excel_Answer_count == excel_count :
ws['C' + str(excel_index)] = excel_Answer_count
ws['D' + str(excel_index)] = excel_NOtGoodAns_count
ws['F' + str(excel_index)] = excel_Intent_count
excel_index = excel_index+1
excel_Answer_count = const_AtenQuest_index[excel_index-1]
excel_NOtGoodAns_count = excel_NOtGoodAns_index[excel_index-1]
excel_Intent_count = const_AtenIntent_index[excel_index-1]
excel_count = 0
if excel_index <= len(const_AtenQuest_index) :
# print('Set my fin_Score with excel: %s' %fin_Score)
index_str = chr(73+excel_count) + str(excel_index)
ws[index_str] = fin_Score
excel_count = excel_count + 1
'''
'''
ws['A60'] = 'All'
ws['A61'] = '40QA'
ws['B59'] = 'Right answer'
ws['B60'] = '=SUM(B1:B40)+SUM(A41:A58)'
ws['B61'] = '=SUM(B1:B40)'
ws['C59'] = 'All answer'
ws['C60'] = '=SUM(C1:C58)-SUM(D1:D40)'
ws['C61'] = '=SUM(C1:C40)-SUM(D1:D40)'
ws['E59'] = 'Right Intent'
ws['E60'] = '=SUM(E1:E40)+SUM(A41:A58)'
ws['E61'] = '=SUM(E1:E40)'
ws['F59'] = 'All intent'
ws['F60'] = '=SUM(F1:F40)+SUM(C41:C58)'
ws['F61'] = '=SUM(F1:F40)'
ws['G59'] = 'answer prob'
ws['G60'] = '=B60/C60'
ws['G61'] = '=B61/C61'
ws['H59'] = 'Intent prob'
ws['H60'] = '=E60/F60'
ws['H61'] = '=E61/F61'
wb.save(FLAGS.excel_name + '.xlsx')
print('\n')
with tf.gfile.GFile(output_Aten_predict_file, "w") as writer:
writer.write(json.dumps(Aten_result3_list, indent=4,cls=DecimalEncoder) + "\n")
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
'''
def get_final_text(pred_text, orig_text, do_lower_case):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: answer span text reconstructed from WordPiece tokens.
        orig_text: span of the original (whitespace-tokenized) document text.
        do_lower_case: must match the tokenizer setting used at prediction time.

    Returns:
        The substring of `orig_text` aligned to `pred_text`, or `orig_text`
        unchanged whenever the character-alignment heuristic fails.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heruistic between
    # `pred_text` and `orig_text` to get a character-to-charcter alignment. This
    # can fail in certain cases in which case we just return `orig_text`.
    def _strip_spaces(text):
        # Returns (text with spaces removed, map: stripped index -> original index).
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)
    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if FLAGS.verbose_logging:
            tf.logging.info(
                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        if FLAGS.verbose_logging:
            tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
                            orig_ns_text, tok_ns_text)
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}  # inverse map: original index in tok_text -> stripped index
    for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        if FLAGS.verbose_logging:
            tf.logging.info("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        if FLAGS.verbose_logging:
            tf.logging.info("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
class FeatureWriter(object):
    """Writes InputFeature to TF example file."""

    def __init__(self, filename, is_training):
        # Destination TFRecord path; also read back later by input_fn_builder.
        self.filename = filename
        self.is_training = is_training
        self.num_features = 0
        self._writer = tf.python_io.TFRecordWriter(filename)

    def process_feature(self, feature):
        """Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
        self.num_features += 1

        def create_int_feature(values):
            # Wrap an iterable of ints as a tf.train int64 feature.
            feature = tf.train.Feature(
                int64_list=tf.train.Int64List(value=list(values)))
            return feature

        features = collections.OrderedDict()
        features["unique_ids"] = create_int_feature([feature.unique_id])
        features["input_ids"] = create_int_feature(feature.input_ids)
        features["input_mask"] = create_int_feature(feature.input_mask)
        features["segment_ids"] = create_int_feature(feature.segment_ids)
        if self.is_training:
            # Label fields are only present for training examples.
            features["start_positions"] = create_int_feature([feature.start_position])
            features["end_positions"] = create_int_feature([feature.end_position])
            impossible = 0
            if feature.is_impossible:
                impossible = 1
            features["is_impossible"] = create_int_feature([impossible])
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        #print("tf_example:")
        #print(tf_example)
        self._writer.write(tf_example.SerializeToString())

    def close(self):
        # Flush and close the underlying TFRecordWriter.
        self._writer.close()
def validate_flags_or_throw(bert_config):
    """Validate the input FLAGS or throw an exception.

    Args:
        bert_config: a BertConfig; its max_position_embeddings bounds
            the allowed max_seq_length.

    Raises:
        ValueError: on any inconsistent or missing flag combination.
    """
    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                  FLAGS.init_checkpoint)
    if not FLAGS.do_train and not FLAGS.do_predict:
        raise ValueError("At least one of `do_train` or `do_predict` must be True.")
    if FLAGS.do_train:
        if not FLAGS.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_file` must be specified.")
    if FLAGS.do_predict:
        if not FLAGS.predict_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_file` must be specified.")
    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config.max_position_embeddings))
    # Queries consume max_query_length tokens plus [CLS]/[SEP]/[SEP] markers.
    if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
        raise ValueError(
            "The max_seq_length (%d) must be greater than max_query_length "
            "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
    # Retriever - added by Willy
    if FLAGS.do_retriever:
        if not FLAGS.retriever_model:
            raise ValueError("You have to set retriever model(give the path) when you set do_retriever to Yes.")
        # Fixed: `== None` -> `is None` (identity check for None), and the
        # "Sqlit" typo in the user-facing error message.
        if FLAGS.document_type != 'Sqlite' or FLAGS.db_file is None:
            raise ValueError("You have to set document_type to Sqlite and set the db_file when you set do_retriever to Yes.")
    # TODO : think a mechanism to check these key words
    '''
    if FLAGS.document_type is 'SQlite':
        # TODO: set database
    elif FLAGS.document_type is 'Text':
        # TODO: set text file
    elif FLAGS.document_type is 'SQuAD':
        # is original method
    else :
        raise ValueError(
            "You have to set correct document_type: (1)'SQlite' (2)'Text' (3)SQuAD.")
    '''
def read_squad_documents(input_file):
    """Read a SQuAD json file and return the list of paragraph contexts."""
    with tf.gfile.Open(input_file, "r") as reader:
        input_data = json.load(reader)["data"]
    # One entry per paragraph, in document order.
    return [
        paragraph["context"]
        for entry in input_data
        for paragraph in entry["paragraphs"]
    ]
def read_sqlite_documents(input_file):
    """Load every document from the sqlite DB at `input_file`.

    Returns:
        (DOC2IDX, documents): a mapping doc_id -> position, and the list of
        document texts in the same order.
    """
    # Fixed: `documents` was never initialized, so this either raised a
    # NameError or silently appended to a module-level global across calls.
    documents = []
    with db_class(input_file) as doc_db:
        doc_ids = doc_db.get_doc_ids()
        for ids in doc_ids:
            documents.append(doc_db.get_doc_text(ids))
        DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
    return DOC2IDX, documents
def read_text_documents(input_file):
    """Read a plain-text file and return its non-empty lines.

    Args:
        input_file: path to a newline-delimited document file.

    Returns:
        List of lines with empty lines removed.
    """
    # Fixed: the original filtered the raw string (yielding a list of every
    # character) instead of the split lines; also use `with` so the file
    # handle is closed even on error.
    with open(input_file, "r") as file:
        documents = file.read()
    documents_split = documents.split('\n')
    documents_final = list(filter(None, documents_split))
    return documents_final
def read_squad_question(input_file):
    """Read a SQuAD json file and return every question string it contains."""
    with tf.gfile.Open(input_file, "r") as reader:
        input_data = json.load(reader)["data"]
    # Flatten entries -> paragraphs -> qas into a single list of questions.
    return [
        qa["question"]
        for entry in input_data
        for paragraph in entry["paragraphs"]
        for qa in paragraph["qas"]
    ]
def set_eval_examples(questions, DOC2IDX):
    """Build one SquadExample per (question, document) pair.

    Reads the module-level `documents` list; assumes DOC2IDX iterates in the
    same order as `documents` so temp_list[i] is document i's id —
    TODO confirm that invariant holds for all retriever backends.
    """
    def is_whitespace(c):
        # 0x202F is the narrow no-break space.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False
    eval_examples = []
    temp_list = []
    for i, DOCID in enumerate(DOC2IDX) :
        temp_list.append(DOCID)
    for question in questions:
        #-------------------------questions - Start---------------------------#
        question_text = question
        # No gold answer at inference time: positions are -1, answer empty.
        start_position = -1
        end_position = -1
        orig_answer_text = ""
        is_impossible = False
        #-------------documents - Start--------------#
        for i , paragraph_text in enumerate(documents):
            paragraph_text = paragraph_text
            #-------paragraphs - Start-------#
            # Whitespace-tokenize the paragraph, recording for each character
            # which token it belongs to.
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
                char_to_word_offset.append(len(doc_tokens) - 1)
            #-------paragraphs - End-------#
            # Fresh unique id per example since questions have no SQuAD qas_id.
            qas_id = str(uuid.uuid1())
            example = SquadExample(
                qas_id=qas_id,
                question_text=question_text,
                doc_id = temp_list[i],
                doc_tokens=doc_tokens,
                orig_answer_text=orig_answer_text,
                start_position=start_position,
                end_position=end_position,
                is_impossible=is_impossible)
            eval_examples.append(example)
        #-------------documents - Start--------------#
        #-------------------------questions - End-----------------------------#
    # Debug dump, gated by the module-level flag `example_in_set_eval_examples`.
    if example_in_set_eval_examples == 1:
        print('len of eval_examples:%d' %len(eval_examples))
        for i, example in enumerate(eval_examples):
            print(i)
            print (example.question_text)
        '''
        for i, example in enumerate(eval_examples):
            print('idx:%d:%s' %(i,example.question_text))
        '''
    return eval_examples
from socket import *
import sys
import threading
import time
from time import localtime
import imp
# Socket receive buffer size in bytes.
BUFSIZ = 1024
# Python 2 compatibility: reload sys to restore setdefaultencoding, then
# force UTF-8 as the default string encoding. No-op on Python 3.
if sys.version[0] == '2':
    imp.reload(sys)
    sys.setdefaultencoding("utf-8")
class TcpServer():
    """TCP server that answers QA questions with a BERT estimator.

    __init__ builds the tokenizer, TPU run config, estimator and TF-IDF
    retriever, then opens a listening socket; listen_client() accepts
    connections and spawns one readmsg() thread per client.
    """
    def __init__(self):
        self.HOST = FLAGS.Host_TCPServer
        self.PORT = FLAGS.PORT_TCPServer
        self.ADDR = (self.HOST,self.PORT)
        bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
        validate_flags_or_throw(bert_config)
        tf.gfile.MakeDirs(FLAGS.output_dir)
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
        tpu_cluster_resolver = None
        if FLAGS.use_tpu and FLAGS.tpu_name:
            tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
                FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
        run_config = tf.contrib.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
            master=FLAGS.master,
            model_dir=FLAGS.output_dir,
            save_checkpoints_steps=FLAGS.save_checkpoints_steps,
            tpu_config=tf.contrib.tpu.TPUConfig(
                iterations_per_loop=FLAGS.iterations_per_loop,
                num_shards=FLAGS.num_tpu_cores,
                per_host_input_for_training=is_per_host))
        # Inference only: no training schedule is configured.
        train_examples = None
        num_train_steps = None
        num_warmup_steps = None
        ranker = None
        model_fn = model_fn_builder(
            bert_config=bert_config,
            init_checkpoint=FLAGS.init_checkpoint,
            learning_rate=FLAGS.learning_rate,
            num_train_steps=num_train_steps,
            num_warmup_steps=num_warmup_steps,
            use_tpu=FLAGS.use_tpu,
            use_one_hot_embeddings=FLAGS.use_tpu)
        if FLAGS.do_retriever:
            # Set Document
            # ------------------------------------------------------
            print('WillyTest...do SQlite')
            DOC2IDX, docments = read_sqlite_documents(input_file=FLAGS.db_file)
            # ------------------------------------------------------
        else:
            # Set Document
            tf.logging.info("my document_type is %s", FLAGS.document_type)
            # NOTE(review): `is 'Text'` compares identity, not equality — this
            # branch only works if the strings happen to be interned; should be
            # `==`. Also DOC2IDX is undefined on this path (see below).
            if FLAGS.document_type is 'Text':
                # TODO
                print('WillyTest...do Text')
                docments = read_text_documents(input_file=FLAGS.predict_file)
            elif FLAGS.document_type is 'SQuAD':
                # TODO
                print('WillyTest...do SQuAD')
                docments = read_squad_documents(input_file=FLAGS.predict_file)
            # else:
            #    #raise ValueError("Your document_type: %s is undefined or wrong, please reset it." %(FLAGS.document_type))
        # If TPU is not available, this will fall back to normal Estimator on CPU
        # or GPU.
        self.estimator = tf.contrib.tpu.TPUEstimator(
            use_tpu=FLAGS.use_tpu,
            model_fn=model_fn,
            config=run_config,
            train_batch_size=FLAGS.train_batch_size,
            predict_batch_size=FLAGS.predict_batch_size)
        # NOTE(review): DOC2IDX is only assigned in the do_retriever branch;
        # any other path raises NameError here — confirm intended.
        self.DOC2IDX = DOC2IDX
        self.ranker = retriever.get_class('tfidf')(tfidf_path=FLAGS.retriever_model)
        try:
            self.STOP_CHAT = False
            self.sock = socket(AF_INET, SOCK_STREAM)
            print('%d is open' %self.PORT)
            self.sock.bind(self.ADDR)
            self.sock.listen(5)
            # Set up the exit condition and per-client bookkeeping.
            # All connected clients.
            self.clients = {}
            self.thrs = {}
            self.stops = []
        except Exception as e:
            # NOTE(review): returning None from __init__ does not make
            # TcpServer() evaluate to None — callers still get an instance.
            print("%d is down" %self.PORT)
            return None

    def listen_client(self):
        """Accept clients until STOP_CHAT is set; one reader thread each."""
        while not self.STOP_CHAT:
            print(u'等待接入,侦听端口:%d' %self.PORT)
            self.tcpClientSock, self.addr = self.sock.accept()
            print(u'接受连接,客户端地址:', self.addr)
            address = self.addr
            # Store the accepted client socket in self.clients.
            self.clients[address] = self.tcpClientSock
            # Serve each accepted connection in its own thread, which receives
            # and dispatches that client's messages.
            self.thrs[address] = threading.Thread(target=self.readmsg, args=[address])
            self.thrs[address].start()
            time.sleep(0.5)
            #self.tcpClientSock.send(b'you are connect...')
        print(u'系統結束')

    def readmsg(self, address):
        """Per-client loop: read a question, run prediction, send the answer."""
        # Return False if the address is unknown.
        if address not in self.clients:
            return False
        # Get the socket for the client that sent the message.
        client = self.clients[address]
        while True:
            try:
                # Receive the next message payload.
                data = client.recv(BUFSIZ)
            except:
                # NOTE(review): bare except, and `error` is undefined here —
                # a recv failure triggers a NameError; probably meant to bind
                # and print the exception object.
                print(error)
                self.close_client(address)
                break
            if not data:
                break
            # Timestamp the incoming message for the log.
            ISOTIMEFORMAT = '%Y-%m-%d %X'
            stime = time.strftime(ISOTIMEFORMAT, localtime())
            print([address], '@',[stime],':', data.decode('utf8'))
            # A literal "QUIT" (any case) shuts the whole server down.
            self.STOP_CHAT = (data.decode('utf8').upper() == "QUIT")
            if self.STOP_CHAT:
                print("quit")
                self.close_client(address)
                print("already quit")
                break
            tokenizer = self.tokenizer
            estimator = self.estimator
            DOC2IDX = self.DOC2IDX
            question = data.decode('utf8')
            print('My question:',question)
            if FLAGS.do_predict:
                # define
                #---------------------------------------------------
                def append_feature(feature):
                    eval_features.append(feature)
                    eval_writer.process_feature(feature)
                # ---------------------------------------------------
                print('WillyTest(1)...do Set question:%s' %(FLAGS.question_type))
                # ---------------------set question , changed by willy---------------------#
                questions = list()
                questions.append(question)
                #-------------------------------------------------------------------------#
                print('WillyTest(2)...do Set eval_examples')
                eval_examples=set_eval_examples(questions,DOC2IDX)
                print('WillyTest(2.1)...do FeatureWriter')
                eval_writer = FeatureWriter(
                    filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
                    is_training=False
                )
                eval_features = []
                print('WillyTest(2.2)...do convert_examples_to_features')
                convert_examples_to_features(
                    examples=eval_examples,
                    tokenizer=tokenizer,
                    max_seq_length=FLAGS.max_seq_length,
                    doc_stride=FLAGS.doc_stride,
                    max_query_length=FLAGS.max_query_length,
                    is_training=False,
                    output_fn=append_feature
                )
                eval_writer.close()
                tf.logging.info("***** Running predictions *****")
                tf.logging.info("  Num orig examples = %d", len(eval_examples))
                tf.logging.info("  Num split examples = %d", len(eval_features))
                tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
                print('WillyTest(5)...before redict_input_fn = input_fn_builder: eval_writer.filename=%s, FLAGS.max_seq_length=%d' %(eval_writer.filename,FLAGS.max_seq_length))
                predict_input_fn = input_fn_builder(
                    input_file=eval_writer.filename,
                    seq_length=FLAGS.max_seq_length,
                    is_training=False,
                    drop_remainder=False
                )
                all_results = []
                for result in estimator.predict(predict_input_fn, yield_single_examples=True):
                    if len(all_results) % 1000 == 0:
                        tf.logging.info("Processing example: %d" % (len(all_results)))
                    unique_id = int(result["unique_ids"])
                    start_logits = [float(x) for x in result["start_logits"].flat]
                    end_logits = [float(x) for x in result["end_logits"].flat]
                    all_results.append(RawResult(unique_id=unique_id,start_logits=start_logits,end_logits=end_logits))
                print('WillyTest(8)...before write_predictions')
                # write_predictions also sends the answer back over `client`.
                write_predictions(
                    eval_examples, eval_features, all_results,
                    FLAGS.n_best_size, FLAGS.max_answer_length,
                    FLAGS.do_lower_case, client,self.ranker
                )

    def close_client(self, address):
        """Close and forget every client socket (used on QUIT/errors)."""
        try:
            '''
            print(u'try leave')
            client = self.clients.pop(address)
            print(u'try leave1')
            self.stops.append(address)
            print(u'try leave2')
            client.close()
            print(u'try leave3')
            '''
            # NOTE(review): this pops from self.clients while iterating it,
            # which raises RuntimeError when more than one client is connected
            # — presumably only exercised with a single client; verify.
            for k in self.clients:
                print(u'try leave')
                print(u'try client1:', [self.clients[k]])
                print(u'try client2:', [self.clients[address]])
                print(u'try client3:', [k])
                print(u'try client4:', [address])
                client = self.clients.pop(k)
                #print(u'try leave1')
                #self.stops.append(k)
                print(u'try leave2')
                client.close()
                print(u'try leave3')
                '''
                print(u'try leave4:client:',[self.clients[k]])
                self.clients[k].send(str(address) + u"已经离开了")
                '''
        except:
            print(u'try fault')
            pass
        print(str(address) + u'已经退出')
def main(_):
    """Entry point: construct the TCP QA server and serve forever."""
    tf.logging.set_verbosity(tf.logging.INFO)
    print(willy_check_code)
    print("do tcp server")
    tserver = None
    tserver = TcpServer()
    # NOTE(review): TcpServer() always yields an instance — `return None`
    # inside __init__ does not make the constructor return None — so this
    # retry loop can never iterate; confirm the intended failure handling.
    while tserver == None:
        tserver = TcpServer()
    print("do tcp server-listen")
    tserver.listen_client()
if __name__ == "__main__":
    # These flags have no usable defaults and must be supplied on the CLI.
    flags.mark_flag_as_required("vocab_file")
    flags.mark_flag_as_required("bert_config_file")
    flags.mark_flag_as_required("output_dir")
    tf.app.run()
|
rlock_management.py | import threading
import time
class Box(object):
    """Counter guarded by a single class-level re-entrant lock.

    The RLock allows add()/remove() to call execute() while already
    holding the lock. `with` is used instead of explicit
    acquire()/release() so the lock is released even if the body raises
    (the original leaked the lock on an exception between the calls).
    """
    lock = threading.RLock()

    def __init__(self):
        # Current number of items in the box.
        self.total_items = 0

    def execute(self, n):
        """Atomically adjust the item count by n (may be negative)."""
        with Box.lock:
            self.total_items += n

    def add(self):
        """Atomically add one item."""
        with Box.lock:
            self.execute(1)

    def remove(self):
        """Atomically remove one item."""
        with Box.lock:
            self.execute(-1)
## These two functions run n in separate
## threads and call the Box's methods
def adder(box, items):
    """Add `items` items to `box`, pausing five seconds between each."""
    for _ in range(items):
        print ("adding 1 item in the box\n")
        box.add()
        time.sleep(5)
def remover(box, items):
    """Remove `items` items from `box`, pausing five seconds between each."""
    for _ in range(items):
        print ("removing 1 item in the box")
        box.remove()
        time.sleep(5)
## the main program build some
## threads and make sure it works
if __name__ == "__main__":
    # Demo: one thread adds 5 items while another removes 5;
    # with correct locking the final count is 0.
    items = 5
    print ("putting %s items in the box " % items)
    box = Box()
    t1 = threading.Thread(target=adder,args=(box,items))
    t2 = threading.Thread(target=remover,args=(box,items))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print ("%s items still remain in the box " % box.total_items)
|
hypothesis_test.py | import numpy as np
import copy
import time
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import unittest
import threading
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto import caffe2_pb2
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
if workspace.has_gpu_support:
    # NOTE: During GPU stress tests, the number of workers exceeds the number
    #       of GPUs which results in flakiness from GPU contention. As a
    #       result, deadlines are not enforced on CUDA runs.
    _hypothesis_settings = settings

    # Shadow hypothesis' `settings` decorator: drop any deadline and cap
    # max_examples at 50, then delegate to the real settings.
    def settings(**kwargs):
        if 'deadline' in kwargs:
            kwargs['deadline'] = None
        kwargs.setdefault('max_examples', 50)

        def wrapped(f):
            return _hypothesis_settings(**kwargs)(f)
        return wrapped
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e^-x)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
    """Strategy: a tensor and a smaller tensor sharing its trailing dims.

    Draws dims_ and extra_, then returns (array of shape dims_ + extra_,
    array of shape extra_) — the pair used by broadcast tests.
    """
    dims_ = draw(
        st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
    extra_ = draw(
        st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
    # Reject draws whose combined rank reaches max_dim.
    assume(len(dims_) + len(extra_) < max_dim)
    return (draw(hu.arrays(dims_ + extra_, dtype, elements)),
            draw(hu.arrays(extra_, dtype, elements)))
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
                        elements=None, **kwargs):
    """ generates a tensor and a list of indices of larger tensor of same dim"""
    data_dims_ = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
    # The "larger tensor" only needs its leading dimension size (2..10).
    original_dim = st.integers(min_value=2, max_value=10)
    # Yields (original_dim, data, indices): indices has one int64 entry per
    # row of data, each in [0, original_dim).
    return st.tuples(data_dims_, original_dim).flatmap(lambda pair: st.tuples(
        st.just(pair[1]),  # original dimension
        hu.arrays(pair[0], dtype, elements),  # data tensor
        hu.arrays(pair[0][0], dtype=np.int64, elements=st.integers(
            min_value=0, max_value=pair[1] - 1)),
    ))
# Maps numpy scalar types to caffe2 DataType enum values.
# Fixed: np.bool (an alias of builtin bool) was removed in NumPy 1.24, which
# made this module fail at import time; np.bool_ is the numpy boolean scalar
# type (and is what ndarray.dtype.type reports for boolean arrays).
_NUMPY_TYPE_TO_ENUM = {
    np.float32: core.DataType.FLOAT,
    np.int32: core.DataType.INT32,
    np.bool_: core.DataType.BOOL,
    np.uint8: core.DataType.UINT8,
    np.int8: core.DataType.INT8,
    np.uint16: core.DataType.UINT16,
    np.int16: core.DataType.INT16,
    np.int64: core.DataType.INT64,
    np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
    """Strategy sampling a dtype (default: int32, int64, float32)."""
    return st.sampled_from(dtypes or [np.int32, np.int64, np.float32])
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
                 test_gradient=False, allow_inplace=False, dtypes=_dtypes):
    """Build a hypothesis test method for elementwise binary operator `name`.

    ref: Python reference implementation returning a list of outputs;
    filter_: predicate restricting generated elements;
    allow_inplace: also sample outputs that alias an input blob.
    """
    @given(
        inputs=dtypes().flatmap(
            lambda dtype: hu.tensors(
                n=2, dtype=dtype,
                elements=hu.elements_of_type(dtype, filter_=filter_))),
        out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
        **gcs)
    @settings(max_examples=20, deadline=None)
    def test_binary(self, inputs, out, gc, dc):
        op = core.CreateOperator(name, ["X1", "X2"], [out])
        X1, X2 = inputs
        self.assertDeviceChecks(dc, op, [X1, X2], [0])
        # We only do gradient check with float32 types.
        if test_gradient and X1.dtype == np.float32:
            self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
        self.assertReferenceChecks(gc, op, [X1, X2], ref)
    return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
                           gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
    """Build a hypothesis test for operator `name` with broadcast=1.

    Generates a (big, small) tensor pair where `small` matches `big`'s
    trailing dims, and compares against `ref` cast to the input dtype.
    """
    @given(
        inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
            dtype=dtype,
            elements=hu.elements_of_type(dtype, filter_=filter_))),
        in_place=(st.booleans() if allow_inplace else st.just(False)),
        **gcs)
    @settings(max_examples=3, deadline=100)
    def test_binary_broadcast(self, inputs, in_place, gc, dc):
        op = core.CreateOperator(
            name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
        X1, X2 = inputs
        self.assertDeviceChecks(dc, op, [X1, X2], [0])

        def cast_ref(x, y):
            # Broadcast ops keep the first input's dtype.
            return (np.array(ref(x, y)[0], dtype=x.dtype), )
        # gradient not implemented yet
        # self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
        self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
    return test_binary_broadcast
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
ops = {"LT": lambda x1, x2: [x1 < x2],
"LE": lambda x1, x2: [x1 <= x2],
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in viewitems(ops):
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
    @given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_sum(self, inputs, in_place, gc, dc):
        """Sum of two tensors: device consistency and gradient w.r.t. X1."""
        op = core.CreateOperator("Sum", ["X1", "X2"],
                                 ["Y" if not in_place else "X1"])
        X1, X2 = inputs
        self.assertDeviceChecks(dc, op, [X1, X2], [0])
        self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
    @given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_row_mul(self, inputs, gc, dc):
        """RowMul: multiply each row of X1 by the matching scalar from X2."""
        op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
        X1, Xtmp = inputs
        X2 = Xtmp[:, 0]  # first column serves as the per-row multiplier

        def ref(x, y):
            # Reference: ret[i, ...] = x[i, ...] * y[i]
            ret = np.zeros(shape=x.shape, dtype=x.dtype)
            for i in range(y.size):
                ret[i, ] = x[i, ] * y[i]
            return [ret]
        self.assertDeviceChecks(dc, op, [X1, X2], [0])
        for i in range(2):
            self.assertGradientChecks(gc, op, [X1, X2], i, [0])
        self.assertReferenceChecks(gc, op, [X1, X2], ref)
    @given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_max(self, inputs, gc, dc):
        """Elementwise Max vs np.maximum, with gradients for both inputs."""
        op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
        X1, X2 = inputs
        # Make X1 and X2 far from each other, since X1=X2 is not differentiable
        # and the step size of gradient checker is 0.05
        X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
        X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
        self.assertDeviceChecks(dc, op, [X1, X2], [0])
        for i in range(2):
            self.assertGradientChecks(gc, op, [X1, X2], i, [0])

        def elementwise_max(X, Y):
            return [np.maximum(X, Y)]
        self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
def not_overflow(x):
if not isinstance(x, float):
return abs(x) < (1 << 30) - 1
return True
def ref(x, y):
return (x + y, )
_test_binary("Add", ref, filter_=not_overflow, test_gradient=True)(self)
_test_binary_broadcast("Add", ref, filter_=not_overflow)(self)
def test_sub(self):
def ref(x, y):
return (x - y, )
# TODO(jiayq): enable gradient test when implemented.
_test_binary("Sub", ref, test_gradient=True)(self)
_test_binary_broadcast("Sub", ref)(self)
def test_mul(self):
def not_overflow(x):
if not isinstance(x, float):
return abs(x) < (1 << 15) - 1
return True
def ref(x, y):
return (x * y, )
_test_binary("Mul", ref, filter_=not_overflow, test_gradient=True)(self)
_test_binary_broadcast("Mul", ref, filter_=not_overflow)(self)
def test_div(self):
def ref(x, y):
return (x / y, )
def non_zero(x):
return abs(x) > 1e-2
def div_dtypes():
return st.sampled_from([np.float32, np.float64])
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=True,
dtypes=div_dtypes, gcs=hu.gcs_cpu_only
)(self)
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=False,
dtypes=div_dtypes
)(self)
_test_binary_broadcast(
"Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
    @given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
    @settings(deadline=1000)
    def test_negative(self, X, in_place, gc, dc):
        """Negative operator: device consistency and gradient check."""
        op = core.CreateOperator("Negative", ["X"],
                                 ["Y" if not in_place else "X"])
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])
    @given(X=hu.tensor(), **hu.gcs)
    @settings(deadline=1000)
    def test_tanh(self, X, gc, dc):
        """Tanh operator: device consistency and gradient check."""
        op = core.CreateOperator("Tanh", "X", "Y")
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])
    @given(X=hu.tensor(), **hu.gcs)
    @settings(deadline=10000)
    def test_averaged_loss(self, X, gc, dc):
        """AveragedLoss operator: device consistency and gradient check."""
        op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])
    @given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_softsign(self, X, inplace, gc, dc):
        """Softsign x/(1+|x|): reference check; gradient must reject in-place."""
        op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])

        def softsign(X):
            return (X / (1 + np.abs(X)),)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertReferenceChecks(gc, op, [X], softsign)
        if inplace:
            # In-place Softsign has no gradient; the checker must fail.
            with self.assertRaises(Exception):
                self.assertGradientChecks(gc, op, [X], 0, [0])
        else:
            self.assertGradientChecks(gc, op, [X], 0, [0])
    @given(
        device_options=st.lists(
            min_size=2,
            max_size=4,
            elements=st.sampled_from(hu.expanded_device_options)),
        set_seed=st.booleans())
    @settings(deadline=10000)
    def test_random_seed_behaviour(self, device_options, set_seed):
        """XavierFill outputs must match across runs iff the seed is fixed."""
        # Assume we are always operating on CUDA or CPU, since RNG is
        # inconsistent between CPU and GPU.
        device_options = copy.deepcopy(device_options)
        assume(len({do.device_type for do in device_options}) == 1)
        if set_seed:
            for do in device_options:
                do.random_seed = 1000

        def run(do):
            # Reset each time because 'Y' may already exist in the workspace
            # on a different device
            workspace.ResetWorkspace()
            ws = workspace.C.Workspace()
            op = core.CreateOperator(
                "XavierFill", [], ["Y"],
                device_option=do,
                shape=[2])
            ws.run(op)
            return ws.blobs["Y"].fetch()
        ys = [run(do) for do in device_options]
        for y in ys[1:]:
            if set_seed:
                # Same seed -> identical fills.
                np.testing.assert_array_equal(ys[0], y)
            else:
                # Different/unset seeds -> fills must differ.
                with self.assertRaises(AssertionError):
                    np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
engine=st.sampled_from(["", "PACKED"]),
**hu.gcs)
@settings(deadline=10000)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
def prod(xs):
p = 1
for x in xs:
p *= x
return p
K = prod(list(X.shape)[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
b = np.random.randn(N).astype(np.float32)
op = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
engine=engine,
axis=axis)
for name, param in [("X", X), ("W", W), ("b", b)]:
self.ws.create_blob(name).feed(param)
self.ws.run(op)
Y = self.ws.blobs["Y"].fetch()
self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
inputs = [X, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for param, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, param, [0])
    @unittest.skipIf(not workspace.has_gpu_support,
                     "Skipping test due to no gpu present.")
    @settings(deadline=None)
    @given(hidden_size=st.integers(min_value=1, max_value=3),
           num_layers=st.integers(min_value=1, max_value=3),
           bidirectional=st.booleans(),
           rnn_mode=st.sampled_from(["lstm"]),  # TODO: "gru"
           input_mode=st.sampled_from(["linear"]),
           dropout=hu.floats(min_value=1.0, max_value=1.0),
           T=st.integers(min_value=2, max_value=6),
           N=st.integers(min_value=1, max_value=4),
           D=st.integers(min_value=1, max_value=4))
    def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
                       input_mode, dropout, T, N, D):
        """Gradient-check the cuDNN/MIOpen-backed Recurrent op for each of
        its inputs (input, hidden state, cell state, packed weights).

        The flat WEIGHT blob size is computed to match the engine's packed
        layout: 4 gate matrices per LSTM layer, first layer sized by the
        input dim D, upper layers by the (possibly doubled) hidden size.
        """
        # there's a bug in miopen for N=1 which would be resolved in the next release.
        if workspace.has_hip_support:
            assume(N > 1)
        # Random seed, this one happens to pass
        seed = 1234
        np.random.seed(seed)
        # set device option: pick the engine matching the available backend.
        if workspace.has_hip_support:
            device_option = hu.hip_do
            engine = 'MIOPEN'
        else:
            device_option = hu.gpu_do
            engine = 'CUDNN'
        # Per-layer parameter sizes (input->hidden, hidden->hidden, biases).
        input_weight_size = hidden_size * D
        upper_layer_input_weight_size = hidden_size * hidden_size
        if bidirectional:
            # Upper layers consume the concatenated fwd+bwd hidden state.
            upper_layer_input_weight_size *= 2
        recurrent_weight_size = hidden_size * hidden_size
        input_bias_size = hidden_size
        recurrent_bias_size = hidden_size
        num_directions = 2 if bidirectional else 1
        first_layer_sz = input_weight_size + recurrent_weight_size + \
                         input_bias_size + recurrent_bias_size
        upper_layer_sz = upper_layer_input_weight_size + \
                         recurrent_weight_size + input_bias_size + \
                         recurrent_bias_size
        # Factor 4: the LSTM's four gates each carry a full parameter set.
        total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)
        total_sz *= num_directions
        W = np.random.rand(total_sz).astype(np.float32)
        self.ws.create_blob("WEIGHT").feed(W, device_option=device_option)
        op = core.CreateOperator(
            "Recurrent",
            ["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
            ["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
             "RNN_SCRATCH", "DROPOUT_STATES"],
            hidden_size=hidden_size,
            bidirectional=bidirectional,
            rnn_mode=rnn_mode,
            dropout=dropout,
            input_mode=input_mode,
            num_layers=num_layers,
            seed=seed,
            engine=engine)
        X = np.random.randn(T, N, D).astype(np.float32)
        self.ws.create_blob("INPUT").feed(X, device_option=device_option)
        W = self.ws.blobs["WEIGHT"].fetch()
        H = np.random.randn(
            num_layers, N, hidden_size * num_directions).astype(
                np.float32)
        C = np.random.randn(
            num_layers, N, hidden_size * num_directions).astype(
                np.float32) if rnn_mode == "lstm" else \
            np.empty((1,)).astype(np.float32)  # unused in GRU
        inputs = [X, H, C, W]
        # GRU has no cell state, so skip gradient-checking input C there.
        input_idxs = [i for (i, _) in enumerate(inputs)] \
            if rnn_mode == "lstm" else [0, 1, 3]  # ignore C
        for input_idx in input_idxs:
            self.assertGradientChecks(
                device_option, op, inputs, input_idx, [0],
                stepsize=0.01, threshold=0.01)
    @given(ndim=st.integers(1, 4),
           axis=st.integers(0, 3),
           add_axis=st.integers(0, 1),
           num_inputs=st.integers(2, 4), **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_depth_concat(self, ndim, axis, add_axis, num_inputs, gc, dc):
        """Concat along `axis`; with add_axis=1 a new axis is inserted first
        (so all inputs keep the same shape), otherwise each input gets a
        unique size along `axis`."""
        assume(axis < ndim)
        input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
        shape = [2, 3, 5, 7][:ndim]
        individual_dims = [1, 2, 3, 4, 5][:num_inputs]
        inputs = []
        for i in range(num_inputs):
            if add_axis == 0:
                # Sets a unique dim and create the input.
                shape[axis] = individual_dims[i]
            inputs.append(np.random.randn(*shape).astype(np.float32))
        op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
                                 axis=axis, add_axis=add_axis)
        self.assertDeviceChecks(dc, op, inputs, [0])
        for i in range(num_inputs):
            self.assertGradientChecks(gc, op, inputs, i, [0])

        # Reference: mirror add_axis by expanding dims before concatenating;
        # the second output is the per-input size along the concat axis.
        def depth_concat(*inputs):
            inputs = list(inputs)
            if add_axis:
                for i in range(len(inputs)):
                    inputs[i] = np.expand_dims(inputs[i], axis)
            input_dims = np.array([np.shape(x)[axis] for x in inputs])
            return [np.concatenate(inputs, axis=axis), input_dims]
        self.assertReferenceChecks(gc, op, inputs, depth_concat)
@given(num_inputs=st.integers(2, 4),
order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
**hu.gcs)
@settings(deadline=10000)
def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7]
individual_dims = [1, 2, 3, 4][:num_inputs]
inputs = []
for i in range(num_inputs):
# Sets a unique dim and create the input.
shape[order[1]] = individual_dims[i]
inputs.append(np.random.rand(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
order=order[0])
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat_with_order(*inputs):
inputs = list(inputs)
axis = order[1]
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat_with_order)
@given(X=hu.arrays(dims=[5, 2],
elements=hu.floats(
min_value=1.0,
max_value=10.0)
),
**hu.gcs_cpu_only)
@settings(deadline=1000)
def test_last_n_windows(self, X, gc, dc):
workspace.FeedBlob('input', X)
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
workspace.CreateBlob('output')
collect_net = core.Net('collect_net')
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_data',
[collect_net], num_iter=2))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
inputs = workspace.FetchBlob('input')
new_output = np.zeros([7, inputs.shape[1]])
for i in range(inputs.shape[0] * 2):
new_output[i % 7] = inputs[i % inputs.shape[0]]
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
@settings(deadline=1000)
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
@given(inputs=hu.tensors(n=2),
in_place=st.booleans(),
momentum=hu.floats(min_value=0.1, max_value=0.9),
nesterov=st.booleans(),
lr=hu.floats(min_value=0.1, max_value=0.9),
**hu.gcs)
@settings(deadline=10000)
def test_momentum_sgd(
self, inputs, in_place, momentum, nesterov, lr, gc, dc):
grad, m = inputs
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"MomentumSGD",
["grad", "m", "lr"],
["grad" if in_place else "grad_o",
"m" if in_place else "m_o"],
momentum=momentum,
nesterov=int(nesterov),
device_option=gc)
self.assertDeviceChecks(
dc, op, [grad, m, lr], [0])
# Reference
def momentum_sgd(grad, m, lr):
lr = lr[0]
if not nesterov:
adjusted_gradient = lr * grad + momentum * m
return (adjusted_gradient, adjusted_gradient)
else:
m_new = momentum * m + lr * grad
return ((1 + momentum) * m_new - momentum * m, m_new)
self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
decay=hu.floats(min_value=0.1, max_value=0.9),
momentum=hu.floats(min_value=0.1, max_value=0.9),
lr=hu.floats(min_value=0.1, max_value=0.9),
epsilon=hu.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
@settings(deadline=10000)
def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
gc, dc):
grad, ms, mom = inputs
ms = np.abs(ms) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"RmsProp",
["grad", "ms", "mom", "lr"],
["grad" if in_place else "grad_o",
"ms" if in_place else "ms_o",
"mom" if in_place else "mom_o"],
momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
def rmsprop(grad, ms, mom, lr):
lr = lr[0]
ms_o = ms + (1. - decay) * (np.square(grad) - ms)
mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
grad_o = mom_o
return (grad_o, ms_o, mom_o)
self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=1000)
def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
# Reference
@staticmethod
def _dense_gftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
old_shape = g.shape
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
output_dim = g.shape[0]
w = w.reshape(output_dim, -1)
g = g.reshape(output_dim, -1)
n = n.reshape(output_dim, -1)
z = z.reshape(output_dim, -1)
input_dim = g.shape[1]
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
z_norms = np.linalg.norm(z, 2, axis=0)
z_norms = z_norms + 1e-6
w = z * ((lambda1 * np.sqrt(output_dim)) / z_norms - 1) / \
((beta + np.sqrt(n)) / alpha + lambda2)
for i in range(input_dim):
if z_norms[i] <= lambda1 * np.sqrt(output_dim):
w[:, i] = 0
w = w.reshape(old_shape)
n = n.reshape(old_shape)
z = z.reshape(old_shape)
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_gftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"GFtrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_gftrl, alpha, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad"],
["var", "nz"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad], [0])
# Reference
def ftrl(w, nz, i, g):
sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
w[i], nz[i], g)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
# Reference
@staticmethod
def _dense_ftrl_send_alpha_by_input(beta, lambda1, lambda2, w, nz, g, alpha):
return TestOperators._dense_ftrl(alpha, beta, lambda1, lambda2, w, nz,
g)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_ftrl_sgd_send_alpha_by_input(self, inputs, in_place, alpha, beta,
lambda1, lambda2, engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad", "alpha"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad, alpha], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad, alpha],
partial(self._dense_ftrl_send_alpha_by_input, beta, lambda1, lambda2))
    @given(inputs=hu.tensors(n=4),
           alpha=hu.floats(min_value=0.01, max_value=0.1),
           beta=hu.floats(min_value=0.1, max_value=0.9),
           lambda1=hu.floats(min_value=0.001, max_value=0.1),
           lambda2=hu.floats(min_value=0.001, max_value=0.1),
           engine=st.sampled_from([None, "SIMD"]),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_ftrl_sgd_send_alpha_by_input(self, inputs, alpha, beta,
                                                 lambda1, lambda2, engine, gc,
                                                 dc):
        """SparseFtrl with alpha supplied as a fifth input blob; only the
        rows selected by `indices` are updated."""
        var, n, z, grad = inputs
        # generate fake subset manually because hypothesis is too complicated :)
        indices = np.arange(var.shape[0])
        indices = indices[indices % 2 == 0]
        grad = grad[indices]
        # The squared-gradient accumulator must be non-negative.
        n = np.abs(n)
        nz = np.stack([n, z], axis=-1)
        # alpha travels as a scalar float32 blob.
        alpha = np.array(alpha).astype(np.float32)
        op = core.CreateOperator(
            "SparseFtrl",
            ["var", "nz", "indices", "grad", "alpha"],
            ["var", "nz"],
            beta=beta, lambda1=lambda1, lambda2=lambda2,
            engine=engine,
            device_option=gc)
        self.assertDeviceChecks(
            dc, op, [var, nz, indices, grad, alpha], [0])

        # Reference: dense FTRL (alpha-by-input variant) on selected rows.
        def ftrl(w, nz, i, g, alpha):
            sw, snz = self._dense_ftrl_send_alpha_by_input(beta, lambda1,
                                                           lambda2, w[i], nz[i],
                                                           g, alpha)
            w[i] = sw
            nz[i] = snz
            return (w, nz)
        self.assertReferenceChecks(gc, op, [var, nz, indices, grad, alpha],
                                   ftrl)
@given(input=hu.tensor(max_value=20,
max_dim=1,
dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
with_remapping=st.booleans(),
**hu.gcs_no_hip)
@settings(deadline=10000)
def test_unique(self, input, with_remapping, gc, dc):
op = core.CreateOperator(
"Unique",
["input"],
["unique"] + (["remapping"] if with_remapping else []),
device_option=gc)
self.assertDeviceChecks(dc, op, [input], [0])
# Validator
def unique_valid(input, unique, remapping=None):
self.assertEqual(unique.size, len(set(input)))
self.assertEqual(sorted(unique), sorted(set(input)))
if with_remapping:
self.assertEqual(remapping.shape, input.shape)
remapped = [unique[remapping[i]] for i in range(len(input))]
np.testing.assert_array_equal(remapped, input)
self.assertValidationChecks(gc, op, [input], unique_valid)
    @given(prediction=hu.arrays(dims=[10, 3],
                                elements=hu.floats(allow_nan=False,
                                                   allow_infinity=False,
                                                   min_value=0,
                                                   max_value=1)),
           labels=hu.arrays(dims=[10],
                            dtype=np.int32,
                            elements=st.integers(min_value=0,
                                                 max_value=3 - 1)),
           top_k=st.integers(min_value=1, max_value=3),
           **hu.gcs)
    @settings(deadline=1000)
    def test_accuracy(self, prediction, labels, top_k, gc, dc):
        """Accuracy op vs a sort-based top-k reference.

        Note: top_k is passed as an extra element of `inputs` so the
        reference receives it; the op itself only consumes the first two.
        """
        # top_k > 1 is only implemented on CPU.
        if(top_k > 1):
            gc = hu.cpu_do
        op = core.CreateOperator(
            "Accuracy",
            ["prediction", "labels"],
            ["accuracy"],
            top_k=top_k,
            device_option=gc
        )

        def op_ref(prediction, labels, top_k):
            N = prediction.shape[0]
            correct = 0
            for i in range(0, len(prediction)):
                # Sort class scores descending, keeping original indices.
                pred_sorted = sorted(
                    ([item, j] for j, item in enumerate(prediction[i])),
                    key=lambda x: x[0],
                    reverse=True
                )
                max_ids = [x[1] for x in pred_sorted[0:top_k]]
                for m in max_ids:
                    if m == labels[i]:
                        correct += 1
            accuracy = correct / N
            return (accuracy,)

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[prediction, labels, top_k],
            reference=op_ref)
@given(target_probabilities=hu.arrays(
dims=[10], elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0.01,
max_value=1)),
**hu.gcs)
@settings(deadline=1000)
def test_perplexity(self, target_probabilities, gc, dc):
op = core.CreateOperator(
"Perplexity",
["target_probabilities"],
["perplexity"]
)
def op_ref(target_probabilities):
N = target_probabilities.shape[0]
perplexities = np.power(target_probabilities, -1.0 / N)
perplexity = reduce(lambda x, y: x * y, perplexities)
return (perplexity,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[target_probabilities],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(list(range(l)))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
    @given(**hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_segment_ids_to_ranges(self, gc, dc):
        """SegmentIdsToRanges: from sorted segment ids, produce one
        (start, length) pair per segment id, emitting zero-length ranges
        for missing ids (e.g. the 0-length segment in `lengths` below)."""
        lengths = [4, 6, 3, 2, 0, 4]
        op = core.CreateOperator(
            "SegmentIdsToRanges",
            ["segment_ids"],
            ["ranges"])

        def op_ref(segment_ids):
            ranges = [np.array([0, 0], dtype=np.int32)]
            prev = 0
            for i, sid in enumerate(segment_ids):
                # A jump in segment id opens new ranges starting at i;
                # skipped ids get (i, 0) entries that stay zero-length.
                while sid != prev:
                    prev += 1
                    ranges.append(np.array([i, 0], dtype=np.int32))
                # Current element extends the open range by one.
                ranges[-1][1] += 1
            return (np.array(ranges, dtype=np.int32), )

        def lengths_to_segment_ids(lengths):
            sids = []
            for i, l in enumerate(lengths):
                sids.extend(l * [i])
            return (np.array(sids, dtype=np.int32), )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=np.array(lengths_to_segment_ids(lengths), dtype=np.int32),
            reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_to_ranges(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["ranges"])
def op_ref(x):
if not x.size:
return (x.reshape((0, 2)), )
return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
x)), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(
lengths=st.lists(
st.integers(min_value=0, max_value=10), min_size=0, max_size=10
),
include_last_offset=st.booleans(),
**hu.gcs_cpu_only
)
@settings(deadline=None)
def test_lengths_to_offsets(self, lengths, include_last_offset, gc, dc):
op = core.CreateOperator(
"LengthsToOffsets",
["lengths"],
["ranges"],
include_last_offset=include_last_offset,
)
def op_ref(x):
if not x.size:
arr = [x.reshape(0)]
else:
arr = [np.concatenate(([0], np.cumsum(x)[:-1]))]
if include_last_offset:
arr[0] = np.concatenate((arr[0], np.array([np.sum(x)])))
return tuple(arr)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref,
)
@given(prediction=hu.arrays(dims=[10, 3],
elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
**hu.gcs)
@settings(deadline=10000)
def test_multi_class_accuracy(self, prediction, labels, gc, dc):
op = core.CreateOperator(
"MultiClassAccuracy",
["prediction", "labels"],
["accuracies", "amounts"]
)
def op_ref(prediction, labels):
N = prediction.shape[0]
D = prediction.shape[1]
accuracies = np.empty(D, dtype=float)
accuracies.fill(0)
amounts = np.empty(D, dtype=int)
amounts.fill(0)
max_ids = np.argmax(prediction, axis=1)
for i in range(0, N):
max_id = max_ids[i]
label_id = labels[i]
if max_id == label_id:
accuracies[label_id] += 1
amounts[label_id] += 1
for i in range(0, D):
amount = amounts[i]
if amount:
accuracies[i] /= amount
return (accuracies, amounts,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels],
reference=op_ref)
    @given(lengths=st.lists(st.integers(min_value=0, max_value=10),
                            min_size=0,
                            max_size=10),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_segment_ids_to_lengths(self, lengths, gc, dc):
        """SegmentIdsToLengths: invert LengthsToSegmentIds, including
        zero-length entries for segment ids that are skipped over."""
        op = core.CreateOperator(
            "SegmentIdsToLengths",
            ["segment_ids"],
            ["lengths"])

        # Forward direction used to build the test input.
        def lengths_to_ids(lengths):
            sids = []
            for i, l in enumerate(lengths):
                sids.extend(l * [i])
            return sids

        segment_ids = lengths_to_ids(lengths)

        def ids_to_lengths(ids):
            ids_length = len(ids)
            if ids_length == 0:
                return (np.array([], dtype=np.int32),)

            lengths = []
            # segment id starts with 0
            prev_id = -1
            tmp_length = 0
            for idx in range(ids_length):
                cur_id = ids[idx]
                if cur_id != prev_id:
                    # Close the previous run (skipped for the very first id).
                    if idx != 0:
                        lengths.append(tmp_length)
                    # Emit a 0 for every id skipped between prev and cur.
                    while prev_id + 1 != cur_id:
                        lengths.append(0)
                        prev_id += 1
                    prev_id = cur_id
                    tmp_length = 0
                tmp_length += 1
            # Flush the final run.
            lengths.append(tmp_length)
            return (np.array(lengths, dtype=np.int32),)

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[np.array(segment_ids, dtype=np.int32)],
            reference=ids_to_lengths)
@given(lengths=st.lists(st.integers(min_value=1, max_value=10),
min_size=0,
max_size=10),
power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_to_weights(self, lengths, power, gc, dc):
op = core.CreateOperator(
"LengthsToWeights",
["lengths"],
["weights"],
power=power)
def lengths_to_weights(lengths):
weighted_length = []
for l in lengths:
weighted_length.extend(l * [1 / pow(l, power)])
return (np.array(weighted_length, dtype=float),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=lengths_to_weights)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
@settings(deadline=10000)
def test_abs(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Abs",
["input"],
["output"]
)
def abs_ref(input_tensor):
return (np.abs(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=abs_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(min_value=-10,
max_value=10)),
**hu.gcs)
@settings(deadline=10000)
def test_cos(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Cos",
["input"],
["output"]
)
def cos_ref(input_tensor):
return (np.cos(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=cos_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(min_value=-10,
max_value=10)),
**hu.gcs)
@settings(deadline=1000)
def test_sin(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Sin",
["input"],
["output"]
)
def sin_ref(input_tensor):
return (np.sin(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=sin_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
@settings(deadline=10000)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(min_value=1,
max_value=10000)),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_log(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Log",
["input"],
["output"]
)
def log_ref(input_tensor):
return (np.log(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=log_ref)
self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
def test_blobs_dequeue_timeout(self):
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=5,
num_blobs=1)
self.ws.run(op)
t = time.time()
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
["out"],
timeout_secs=0.2)
self.assertRaises(RuntimeError, lambda: self.ws.run(op))
t = time.time() - t
self.assertGreater(t, 0.19)
    @given(num_threads=st.integers(1, 10),  # noqa
           num_elements=st.integers(1, 100),
           capacity=st.integers(1, 5),
           num_blobs=st.integers(1, 3),
           do=st.sampled_from(hu.device_options))
    @settings(deadline=10000)
    def test_blobs_queue_threading(self, num_threads, num_elements,
                                   capacity, num_blobs, do):
        """
        - Construct matrices of size N x D
        - Start K threads
        - Push all N rows into the queue of capacity C
        - Pull all N rows out of the queue.
        - Verify that the output matrices are permutation of the rows of the
          original matrices.
        """
        import threading
        import queue
        op = core.CreateOperator(
            "CreateBlobsQueue",
            [],
            ["queue"],
            capacity=capacity,
            num_blobs=num_blobs,
            device_option=do)
        self.ws.run(op)
        xs = [np.random.randn(num_elements, 5).astype(np.float32)
              for _ in range(num_blobs)]
        # Python-side work queue that feeds the enqueue threads.
        q = queue.Queue()
        for i in range(num_elements):
            q.put([x[i] for x in xs])

        def enqueue(t):
            # Drain the Python queue; each thread uses its own feed blobs.
            while True:
                feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
                op = core.CreateOperator(
                    "EnqueueBlobs",
                    ["queue"] + feed_blobs,
                    feed_blobs,
                    device_option=do)
                try:
                    elems = q.get_nowait()
                    for elem, feed_blob in zip(elems, feed_blobs):
                        self.ws.create_blob(feed_blob).feed(
                            elem, device_option=do)
                    self.ws.run(op)
                except queue.Empty:
                    return
        # Create all blobs before racing on multiple threads
        # (blob creation is not threadsafe)
        for t in range(num_threads):
            for i in range(num_blobs):
                self.ws.create_blob("x_{}_{}".format(i, t))
        threads = [threading.Thread(target=enqueue, args=(t,))
                   for t in range(num_threads)]
        for thread in threads:
            thread.start()
        # Main thread dequeues all N elements while producers run.
        for n in range(num_elements):
            dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
            op = core.CreateOperator(
                "DequeueBlobs",
                ["queue"],
                dequeue_blobs,
                device_option=do)
            self.ws.run(op)
        for thread in threads:
            thread.join()
        op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
        self.ws.run(op)
        ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
                         for n in range(num_elements)])
              for i in range(num_blobs)]
        for i in range(num_blobs):
            self.assertEqual(ys[i].shape, xs[i].shape)
            for j in range(num_elements):
                # Verify that the rows of the returned blob are a
                # permutation. The order may be different due to
                # different threads racing.
                self.assertTrue(
                    any(np.array_equal(xs[i][j], ys[i][k])
                        for k in range(num_elements)))
    @given(num_producers=st.integers(1, 10),
           num_consumers=st.integers(1, 10),
           capacity=st.integers(1, 5),
           num_blobs=st.integers(1, 3),
           do=st.sampled_from(hu.device_options))
    @settings(deadline=None, max_examples=50)
    def test_safe_blobs_queue(self, num_producers, num_consumers,
                              capacity, num_blobs, do):
        """SafeEnqueueBlobs/SafeDequeueBlobs: run concurrent producers and
        consumers in one plan; each consumer counts its dequeues and the
        counts must sum to the total number of enqueued items (`truth`)."""
        init_net = core.Net('init_net')
        queue = init_net.CreateBlobsQueue(
            [], 1, capacity=capacity, num_blobs=num_blobs)
        producer_steps = []
        truth = 0
        # Producer i enqueues (i + 1) * 10 items of num_blobs blobs each.
        for i in range(num_producers):
            name = 'producer_%d' % i
            net = core.Net(name)
            blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
                     for times in range(num_blobs)]
            status = net.NextName()
            net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
            count = (i + 1) * 10
            step = core.execution_step(name, net, num_iter=count)
            truth += count
            producer_steps.append(step)
        # Closing the queue is what eventually stops the consumers.
        producer_exit_net = core.Net('producer_exit_net')
        producer_exit_net.CloseBlobsQueue([queue], 0)
        producer_step = core.execution_step('producer', [
            core.execution_step(
                'producers', producer_steps, concurrent_substeps=True),
            core.execution_step('producer_exit', producer_exit_net)]
        )
        consumer_steps = []
        counters = []
        const_1 = init_net.ConstantFill([], 1, value=1.0)
        # Each consumer dequeues until the status blob signals "closed",
        # incrementing its own counter once per successful dequeue.
        for i in range(num_consumers):
            name = 'consumer_%d' % i
            net1 = core.Net(name)
            blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
            status = blobs[-1]

            net2 = core.Net(name + '_counter')
            counter = init_net.ConstantFill([], 1, value=0.0)
            counters.append(counter)
            net2.Add([counter, const_1], counter)
            consumer_steps.append(core.execution_step(
                name, [net1, net2], should_stop_blob=status))
        consumer_step = core.execution_step(
            'consumer', consumer_steps, concurrent_substeps=True)

        init_step = core.execution_step('init', init_net)
        worker_step = core.execution_step(
            'worker', [consumer_step, producer_step], concurrent_substeps=True)

        plan = core.Plan('test')
        plan.AddStep(init_step)
        plan.AddStep(worker_step)

        self.ws.run(plan)
        # Every enqueued item must have been consumed exactly once.
        v = 0
        for counter in counters:
            v += self.ws.blobs[str(counter)].fetch().tolist()
        self.assertEqual(v, truth)
    @given(num_queues=st.integers(1, 5),
           num_iter=st.integers(5, 10),
           capacity=st.integers(1, 5),
           num_blobs=st.integers(1, 3))
    @settings(deadline=None, max_examples=50)
    def test_weighted_sample_blobs_queue(
        self, num_queues, num_iter, capacity, num_blobs
    ):
        """WeightedSampleDequeueBlobs: one consumer samples from several
        queues by weight until all are closed; the number of dequeue
        attempts must lie between num_iter + 1 and num_iter * num_queues + 1
        (the +1 is the final attempt that observes the closed status)."""
        # Create BlobsQueue for each input queue
        print("num_queues", num_queues)
        init_net = core.Net('init_net')
        queues = [
            init_net.CreateBlobsQueue(
                [], 1, capacity=capacity, num_blobs=num_blobs
            ) for _ in range(num_queues)
        ]

        # Create multiple producer nets and one producer exist net
        producer_steps = []
        producer_exit_nets = []
        for i in range(num_queues):
            name = 'producer_%d' % i
            net = core.Net(name)
            blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
                     for _ in range(num_blobs)]
            status = net.NextName()
            net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
            exit_net = core.Net('producer_exit_%d' % i)
            exit_net.CloseBlobsQueue(queues[i], 0)
            producer_exit_nets.append(exit_net)
            # Each producer enqueues num_iter times, then closes its queue.
            step = core.execution_step(
                name, [
                    core.execution_step(
                        'producer_%d' % i, [net], num_iter=num_iter
                    ),
                    core.execution_step('producer_exit_%d' % i, [exit_net]),
                ]
            )
            producer_steps.append(step)

        producer_step = core.execution_step(
            'producer', [
                core.execution_step(
                    'producers',
                    producer_steps,
                    concurrent_substeps=True,
                ),
            ]
        )

        status_lst = []

        # Python op callback: records one entry per dequeue attempt.
        def append(ins, outs):
            status_lst.append(ins)

        # Create one consumer dequeue net and one consumer exist net
        consumer_net = core.Net('weight_sample_dequeue_net')
        table_idx_blob = np.random.randint(low=-1, high=num_blobs, size=1)
        blobs = consumer_net.WeightedSampleDequeueBlobs(
            queues,
            num_blobs + 1,
            weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,)),
            table_idx_blob=table_idx_blob[0],
        )
        status = blobs[-1]
        consumer_net.Python(append)(status)
        consumer_step = core.execution_step(
            'consumer',
            [
                core.execution_step(
                    'consumer', [consumer_net], should_stop_blob=status
                ),
                core.execution_step('producer_exit', producer_exit_nets)
            ]
        )

        init_step = core.execution_step('init', init_net)
        worker_step = core.execution_step(
            'worker', [producer_step, consumer_step], concurrent_substeps=True)

        plan = core.Plan('test')
        plan.AddStep(init_step)
        plan.AddStep(worker_step)

        self.ws.run(plan)
        assert len(status_lst) >= num_iter + 1
        assert len(status_lst) <= num_iter * num_queues + 1
@given(
data=hu.tensor(),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_squeeze_expand_dims(self, data, gc, dc):
dims = [0, 0]
if len(data.shape) > 2:
dims.append(2)
op = core.CreateOperator(
"ExpandDims",
["data"],
["expanded"],
dims=dims)
def expand_dims_ref(data, *args, **kw):
inc_dims = list(set(dims))
inc_dims.sort()
r = data
for dim in inc_dims:
r = np.expand_dims(r, axis=dim)
return (r, )
def squeeze_ref(data, *args, **kw):
dec_dims = list(set(dims))
dec_dims.sort(reverse=True)
r = data
for dim in dec_dims:
r = np.squeeze(r, axis=dim)
return (r, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data],
reference=expand_dims_ref,
output_to_grad='expanded',
grad_reference=squeeze_ref)
    @given(**hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_tt_layer(self, gc, dc):
        """TT (tensor-train) layer forward pass, compared against a fixed
        golden output produced with these exact sizes, ranks and seed."""
        seed = 1234
        np.random.seed(seed)
        inp_sizes = [2, 2, 2, 2]
        out_sizes = [2, 2, 2, 2]
        tt_ranks = [1, 3, 3, 3, 1]
        op = core.CreateOperator(
            "TT",
            ["X", "b", "cores"],
            ["Y"],
            inp_sizes=inp_sizes,
            out_sizes=out_sizes,
            tt_ranks=tt_ranks,
        )
        # Single-row input of prod(inp_sizes) = 16 features; zero bias.
        X = np.expand_dims(
            np.random.rand(16).astype(np.float32), axis=0)
        b = np.array([0] * 16).astype(np.float32)
        cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("b").feed(b)
        self.ws.create_blob("cores").feed(cores)
        self.ws.run(op)
        Y = self.ws.blobs[("Y")].fetch()
        Y = Y.reshape([16])
        golden = np.array([-9.51763490e-07, -1.28442286e-06,
                           -2.86281141e-07, 2.28865644e-07,
                           -1.96180017e-06, -1.78920531e-06,
                           9.31094666e-07, -2.04273989e-07,
                           1.70017107e-06, 1.64845711e-06,
                           -1.06099132e-06, -4.69111137e-07,
                           6.57552358e-08, -1.28942040e-08,
                           -2.29114004e-07, -1.04262714e-06])
        # This golden array is dependent on the specified inp_sizes, out_sizes,
        # tt_ranks, and seed. Changing these will cause the test to fail.
        self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
@given(**hu.gcs_cpu_only)
def test_tt_sls_layer(self, gc, dc):
seed = 1234
np.random.seed(seed)
factor_voc = [10, 10, 10]
factor_width = [2, 2, 2]
op = core.CreateOperator(
"TTSparseLengthsSum",
["core0", "core1", "core2", "index", "lengths"],
["Y", "core0_output", "core1_output", "indices"],
factor_i=factor_voc,
factor_j=factor_width,
ranks=[1, 16, 16, 1],
emb_size=8
)
c0 = np.ones([10, 1, 2, 16]).astype(np.float32)
c1 = np.ones([10, 16, 2, 16]).astype(np.float32)
c2 = np.ones([10, 16, 2, 1]).astype(np.float32)
# index = np.array([0, 1, 2, 1, 4], dtype=np.int)
# lengths = np.array([3, 2], dtype=np.int)
index = np.array([0, 1, 2, 1, 4], np.int64)
lengths = np.array([3, 2], np.int32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("index").feed(index)
self.ws.create_blob("lengths").feed(lengths)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
self.assertEqual(list(Y.shape), [2, 8])
golden = np.array([[768, 768, 768, 768, 768, 768, 768, 768],
[512, 512, 512, 512, 512, 512, 512, 512]])
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=0)
@given(**hu.gcs_cpu_only)
def test_tt_sls_gradientop(self, gc, dc):
op = core.CreateOperator(
"TTSparseLengthsSumGradient",
["core0", "core1", "core2", "lengths",
"core0_out", "core1_out", "indices", "dY"],
["dCore0", "dCore1", "dCore2"]
)
c0 = np.ones([10, 1, 4, 16]).astype(np.float32)
c1 = np.ones([10, 16, 4, 16]).astype(np.float32)
c2 = np.ones([10, 16, 4, 1]).astype(np.float32)
lengths = np.array([3, 2], np.int32)
c0_out = np.ones([5, 4, 16]).astype(np.float32)
c1_out = np.ones([5, 16, 16]).astype(np.float32)
indices = np.array([[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[1, 0, 0],
[4, 0, 0]], np.int64)
dY = np.ones([2, 64]).astype(np.float32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("lengths").feed(lengths)
self.ws.create_blob("core0_out").feed(c0_out)
self.ws.create_blob("core1_out").feed(c1_out)
self.ws.create_blob("indices").feed(indices)
self.ws.create_blob("dY").feed(dY)
self.ws.run(op)
dCore0 = self.ws.blobs[("dCore0")].fetch()
dCore1 = self.ws.blobs[("dCore1")].fetch()
dCore2 = self.ws.blobs[("dCore2")].fetch()
self.assertEqual(list(dCore0.shape), list(c0.shape))
self.assertEqual(list(dCore1.shape), list(c1.shape))
self.assertEqual(list(dCore2.shape), list(c2.shape))
@given(**hu.gcs_cpu_only)
def test_tt_sls_gradientop1(self, gc, dc):
op = core.CreateOperator(
"TTSparseLengthsSumGradient",
["core0", "core1", "core2", "lengths",
"core0_out", "core1_out", "indices", "dY"],
["dCore0", "dCore1", "dCore2"]
)
c0 = np.ones([101, 1, 2, 16]).astype(np.float32)
c1 = np.ones([102, 16, 2, 16]).astype(np.float32)
c2 = np.ones([153, 16, 4, 1]).astype(np.float32)
lengths = np.array([3, 2], np.int32)
c0_out = np.ones([5, 2, 16]).astype(np.float32)
c1_out = np.ones([5, 4, 16]).astype(np.float32)
indices = np.array([[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[1, 0, 0],
[4, 0, 0]], np.int64)
dY = np.ones([2, 16]).astype(np.float32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("lengths").feed(lengths)
self.ws.create_blob("core0_out").feed(c0_out)
self.ws.create_blob("core1_out").feed(c1_out)
self.ws.create_blob("indices").feed(indices)
self.ws.create_blob("dY").feed(dY)
self.ws.run(op)
dCore0 = self.ws.blobs[("dCore0")].fetch()
dCore1 = self.ws.blobs[("dCore1")].fetch()
dCore2 = self.ws.blobs[("dCore2")].fetch()
self.assertEqual(list(dCore0.shape), list(c0.shape))
self.assertEqual(list(dCore1.shape), list(c1.shape))
self.assertEqual(list(dCore2.shape), list(c2.shape))
    @given(**hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_tt_sls(self, gc, dc):
        """Numeric gradient check for TTSparseLengthsSum w.r.t. core0,
        with zero-length segments present in `lengths`."""
        factor_voc = [10, 10, 10]
        factor_width = [2, 2, 2]
        op = core.CreateOperator(
            "TTSparseLengthsSum",
            ["core0", "core1", "core2", "index", "lengths"],
            ["Y", "core0_output", "core1_output", "indices"],
            factor_i=factor_voc,
            factor_j=factor_width,
            ranks=[1, 16, 16, 1],
            emb_size=8
        )
        c0 = np.ones([10, 1, 2, 16]).astype(np.float32)
        c1 = np.ones([10, 16, 2, 16]).astype(np.float32)
        c2 = np.ones([10, 16, 2, 1]).astype(np.float32)
        index = np.array([0, 1, 2, 1, 4], np.int64)
        # Zero-length segments are deliberate: the op must handle them.
        lengths = np.array([0, 3, 0, 0, 2, 0, 0], np.int32)
        self.assertGradientChecks(gc, op, [c0, c1, c2, index, lengths], 0, [0])
@given(**hu.gcs_cpu_only)
def test_tt_sls_repro(self, gc, dc):
factor_voc = [125, 160, 200]
factor_width = [4, 4, 4]
op = core.CreateOperator(
"TTSparseLengthsSum",
["core0", "core1", "core2", "index", "lengths"],
["Y", "core0_output", "core1_output", "indices"],
factor_i=factor_voc,
factor_j=factor_width,
ranks=[1, 16, 16, 1],
emb_size=64
)
c0 = np.ones([125, 1, 4, 16]).astype(np.float32)
c1 = np.ones([160, 16, 4, 16]).astype(np.float32)
c2 = np.ones([200, 16, 4, 1]).astype(np.float32)
index = np.array([0, 4000000 - 1, 20000, 1000000, 4000000 - 1], np.int64)
lengths = np.array([0, 3, 0, 0, 2, 0, 0], np.int32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("index").feed(index)
self.ws.create_blob("lengths").feed(lengths)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
self.assertEqual(list(Y.shape), [7, 64])
golden = np.array([[0] * 64, [768] * 64, [0] * 64, [0] * 64, [512] * 64, [0] * 64, [0] * 64])
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=0)
@given(**hu.gcs_cpu_only)
def test_tt_sls_gradientop2(self, gc, dc):
op = core.CreateOperator(
"TTSparseLengthsSumGradient",
["core0", "core1", "core2", "lengths",
"core0_out", "core1_out", "indices", "dY"],
["dCore0", "dCore1", "dCore2"]
)
c0 = np.ones([101, 1, 2, 16]).astype(np.float32)
c1 = np.ones([102, 16, 2, 16]).astype(np.float32)
c2 = np.ones([153, 16, 4, 1]).astype(np.float32)
lengths = np.array([0, 3, 0, 0, 2, 0, 0], np.int32)
c0_out = np.ones([5, 2, 16]).astype(np.float32)
c1_out = np.ones([5, 4, 16]).astype(np.float32)
indices = np.array([[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[1, 0, 0],
[4, 0, 0]], np.int64)
dY = np.ones([7, 16]).astype(np.float32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("lengths").feed(lengths)
self.ws.create_blob("core0_out").feed(c0_out)
self.ws.create_blob("core1_out").feed(c1_out)
self.ws.create_blob("indices").feed(indices)
self.ws.create_blob("dY").feed(dY)
self.ws.run(op)
dCore0 = self.ws.blobs[("dCore0")].fetch()
dCore1 = self.ws.blobs[("dCore1")].fetch()
dCore2 = self.ws.blobs[("dCore2")].fetch()
self.assertEqual(list(dCore0.shape), list(c0.shape))
self.assertEqual(list(dCore1.shape), list(c1.shape))
self.assertEqual(list(dCore2.shape), list(c2.shape))
    @given(num_workers=st.integers(1, 10),
           net_type=st.sampled_from(
               ["simple", "dag"] +
               (["async_dag"] if workspace.has_gpu_support else [])),
           **hu.gcs)
    @settings(deadline=10000)
    def test_dag_net_forking(self, net_type, num_workers, gc, dc):
        """Run a heavily-forking net (binary tree of FC layers) repeatedly
        under different executors and worker counts; the gradients must be
        identical on every run, i.e. parallel scheduling is deterministic.
        """
        from caffe2.python.model_helper import ModelHelper
        from caffe2.python import brew
        m = ModelHelper(name="test_model")
        n = 10
        d = 2
        depth = 2
        iters = 5
        np.random.seed(1701)
        # Build a binary tree of FC layers, summing at each node.
        for i in reversed(range(depth)):
            for j in range(2 ** i):
                bottom_1 = "{}_{}".format(i + 1, 2 * j)
                bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
                mid_1 = "{}_{}_m".format(i + 1, 2 * j)
                mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
                top = "{}_{}".format(i, j)
                brew.fc(
                    m,
                    bottom_1, mid_1,
                    dim_in=d, dim_out=d,
                    weight_init=('ConstantFill', dict(value=np.random.randn())),
                    bias_init=('ConstantFill', dict(value=np.random.randn())))
                brew.fc(
                    m,
                    bottom_2, mid_2,
                    dim_in=d, dim_out=d,
                    weight_init=('ConstantFill', dict(value=np.random.randn())),
                    bias_init=('ConstantFill', dict(value=np.random.randn())))
                m.net.Sum([mid_1, mid_2], top)
        m.net.SquaredL2Distance(["0_0", "label"], "xent")
        m.net.AveragedLoss("xent", "loss")
        input_to_grad = m.AddGradientOperators(["loss"])
        m.Proto().device_option.CopyFrom(gc)
        m.param_init_net.Proto().device_option.CopyFrom(gc)
        m.Proto().type = net_type
        m.Proto().num_workers = num_workers
        self.ws.run(m.param_init_net)
        print(str(m.Proto()))
        # Re-seeds and re-feeds the leaves every call so each run is
        # bit-identical in its inputs; returns the leaf gradients.
        def run():
            import numpy as np
            np.random.seed(1701)
            input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
            for input_blob in input_blobs:
                self.ws.create_blob(input_blob).feed(
                    np.random.randn(n, d).astype(np.float32),
                    device_option=gc)
            self.ws.create_blob("label").feed(
                np.random.randn(n, d).astype(np.float32),
                device_option=gc)
            self.ws.run(m.net)
            gradients = [
                self.ws.blobs[str(input_to_grad[input_blob])].fetch()
                for input_blob in input_blobs]
            return gradients
        outputs = [run() for _ in range(iters)]
        for output in outputs[1:]:
            np.testing.assert_array_equal(outputs[0], output)
        # Magic constant pinned to the seeded initialization above.
        self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
                               delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
@settings(deadline=None, max_examples=50)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
if (is_empty):
input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
def slice_ref(x, s, e):
if len(s.shape) == 0:
return x
slc = [slice(si, None if ei == -1 else ei) for si, ei in zip(s, e)]
return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
self.assertGradientChecks(gc, op, [input, start_vec, end_vec], 0, [0])
@given(data=hu.tensor(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_shape_with_axes(self, data, gc, dc):
def shape_ref(x, y):
return ([x.shape[i] for i in y],)
axes = np.random.randint(len(data.shape), size=10).tolist()
op = core.CreateOperator("Shape", ["data"], ["shape"], axes=axes)
self.assertReferenceChecks(gc, op, [data, axes], shape_ref)
@given(x=hu.tensor(), y=hu.tensor(), **hu.gcs_cpu_only)
@settings(deadline=1000)
def test_has_elements(self, x, y, gc, dc):
op = core.CreateOperator("HasElements", ["x", "y"], ["has_elements"])
self.assertReferenceChecks(gc, op, [x, y], lambda x, y: (len(x) > 0 or len(y) > 0, ))
op = core.CreateOperator("IsEmpty", ["x"], ["is_empty"])
self.assertReferenceChecks(gc, op, [x], lambda x: (len(x) == 0, ))
    @given(initial_iters=st.integers(0, 100),
           max_iters=st.integers(0, 100))
    @settings(deadline=10000)
    def test_should_stop_as_criteria_net_execution_step(
            self, initial_iters, max_iters):
        """A criteria net's external `stop` blob terminates the execution
        step: the Iter counter stops at max(initial_iters, max_iters)."""
        net = core.Net("net")
        net.Iter(["iter"], ["iter"])
        self.ws.create_blob("iter").feed(
            np.asarray([initial_iters]).astype(np.int64))
        self.ws.create_blob("num_iters").feed(
            np.asarray([max_iters]).astype(np.int64))
        # The criteria net runs before `net` each iteration and raises `stop`
        # once iter >= num_iters.
        criteria_net = core.Net("criteria")
        criteria_net.GE(["iter", "num_iters"], ["stop"])
        criteria_net.Proto().external_output.extend(["stop"])
        plan = core.Plan('plan')
        plan.AddStep(core.execution_step(
            'step', [criteria_net, net],
            should_stop_blob=core.BlobReference("stop")))
        self.ws.run(plan)
        iters = self.ws.blobs[("iter")].fetch()
        self.assertEqual(iters.dtype, np.int64)
        self.assertEqual(iters[0], max(initial_iters, max_iters))
    def test_disabled_execution_step(self):
        """A substep whose criterion immediately signals stop never runs its
        body net: its output stays 0.0, while enabled steps write 1.0."""
        def createNets(i, disabled):
            # Returns [init, criterion, body, ender] nets sharing an
            # 'output_i' blob and a 'should_stop_i' signal.
            should_stop = 'should_stop_{}'.format(i)
            output = 'output_{}'.format(i)
            # init content and stop signal
            init = core.Net("init_{}".format(i))
            init.ConstantFill(
                [],
                [output],
                shape=[1],
                value=0.0
            )
            init.Cast([output], [should_stop], to='bool')
            # decide if disabled or not
            criterion = core.Net("criterion_{}".format(i))
            tmp = criterion.ConstantFill(
                [],
                shape=[1],
                value=1.0 if disabled else 0.0
            )
            criterion.Cast([tmp], [should_stop], to='bool')
            criterion.Proto().external_output.extend([should_stop])
            # the body net is just to turn a 0 blob to 1
            net = core.Net("net_{}".format(i))
            net.ConstantFill(
                [],
                [output],
                shape=[1],
                value=1.0
            )
            # always end the loop
            ender = core.Net("ender_{}".format(i))
            tmp = ender.ConstantFill(
                [],
                shape=[1],
                value=1.0
            )
            ender.Cast([tmp], [should_stop], to='bool')
            ender.Proto().external_output.extend([should_stop])
            return [init, criterion, net, ender]
        # Step 2 is the disabled one; steps 1 and 3 run normally.
        nets = [createNets(1, False),
                createNets(2, True),
                createNets(3, False)]
        steps = [
            core.execution_step(
                'step_1', nets[0],
                should_stop_blob=core.BlobReference('should_stop_1')),
            core.execution_step(
                'step_2', nets[1],
                should_stop_blob=core.BlobReference('should_stop_2')),
            core.execution_step('step_3', nets[2])
        ]
        expected = [1.0, 0.0, 1.0]
        plan = core.Plan('plan')
        plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
        self.ws.run(plan)
        for i, _ in enumerate(nets):
            self.assertEqual(
                self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
                expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
@settings(deadline=10000)
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
    @given(initial_iters=st.integers(0, 100),
           num_iters=st.integers(0, 100),
           num_nets=st.integers(0, 5))
    @settings(deadline=None, max_examples=50)
    def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
                                               num_nets):
        """AtomicIter under concurrent substeps: the mutex-guarded counter
        must end at initial + num_iters * num_nets with no lost updates, and
        the op's stat registry must record the total iteration count."""
        init_net = core.Net("init_net")
        iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
        self.ws.create_blob("iter").feed(
            np.asarray([initial_iters]).astype(np.int64))
        concurrent_steps = core.ExecutionStep("concurrent_steps",
                                              num_iter=num_iters)
        # One counting net per substep, all incrementing the same blob.
        for i in range(num_nets):
            net = core.Net("net_{}".format(i))
            net.AtomicIter([iter_mutex, "iter"], ["iter"])
            step = core.ExecutionStep("step", [net])
            concurrent_steps.AddSubstep(step)
        concurrent_steps.SetConcurrentSubsteps(True)
        plan = core.Plan("plan")
        plan.AddStep(concurrent_steps)
        stats_net = core.Net("stats_net")
        stats_net.StatRegistryExport([], ["stats_key", "stats_val", "stats_ts"])
        self.ws.run(init_net)
        self.ws.run(plan)
        self.ws.run(stats_net)
        iters = self.ws.blobs[("iter")].fetch()
        self.assertEqual(iters.dtype, np.int64)
        self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
        # The stat counter only exists once at least one increment happened.
        if num_iters * num_nets > 0:
            stats_key = self.ws.blobs[("stats_key")].fetch()
            atomic_iter_key = b'atomic_iter/stats/iter/num_iter'
            self.assertTrue(atomic_iter_key in stats_key)
            stat_val = self.ws.blobs[("stats_val")].fetch()
            self.assertEqual(num_iters * num_nets, stat_val[list(stats_key).index(atomic_iter_key)])
@given(a=hu.tensor(),
src=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
dst=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
use_name=st.booleans(),
**hu.gcs)
@settings(deadline=1000)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = caffe2_pb2.TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
    @given(a=hu.tensor(),
           eps=hu.floats(min_value=1e-4, max_value=1e-2),
           a_grad=hu.tensor(elements=hu.floats(min_value=0.01, max_value=0.99)),
           eps_grad=hu.floats(min_value=1e-4, max_value=1e-3),
           **hu.gcs)
    @settings(deadline=10000)
    def test_logit(self, a, eps, a_grad, eps_grad, gc, dc):
        """Logit op: forward checked against a clipped numpy reference over
        the full input range; gradient checked only on (0.01, 0.99) where
        the curve is numerically well-conditioned."""
        def ref(data):
            # The op clips to [eps, 1 - eps] before taking log(p / (1 - p)).
            data = np.clip(data, eps, 1.0 - eps)
            return (np.log(data / (1 - data)), )
        # forward testing carried out in the full range of input
        # to ensure original test coverage.
        # gradient test carried out with reduced input range
        # because the sharp increase of the logit curve at 0 and 1
        # error increases dramtically when input is close to 0 or 1
        # and it will fail the test.
        # So we only run gradient test in the range of (0.01, 0.99)
        # very occasionally, test may fail due to random accumulated error
        # reduce test range to (0.02, 0.98) will improve test stability
        op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
        self.assertDeviceChecks(dc, op, [a], [0])
        self.assertReferenceChecks(gc, op, [a], ref)
        op_grad = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps_grad)
        self.assertGradientChecks(gc, op_grad, [a_grad], 0, [0],
                                  threshold=0.04, stepsize=2e-3)
@given(a=hu.tensor(elements=hu.floats(allow_nan=True)),
value=hu.floats(min_value=-10, max_value=10),
**hu.gcs)
@settings(deadline=1000)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
@settings(deadline=10000)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
**hu.gcs)
@settings(deadline=1000)
def test_constant_fill_from_tensor(self, data, gc, dc):
dtype = data.dtype.type
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = np.array([data.item(0)], dtype=dtype)
inputs = [data, value]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
op = core.CreateOperator(
'ConstantFill',
["X", "V"],
["Y"],
dtype=enum_type,
)
def ref(x, v):
outputs = np.full(shape=data.shape, fill_value=value[0], dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
    @given(t=st.integers(1, 5),
           n=st.integers(1, 5),
           d=st.integers(1, 5))
    @settings(deadline=10000)
    def test_elman_recurrent_network(self, t, n, d):
        """Hand-wired Elman RNN via the RecurrentNetwork op.

        Builds the per-timestep forward net (FC + Sum + Sigmoid), derives its
        backward net through the gradient registry, wires both into a
        RecurrentNetwork op via link/alias descriptors, then checks the
        forward pass against a numpy reference and gradient-checks input,
        weights and bias. t/n/d are seq length, batch size, hidden dim.
        """
        from caffe2.python import model_helper, brew
        np.random.seed(1701)
        step_net = model_helper.ModelHelper(name="Elman")
        # TODO: name scope external inputs and outputs
        step_net.Proto().external_input.extend(
            ["input_t", "seq_lengths", "timestep",
             "hidden_t_prev", "gates_t_w", "gates_t_b"])
        step_net.Proto().type = "simple"
        step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
        brew.fc(step_net,
                "hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
        step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
        step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
        # Initialize params for step net in the parent net
        for op in step_net.param_init_net.Proto().op:
            workspace.RunOperatorOnce(op)
        backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
            step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
        backward_mapping = {
            str(k): str(v) for k, v in viewitems(backward_mapping)
        }
        backward_step_net = core.Net("ElmanBackward")
        del backward_step_net.Proto().op[:]
        backward_step_net.Proto().op.extend(backward_ops)
        assert backward_mapping["input_t"] == "gates_t_grad"
        # (internal blob, external sequence blob, timestep offset) triples.
        links = [
            ("hidden_t_prev", "hidden", 0),
            ("hidden_t", "hidden", 1),
            ("input_t", "input", 0),
        ]
        link_internal, link_external, link_offset = zip(*links)
        backward_links = [
            ("hidden_t_prev_grad", "hidden_grad", 0),
            ("hidden_t_grad", "hidden_grad", 1),
            ("gates_t_grad", "input_grad", 0),
        ]
        backward_link_internal, backward_link_external, backward_link_offset = \
            zip(*backward_links)
        backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
        backward_step_net.Proto().external_input.extend(
            step_net.Proto().external_input)
        backward_step_net.Proto().external_input.extend(
            step_net.Proto().external_output)
        inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
        recurrent_inputs = ["hidden_input"]
        op = core.CreateOperator(
            "RecurrentNetwork",
            inputs,
            ["output", "hidden", "hidden_output", "step_workspaces"],
            alias_src=["hidden", "hidden"],
            alias_dst=["output", "hidden_output"],
            alias_offset=[1, -1],
            recurrent_states=["hidden"],
            initial_recurrent_state_ids=[
                inputs.index(i) for i in recurrent_inputs
            ],
            link_internal=link_internal,
            link_external=link_external,
            link_offset=link_offset,
            backward_link_internal=backward_link_internal,
            backward_link_external=backward_link_external,
            backward_link_offset=backward_link_offset,
            param=[inputs.index(p) for p in step_net.params],
            step_net=step_net.Proto(),
            backward_step_net=backward_step_net.Proto(),
            outputs_with_grads=[0],
        )
        workspace.FeedBlob(
            "input", np.random.randn(t, n, d).astype(np.float32))
        workspace.FeedBlob(
            "hidden_input", np.random.randn(1, n, d).astype(np.float32))
        workspace.FeedBlob(
            "seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
        # Numpy reference: h[t+1] = sigmoid(W h[t] + x[t]).
        def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
            T = input.shape[0]
            N = input.shape[1]
            D = input.shape[2]
            hidden = np.zeros(shape=(T + 1, N, D))
            assert hidden.shape[0] == T + 1
            assert hidden.shape[1] == N
            assert hidden.shape[2] == D
            hidden[0, :, :] = hidden_input
            for t in range(T):
                input_t = input[t].reshape(1, N, D)
                hidden_t_prev = hidden[t].reshape(1, N, D)
                gates = np.dot(hidden_t_prev, gates_w.T)
                gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
                hidden[t + 1] = sigmoid(gates)
            return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            [workspace.FetchBlob(name)
             for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
                          "hidden_input"]],
            reference,
            outputs_to_check=[0, 1, 2])
        # Gradient-check input (0), weights (2) and bias (3); seq_lengths (1)
        # is integral and has no gradient.
        for param in [0, 2, 3]:
            self.assertGradientChecks(
                hu.cpu_do,
                op,
                [workspace.FetchBlob(name)
                 for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
                              "hidden_input"]],
                param,
                [0])
@settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(
n * block_size * block_size,
c,
(h + 2 * pad) // block_size,
(w + 2 * pad) // block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=hu.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
@settings(deadline=10000)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=hu.floats(min_value=1e-4, max_value=1e-2),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_instance_norm(self, size, input_channels, batch_size, order,
                           epsilon, gc, dc):
        """InstanceNorm against a numpy per-(sample, channel) normalization;
        then re-runs the op 100 times to confirm inputs are never mutated."""
        op = core.CreateOperator(
            "InstanceNorm",
            ["X", "scale", "bias"],
            ["Y"],
            order=order,
            epsilon=epsilon,
        )
        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5
        if order == "NHWC":
            X = X.swapaxes(1, 2).swapaxes(2, 3)
        # Normalize each (sample, channel) spatial plane independently,
        # then apply the per-channel affine transform.
        def ref_nchw(x, scale, bias):
            x = x.reshape(batch_size * input_channels, size * size)
            y = (x - x.mean(1)[:, np.newaxis])
            y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
            y = y.reshape(batch_size, input_channels, size, size)
            y = y * scale.reshape(1, input_channels, 1, 1)
            y = y + bias.reshape(1, input_channels, 1, 1)
            return (y, )
        # NHWC: transpose to NCHW, reuse the NCHW reference, transpose back.
        def ref_nhwc(x, scale, bias):
            x = x.swapaxes(2, 3).swapaxes(1, 2)
            y = ref_nchw(x, scale, bias)[0]
            return (y.swapaxes(1, 2).swapaxes(2, 3), )
        self.assertReferenceChecks(
            gc, op, [X, scale, bias],
            ref_nchw if order == "NCHW" else ref_nhwc)
        # TODO(jiayq): when there are backward and GPU implementations, enable
        # these two.
        # self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
        # self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
        ws = workspace.C.Workspace()
        feeds = [("X", X), ("scale", scale), ("bias", bias)]
        for blob, arr in feeds:
            ws.create_blob(blob).feed(arr)
        for _ in range(100):
            ws.run(op)
        # Repeated runs must leave every input blob untouched.
        for blob, arr in feeds:
            np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
    @given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
        elements=hu.elements_of_type(dt), dtype=dt)),
           **hu.gcs)
    @settings(deadline=10000)
    def test_sparse_to_dense(self, inp, gc, dc):
        """Check SparseToDense (with and without a shape-hint blob) against a
        NumPy scatter-add reference."""
        first_dim, X, I = inp
        if X.dtype != np.dtype('float32') and gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} :
            # Cuda only support 32 bit float
            print("Bailout {}".format(X.dtype))
            return
        if gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP}:
            # Cuda version only support int32
            I = I.astype(np.int32)
        if X.dtype in (np.dtype('int64'), np.dtype('int32')):
            # Guard against int32 overflow in the accumulated output.
            assume((np.abs(X.ravel()).max() < np.iinfo('int32').max).all())
            assume(np.abs(X.ravel()).astype(np.int64).sum() < np.iinfo('int32').max)
        # values don't matter
        D = np.zeros((first_dim,) + X.shape[1:]).astype(X.dtype)
        op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
        op_noshapeinfer = core.CreateOperator("SparseToDense", ["I", "X"], ["Y"])

        def sparse_to_dense(I, X, D):
            # D contributes only its shape; rows of X are scatter-added into
            # the output at the positions given by I.
            O = np.zeros(D.shape, dtype=X.dtype)
            for i, p in enumerate(I):
                O[p] += X[i]
            return [O]

        def sparse_to_dense_noshapeinfer(I, X):
            # Without D the output's first dimension is inferred as max(I) + 1.
            O = np.zeros((np.max(I) + 1,) + X.shape[1:], dtype=X.dtype)
            for i, p in enumerate(I):
                O[p] += X[i]
            return [O]

        self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
        self.assertReferenceChecks(gc, op_noshapeinfer, [I, X], sparse_to_dense_noshapeinfer)
        if X.dtype == np.float32:
            # Gradient is checked only w.r.t. the values input X (index 1).
            self.assertGradientChecks(gc, op, [I, X, D], 1, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=hu.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=hu.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
def dotproduct(X, Y):
import numpy.matlib as npm
if M < K:
Z = npm.repmat(X, 1, K // M)
return (np.sum(Z * Y, axis=1), )
else:
Z = npm.repmat(Y, 1, M // K)
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
    @given(N=st.integers(min_value=10, max_value=100),
           M=st.integers(min_value=2, max_value=10),
           num_buckets=st.integers(min_value=1, max_value=5),
           **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
        """Check AccumulateHistogram against a NumPy reference.

        Bucket layout: index 0 holds values below lower_bound, index
        num_buckets + 1 holds values >= upper_bound, and the in-range values
        are split into num_buckets uniform segments (indices 1..num_buckets).
        """
        X = np.random.rand(N, M).astype(np.float32)
        lower_bound, upper_bound = 0.1, 0.9
        op = core.CreateOperator("AccumulateHistogram", ["X"],
                                 ['cur_hist', 'acc_hist'],
                                 lower_bound=lower_bound,
                                 upper_bound=upper_bound,
                                 num_buckets=num_buckets)

        def histogram(X):
            hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
            segment = (upper_bound - lower_bound) / num_buckets
            # First map every element to its bucket index...
            Y = np.zeros((N, M), dtype=np.int32)
            Y[X < lower_bound] = 0
            Y[X >= upper_bound] = num_buckets + 1
            Y[(X >= lower_bound) & (X < upper_bound)] = \
                ((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
                 segment + 1).astype(np.int32)
            # ...then count occurrences per bucket.
            for i in range(Y.shape[0]):
                for j in range(Y.shape[1]):
                    hist[Y[i][j]] += 1
            # The op runs once here, so the accumulated histogram equals the
            # current one.
            cur_hist, acc_hist = hist, hist
            return [cur_hist, acc_hist]

        self.assertDeviceChecks(dc, op, [X], [0, 1])
        self.assertReferenceChecks(gc, op, [X], histogram)
    @settings(max_examples=1, deadline=None)
    @given(
        queue_capacity=st.integers(2, 2),
        time_sleep=st.integers(5, 10),
        num_blobs_to_equeue=st.integers(1, 1),
        num_blobs_to_dequeue=st.integers(2, 2),
    )
    def test_safe_dequeue_blob__raises_exception_when_hang(
        self,
        queue_capacity,
        time_sleep,
        num_blobs_to_equeue,
        num_blobs_to_dequeue,
    ):
        r"""
        Tests SafeDequeueBlobsOp being cancellable.

        Create a queue with the number of BlobsQueue less than the number
        SafeDequeueBlobs to cause the hanging behavior when running the Net.

        Then call cancel from the previous sleeping thread to ensure exception
        is raised.
        """

        def _net_instance_cancel(net_instance):
            # Give the net time to block on the dequeue before cancelling it.
            time.sleep(time_sleep)
            net_instance.cancel()

        init_net = core.Net("init_net")
        init_net.Proto().type = "async_scheduling"
        # The queue provides fewer blobs per entry than the dequeue below
        # requests, so the dequeue can never complete and the net hangs.
        queue = init_net.CreateBlobsQueue(
            [],
            "queue_name",
            capacity=queue_capacity,
            num_blobs=num_blobs_to_equeue,
        )
        ws = workspace.Workspace()
        ws.create_net(init_net).run()
        net = core.Net("net")
        net.Proto().type = "async_scheduling"
        blobs = net.SafeDequeueBlobs([queue], num_blobs_to_dequeue)
        net_instance = ws.create_net(net)
        # Cancel from a background thread while run() is blocked; the
        # cancellation must surface as an exception in run().
        t = threading.Thread(target=_net_instance_cancel, args=[net_instance])
        t.start()
        with self.assertRaises(Exception):
            net_instance.run()
        t.join()
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
ut_launch.py | import socket
import random
import os
import pathlib
from datetime import datetime
from billiard import Process
from ament_index_python.packages import get_package_prefix
from launch import LaunchService, LaunchDescription
from launch.actions.execute_process import ExecuteProcess
from launch_ros.actions import Node
import gym_gazebo2
from gym_gazebo2.utils import ut_generic
def startLaunchServiceProcess(launchDesc):
    """Run a LaunchService for *launchDesc* in a background daemon process.

    Args:
        launchDesc : LaunchDescription obj.
    Returns:
        The started billiard Process.
    """
    # Create the service and hand it the description to execute.
    service = LaunchService()
    service.include_launch_description(launchDesc)
    launchProcess = Process(target=service.run)
    # The daemon process is terminated automatically before the main program
    # exits, to avoid leaving orphaned processes running.
    launchProcess.daemon = True
    launchProcess.start()
    return launchProcess
def isPortInUse(port):
    """Checks if the given port is being used.

    Args:
        port(int): Port number.
    Returns:
        bool: True if the port is being used, False otherwise.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with probe:
        # connect_ex returns 0 only when something accepts the connection.
        result = probe.connect_ex(('localhost', port))
    return result == 0
def getExclusiveNetworkParameters():
    """Creates appropriate values for ROS_DOMAIN_ID and GAZEBO_MASTER_URI.

    Picks random, currently-unused ports for the ROS domain and the Gazebo
    master, and records the choice in a /tmp log file.

    Returns:
        Dictionary {ros_domain_id (string), gazebo_master_uri (string)}
    """
    randomPortROS = random.randint(0, 230)
    randomPortGazebo = random.randint(10000, 15000)
    # Re-roll until nothing on this host is listening on the chosen ports.
    while isPortInUse(randomPortROS):
        print("Randomly selected port is already in use, retrying.")
        randomPortROS = random.randint(0, 230)
    while isPortInUse(randomPortGazebo):
        print("Randomly selected port is already in use, retrying.")
        randomPortGazebo = random.randint(10000, 15000)
    # Save network segmentation related information in a temporary folder.
    tempPath = '/tmp/gym-gazebo-2/running/'
    pathlib.Path(tempPath).mkdir(parents=True, exist_ok=True)
    # Remove old tmp files.
    ut_generic.cleanOldFiles(tempPath, ".log", 2)
    filename = datetime.now().strftime('running_since_%H_%M__%d_%m_%Y.log')
    # 'with' guarantees the handle is closed even if the write fails
    # (the original leaked the handle on error).
    with open(tempPath + '/' + filename, 'w+') as file:
        file.write(filename + '\nROS_DOMAIN_ID=' + str(randomPortROS) \
            + '\nGAZEBO_MASTER_URI=http://localhost:' + str(randomPortGazebo))
    return {'ros_domain_id':str(randomPortROS),
            'gazebo_master_uri':"http://localhost:" + str(randomPortGazebo)}
def generateLaunchDescriptionMara(gzclient, realSpeed, multiInstance, port, urdf):
    """
    Returns ROS2 LaunchDescription object.

    Args:
        gzclient: truthy to launch the Gazebo GUI client instead of gzserver only.
        realSpeed: bool True if RTF must be set to 1, False if RTF must be set to maximum.
        multiInstance: truthy to isolate this instance's ROS/Gazebo network.
        port: Gazebo master port; 11345 means "use the default".
        urdf: robot description argument forwarded to spawn_mara.py.

    Returns None if collecting the environment variables fails.
    """
    installDir = get_package_prefix('mara_gazebo_plugins')

    # Make sure Gazebo can find this package's models and plugins.
    if 'GAZEBO_MODEL_PATH' in os.environ:
        os.environ['GAZEBO_MODEL_PATH'] = os.environ['GAZEBO_MODEL_PATH'] + ':' + installDir \
            + '/share'
    else:
        os.environ['GAZEBO_MODEL_PATH'] = installDir + "/share"

    if 'GAZEBO_PLUGIN_PATH' in os.environ:
        os.environ['GAZEBO_PLUGIN_PATH'] = os.environ['GAZEBO_PLUGIN_PATH'] + ':' + installDir \
            + '/lib'
    else:
        os.environ['GAZEBO_PLUGIN_PATH'] = installDir + '/lib'

    if port != 11345: # Default gazebo port
        # Manual segmentation: the caller picked the port/domain explicitly.
        os.environ["ROS_DOMAIN_ID"] = str(port)
        os.environ["GAZEBO_MASTER_URI"] = "http://localhost:" + str(port)
        print("******* Manual network segmentation *******")
        print("ROS_DOMAIN_ID=" + os.environ['ROS_DOMAIN_ID'])
        print("GAZEBO_MASTER_URI=" + os.environ['GAZEBO_MASTER_URI'])
        print("")
    elif multiInstance:
        # Exclusive network segmentation, which allows to launch multiple instances of ROS2+Gazebo
        networkParams = getExclusiveNetworkParameters()
        os.environ["ROS_DOMAIN_ID"] = networkParams.get('ros_domain_id')
        os.environ["GAZEBO_MASTER_URI"] = networkParams.get('gazebo_master_uri')
        print("******* Exclusive network segmentation *******")
        print("ROS_DOMAIN_ID=" + networkParams.get('ros_domain_id'))
        print("GAZEBO_MASTER_URI=" + networkParams.get('gazebo_master_uri'))
        print("")

    try:
        # Collect the upper-case environment variables to forward to the
        # Gazebo process.
        # NOTE(review): os.environ.__dict__["_data"] with .decode() on the
        # keys looks like a Python-2 era access pattern; on Python 3 the
        # environ keys are str — verify this still works as intended.
        envs = {}
        for key in os.environ.__dict__["_data"]:
            key = key.decode("utf-8")
            if key.isupper():
                envs[key] = os.environ[key]
    except BaseException as exception:
        print("Error with Envs: " + str(exception))
        return None

    # Gazebo visual interface. GUI/no GUI options.
    if gzclient:
        gazeboCmd = "gazebo"
    else:
        gazeboCmd = "gzserver"

    # Creation of ROS2 LaunchDescription obj.
    if realSpeed:
        worldPath = os.path.join(os.path.dirname(gym_gazebo2.__file__), 'worlds',
                                 'empty.world')
    else:
        # The speed-up world runs the simulation faster than real time.
        worldPath = os.path.join(os.path.dirname(gym_gazebo2.__file__), 'worlds',
                                 'empty_speed_up.world')

    launchDesc = LaunchDescription([
        ExecuteProcess(
            cmd=[gazeboCmd, '-s', 'libgazebo_ros_factory.so', '-s',
                 'libgazebo_ros_init.so', worldPath], output='screen', env=envs),
        Node(package='mara_utils_scripts', node_executable='spawn_mara.py',
             arguments=[urdf],
             output='screen'),
        Node(package='hros_cognition_mara_components',
             node_executable='hros_cognition_mara_components', output='screen',
             arguments=["-motors", installDir \
                 + "/share/hros_cognition_mara_components/motors.yaml", "sim"]),
        Node(package='mara_contact_publisher', node_executable='mara_contact_publisher',
             output='screen')
    ])

    return launchDesc
def launchReal():
    """LaunchDescription for driving the real MARA robot (no simulation)."""
    # TODO: it is hard-coded
    os.environ["ROS_DOMAIN_ID"] = str(1)
    os.environ["RMW_IMPLEMENTATION"] = "rmw_fastrtps_cpp"
    prefix = get_package_prefix('mara_gazebo_plugins')
    motorsYaml = prefix + "/share/hros_cognition_mara_components/motors.yaml"
    cognitionNode = Node(
        package='hros_cognition_mara_components',
        node_executable='hros_cognition_mara_components',
        arguments=["-motors", motorsYaml, "real"],
        output='screen')
    return LaunchDescription([cognitionNode])
|
runner.py | #!/usr/bin/env python2
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import Popen, PIPE, STDOUT
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import functools
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import re
import shlex
import shutil
import string
import sys
import tempfile
import time
import unittest
import urllib
import webbrowser
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler
from httplib import HTTPConnection
from urllib import unquote
else:
from http.server import HTTPServer, BaseHTTPRequestHandler, SimpleHTTPRequestHandler
from http.client import HTTPConnection
from urllib.parse import unquote
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import parallel_runner
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, DEBUG, PYTHON, LLVM_TARGET, ASM_JS_TARGET, EMSCRIPTEN_TEMP_DIR, WASM_TARGET, SPIDERMONKEY_ENGINE, WINDOWS, V8_ENGINE, NODE_JS
from tools.shared import asstr, get_canonical_temp_dir, Building, run_process, limit_size, try_delete, to_cc, asbytes, safe_copy, Settings
from tools import jsrun, shared, line_endings
def path_from_root(*pathelems):
    """Join *pathelems* onto the Emscripten repository root path."""
    pieces = (__rootpath__,) + pathelems
    return os.path.join(*pieces)
sys.path.append(path_from_root('third_party/websockify'))

logger = logging.getLogger(__file__)

# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')

# When set, setUp/tearDown snapshot the temp dir and fail tests that leave
# new temporary files behind.
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))

EMTEST_WASM_PTHREADS = int(os.getenv('EMTEST_WASM_PTHREADS', '1'))

# Also support the old name: EM_SAVE_DIR
EMTEST_SAVE_DIR = os.getenv('EMTEST_SAVE_DIR', os.getenv('EM_SAVE_DIR'))

# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
# checks if browser testing is enabled
def has_browser():
    """Return True unless browser tests are disabled via EMTEST_BROWSER=0."""
    disabled = EMTEST_BROWSER == '0'
    return not disabled
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation=''):
    """Wrap *func* so the test is skipped when self.<condition>() is truthy."""
    suffix = ' : %s' % explanation if explanation else ''

    def decorated(self):
        should_skip = self.__getattribute__(condition)()
        if should_skip:
            self.skipTest(condition + suffix)
        func(self)

    return decorated
def needs_dlfcn(func):
    """Decorator: verify dlfcn support via self.check_dlfcn() before the test."""
    def wrapper(self):
        # check_dlfcn() skips the test when dlfcn is unsupported.
        self.check_dlfcn()
        return func(self)
    return wrapper
def no_wasm_backend(note=''):
    """Decorator factory: skip the test when self.is_wasm_backend() is truthy."""
    return lambda f: skip_if(f, 'is_wasm_backend', note)
def no_windows(note=''):
    """Skip the decorated test entirely when running on Windows."""
    if not WINDOWS:
        # Not on Windows: decorate with the identity.
        return lambda f: f
    return unittest.skip(note)
@contextlib.contextmanager
def env_modify(updates):
    """A context manager that updates os.environ."""
    # This could also be done with mock.patch.dict() but taking a dependency
    # on the mock library is probably not worth the benefit.
    saved = os.environ.copy()
    print("env_modify: " + str(updates))
    # A value of None means "remove this variable for the duration".
    to_clear = [k for k, v in updates.items() if v is None]
    to_set = {k: v for k, v in updates.items() if v is not None}
    os.environ.update(to_set)
    for k in to_clear:
        os.environ.pop(k, None)
    try:
        yield
    finally:
        # Restore the environment exactly as it was.
        os.environ.clear()
        os.environ.update(saved)
# Decorator version of env_modify
def with_env_modify(updates):
    """Decorate a test method so its body runs inside env_modify(updates)."""
    def wrap(func):
        def run_with_env(self):
            with env_modify(updates):
                return func(self)
        return run_with_env
    return wrap
@contextlib.contextmanager
def chdir(dir):
    """A context manager that performs actions in the given directory."""
    previous = os.getcwd()
    os.chdir(dir)
    try:
        yield
    finally:
        # Always return to where we started, even on error.
        os.chdir(previous)
# The core test modes
core_test_modes = [
    'asm0',
    'asm1',
    'asm2',
    'asm3',
    'asm2g',
    'asm2f',
    'binaryen0',
    'binaryen1',
    'binaryen2',
    'binaryen3',
    'binaryens',
    'binaryenz',
    'asmi',
    'asm2i',
]

# The default core test mode, used when none is specified
default_core_test_mode = 'binaryen0'

# The non-core test modes
non_core_test_modes = [
    'other',
    'browser',
    'sanity',
    'sockets',
    'interactive',
]

# Global counter of tests seen so far (used by the runner machinery).
test_index = 0
class RunnerCore(unittest.TestCase):
    """Base class for Emscripten test suites: manages per-test working
    directories, settings overrides, build helpers and output assertions."""

    # Extra flags passed to emcc; None means "not configured yet".
    emcc_args = None

    # default temporary directory settings. set_temp_dir may be called later to
    # override these
    temp_dir = TEMP_DIR
    canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)

    # When EMTEST_SAVE_DIR is set, reuse (and keep) a fixed working directory.
    save_dir = EMTEST_SAVE_DIR
    save_JS = 0

    # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
    # Change this to None to get stderr reporting, for debugging purposes
    stderr_redirect = STDOUT

    env = {}
    # Per-test settings overrides applied on top of the global Settings.
    settings_mods = {}

    # Snapshot of temp-dir contents, filled by setUp for the leak detector.
    temp_files_before_run = []
def is_emterpreter(self):
return self.emcc_args and 'EMTERPRETIFY=1' in self.emcc_args
def is_split_memory(self):
return self.emcc_args and 'SPLIT_MEMORY=' in str(self.emcc_args)
def is_wasm(self):
return (self.emcc_args and 'WASM=0' not in str(self.emcc_args)) or self.is_wasm_backend()
    def is_wasm_backend(self):
        # Reflects the WASM_BACKEND setting (including per-test overrides).
        return self.get_setting('WASM_BACKEND')
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
if self.is_wasm_backend():
self.skipTest('no shared modules in wasm backend')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or self.get_setting('WASM'):
return False
if self.emcc_args is None:
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Oz'))
return opt_supports
    def set_temp_dir(self, temp_dir):
        # Override the class-level temp dir for this instance and keep the
        # canonical form in sync.
        self.temp_dir = temp_dir
        self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
        # Explicitly set dedicated temporary directory for parallel tests
        os.environ['EMCC_TEMP_DIR'] = self.temp_dir
    @classmethod
    def setUpClass(cls):
        # Fail fast if the Emscripten installation/configuration is broken.
        super(RunnerCore, cls).setUpClass()
        print('(checking sanity from test runner)') # do this after we set env stuff
        shared.check_sanity(force=True)
    def setUp(self):
        """Create a fresh per-test working directory (or reuse the save dir)
        and reset per-test state."""
        super(RunnerCore, self).setUp()
        self.settings_mods = {}

        if EMTEST_DETECT_TEMPFILE_LEAKS:
            # Snapshot the temp dir so tearDown can report anything new.
            for root, dirnames, filenames in os.walk(self.temp_dir):
                for dirname in dirnames:
                    self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
                for filename in filenames:
                    self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))

        self.banned_js_engines = []
        self.use_all_engines = EMTEST_ALL_ENGINES
        if self.save_dir:
            # Fixed, persistent directory for debugging sessions.
            self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
            if not os.path.exists(self.working_dir):
                os.makedirs(self.working_dir)
        else:
            self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
        os.chdir(self.working_dir)

        # Use emscripten root for node module lookup
        os.environ['NODE_PATH'] = path_from_root('node_modules')

        if not self.save_dir:
            # Remember whether .ll files pre-existed (see the commented-out
            # leak assertions at the end of tearDown).
            self.has_prev_ll = False
            for temp_file in os.listdir(TEMP_DIR):
                if temp_file.endswith('.ll'):
                    self.has_prev_ll = True
    def tearDown(self):
        """Remove the per-test working directory and, when enabled, fail the
        test if it left new temporary files behind."""
        if not self.save_dir:
            # rmtree() fails on Windows if the current working directory is inside the tree.
            os.chdir(os.path.dirname(self.get_dir()))
            try_delete(self.get_dir())

        if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
            # Re-walk the temp dir and diff against the setUp snapshot.
            temp_files_after_run = []
            for root, dirnames, filenames in os.walk(self.temp_dir):
                for dirname in dirnames:
                    temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
                for filename in filenames:
                    temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))

            # Our leak detection will pick up *any* new temp files in the temp dir. They may not be due to
            # us, but e.g. the browser when running browser tests. Until we figure out a proper solution,
            # ignore some temp file names that we see on our CI infrastructure.
            ignorable_files = ['/tmp/tmpaddon']

            left_over_files = list(set(temp_files_after_run) - set(self.temp_files_before_run) - set(ignorable_files))
            if len(left_over_files):
                print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
                for f in left_over_files:
                    print('leaked file: ' + f, file=sys.stderr)
                self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')

        # Make sure we don't leave stuff around
        # if not self.has_prev_ll:
        #   for temp_file in os.listdir(TEMP_DIR):
        #     assert not temp_file.endswith('.ll'), temp_file
        #     # TODO assert not temp_file.startswith('emscripten_'), temp_file
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
    def set_setting(self, key, value):
        # Record a per-test override; serialized into -s flags at build time.
        self.settings_mods[key] = value
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
ret += ['-s', '{}={}'.format(key, json.dumps(value))]
return ret
    def get_dir(self):
        # Per-test working directory (created in setUp).
        return self.working_dir

    def in_dir(self, *pathelems):
        # Join *pathelems* under the test's working directory.
        return os.path.join(self.get_dir(), *pathelems)

    def get_stdout_path(self):
        # File that run_generated_code redirects engine stdout into.
        return os.path.join(self.get_dir(), 'stdout')
def hardcode_arguments(self, filename, args):
# Hardcode in the arguments, so js is portable without manual commandlinearguments
if not args:
return
js = open(filename).read()
open(filename, 'w').write(js.replace('run();', 'run(%s + Module["arguments"]);' % str(args)))
    def prep_ll_run(self, filename, ll_file, force_recompile=False, build_ll_hook=None):
        """Prepare <filename>.o from *ll_file* (.ll, .bc or .o input),
        optionally running LLVM opts and/or a user hook over the textual IR.

        Args:
            filename: base path; the bitcode ends up at filename + '.o'.
            ll_file: input LLVM IR (.ll) or bitcode (.bc/.o) file.
            force_recompile: force the dis/as round trip even without opts.
            build_ll_hook: callable(filename) that may edit <filename>.o.ll;
                a truthy return requests a second pass after LLVM opts.
        """
        # force_recompile = force_recompile or os.stat(filename + '.o.ll').st_size > 50000 # if the file is big, recompile just to get ll_opts # Recompiling just for dfe in ll_opts is too costly

        def fix_target(ll_filename):
            # Rewrite the asm.js triple/datalayout in the IR to the wasm ones
            # when the active LLVM target differs from the asm.js target.
            if LLVM_TARGET == ASM_JS_TARGET:
                return
            with open(ll_filename) as f:
                contents = f.read()
            if LLVM_TARGET in contents:
                return
            asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
            wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
            assert(ASM_JS_TARGET in contents)
            assert(asmjs_layout in contents)
            contents = contents.replace(asmjs_layout, wasm_layout)
            contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
            with open(ll_filename, 'w') as f:
                f.write(contents)

        if Building.LLVM_OPTS or force_recompile or build_ll_hook:
            # Slow path: round-trip through textual IR so opts/hooks can run.
            if ll_file.endswith(('.bc', '.o')):
                if ll_file != filename + '.o':
                    shutil.copy(ll_file, filename + '.o')
                Building.llvm_dis(filename)
            else:
                shutil.copy(ll_file, filename + '.o.ll')
                fix_target(filename + '.o.ll')

            if build_ll_hook:
                need_post = build_ll_hook(filename)
            Building.llvm_as(filename)
            shutil.move(filename + '.o.ll', filename + '.o.ll.pre') # for comparisons later
            if Building.LLVM_OPTS:
                Building.llvm_opts(filename)
            Building.llvm_dis(filename)
            if build_ll_hook and need_post:
                # The hook requested a post-opts pass: run it and re-assemble.
                build_ll_hook(filename)
                Building.llvm_as(filename)
                shutil.move(filename + '.o.ll', filename + '.o.ll.post') # for comparisons later
                Building.llvm_dis(filename)

            Building.llvm_as(filename)
        else:
            # Fast path: copy the input into place without recompiling.
            if ll_file.endswith('.ll'):
                safe_copy(ll_file, filename + '.o.ll')
                fix_target(filename + '.o.ll')
                Building.llvm_as(filename)
            else:
                safe_copy(ll_file, filename + '.o')
    def get_emcc_transform_args(self, js_transform):
        """Write *js_transform* (Python source defining process(path)) into a
        script file and return the matching emcc --js-transform flags."""
        if not js_transform:
            return []

        transform_filename = os.path.join(self.get_dir(), 'transform.py')
        with open(transform_filename, 'w') as f:
            # Make the Emscripten root importable from the transform script.
            f.write('\nimport sys\nsys.path += [%r]\n' % path_from_root(''))
            f.write(js_transform)
            f.write('\nprocess(sys.argv[1])\n')
        return ['--js-transform', "%s '%s'" % (PYTHON, transform_filename)]
# Generate JS from ll, and optionally modify the generated JS with a post_build function. Note
# that post_build is called on unoptimized JS, so we send it to emcc (otherwise, if run after
# emcc, it would not apply on the optimized/minified JS)
def ll_to_js(self, filename, js_transform):
emcc_args = self.emcc_args
if emcc_args is None:
emcc_args = []
transform_args = self.get_emcc_transform_args(js_transform)
Building.emcc(filename + '.o', self.serialize_settings() + emcc_args + transform_args + Building.COMPILER_TEST_OPTS, filename + '.o.js')
# Build JavaScript code from source code
def build(self, src, dirname, filename, main_file=None,
additional_files=[], libraries=[], includes=[], build_ll_hook=None,
post_build=None, js_outfile=True, js_transform=None):
Building.LLVM_OPT_OPTS = ['-O3'] # pick llvm opts here, so we include changes to Settings in the test case code
# Copy over necessary files for compiling the source
if main_file is None:
f = open(filename, 'w')
f.write(src)
f.close()
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
if build_ll_hook:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in [filename] + additional_files:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except:
pass
args = [PYTHON, EMCC] + Building.COMPILER_TEST_OPTS + self.serialize_settings() + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
assert os.path.exists(f + '.o')
# Link all files
object_file = filename + '.o'
if len(additional_files) + len(libraries):
shutil.move(object_file, object_file + '.alone')
inputs = [object_file + '.alone'] + [f + '.o' for f in additional_files] + libraries
Building.link(inputs, object_file)
if not os.path.exists(object_file):
print("Failed to link LLVM binaries:\n\n", object_file)
raise Exception("Linkage error")
# Finalize
self.prep_ll_run(filename, object_file, build_ll_hook=build_ll_hook)
# BC => JS
self.ll_to_js(filename, js_transform)
else:
# "fast", new path: just call emcc and go straight to JS
all_files = [filename] + additional_files + libraries
for i in range(len(all_files)):
if '.' not in all_files[i]:
shutil.move(all_files[i], all_files[i] + '.bc')
all_files[i] += '.bc'
args = [PYTHON, EMCC] + Building.COMPILER_TEST_OPTS + self.serialize_settings() + \
self.emcc_args + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + self.get_emcc_transform_args(js_transform) + \
['-o', filename + '.o.js']
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
if js_outfile:
assert os.path.exists(filename + '.o.js')
else:
assert os.path.exists(filename + '.o.wasm')
if post_build:
post_build(filename + '.o.js')
if self.emcc_args is not None and js_outfile:
src = open(filename + '.o.js').read()
if self.uses_memory_init_file():
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def validate_asmjs(self, err):
m = re.search("asm.js type error: '(\w+)' is not a (standard|supported) SIMD type", err)
if m:
# Bug numbers for missing SIMD types:
bugs = {
'Int8x16': 1136226,
'Int16x8': 1136226,
'Uint8x16': 1244117,
'Uint16x8': 1244117,
'Uint32x4': 1240796,
'Float64x2': 1124205,
}
simd = m.group(1)
if simd in bugs:
print(("\nWARNING: ignoring asm.js type error from {} due to implementation not yet available in SpiderMonkey." +
" See https://bugzilla.mozilla.org/show_bug.cgi?id={}\n").format(simd, bugs[simd]), file=sys.stderr)
err = err.replace(m.group(0), '')
# check for asm.js validation
if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err:
print("[was asm.js'ified]", file=sys.stderr)
# check for an asm.js validation error, if we expect one
elif 'asm.js' in err and not self.is_wasm() and self.get_setting('ASM_JS') == 1:
raise Exception("did NOT asm.js'ify: " + err)
err = '\n'.join([line for line in err.split('\n') if 'uccessfully compiled asm.js code' not in line])
return err
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
    def count_wasm_contents(self, wasm_binary, what):
        """Return the integer metric named *what* from `wasm-opt --metrics`."""
        out = run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
        # output is something like
        # [?] : 125
        for line in out.splitlines():
            if '[' + what + ']' in line:
                ret = line.split(':')[1].strip()
                return int(ret)
        raise Exception('Failed to find [%s] in wasm-opt output' % what)
    def get_wasm_text(self, wasm_binary):
        # Disassemble the wasm binary to its text format via binaryen wasm-dis.
        return run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_generated_code(self, engine, filename, args=[], check_timeout=True, output_nicerizer=None, assert_returncode=0):
stdout = os.path.join(self.get_dir(), 'stdout') # use files, as PIPE can get too full and hang us
stderr = os.path.join(self.get_dir(), 'stderr')
try:
cwd = os.getcwd()
except:
cwd = None
os.chdir(self.get_dir())
self.assertEqual(line_endings.check_line_endings(filename), 0) # Make sure that we produced proper line endings to the .js file we are about to run.
jsrun.run_js(filename, engine, args, check_timeout, stdout=open(stdout, 'w'), stderr=open(stderr, 'w'), assert_returncode=assert_returncode)
if cwd is not None:
os.chdir(cwd)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if engine == SPIDERMONKEY_ENGINE and self.get_setting('ASM_JS') == 1:
err = self.validate_asmjs(err)
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
return ret
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2)
def assertIdentical(self, values, y):
    """Assert that `y` equals `values` (or, if `values` is a list/tuple,
    at least one of its entries); raise with a unified diff otherwise."""
    candidates = values if type(values) in [list, tuple] else [values]
    if any(candidate == y for candidate in candidates):
        return  # success
    last = candidates[-1]
    diff_text = ''.join(a.rstrip() + '\n' for a in difflib.unified_diff(
        last.split('\n'), y.split('\n'), fromfile='expected', tofile='actual'))
    raise Exception("Expected to have '%s' == '%s', diff:\n\n%s" % (
        limit_size(candidates[0]), limit_size(y),
        limit_size(diff_text)
    ))
def assertTextDataContained(self, text1, text2):
    """Like assertContained, but compares modulo line-ending style."""
    return self.assertContained(text1.replace('\r\n', '\n'),
                                text2.replace('\r\n', '\n'))
def assertContained(self, values, string, additional_info=''):
    """Assert that `string` contains `values` (or one of them, if a
    list/tuple); raise with a unified diff otherwise.

    `string` may be a callable for lazy evaluation.
    """
    if type(values) not in [list, tuple]:
        values = [values]
    values = [asstr(v) for v in values]
    if callable(string):
        string = string()
    if any(value in string for value in values):
        return  # success
    diff_text = ''.join(a.rstrip() + '\n' for a in difflib.unified_diff(
        values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual'))
    raise Exception("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
        limit_size(values[0]), limit_size(string),
        limit_size(diff_text),
        additional_info
    ))
def assertNotContained(self, value, string):
    """Assert that `string` does NOT contain `value`; raise with a diff otherwise.

    Both arguments may be callables for lazy evaluation.
    """
    value = value() if callable(value) else value  # lazy loading
    string = string() if callable(string) else string
    if value not in string:
        return
    diff_text = ''.join(a.rstrip() + '\n' for a in difflib.unified_diff(
        value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual'))
    raise Exception("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string),
        limit_size(diff_text)
    ))
library_cache = {}
def get_build_dir(self):
    """Return the 'building' subdirectory of the test dir, creating it if needed."""
    ret = os.path.join(self.get_dir(), 'building')
    # exist_ok avoids the check-then-create race of the old exists()/makedirs pair
    os.makedirs(ret, exist_ok=True)
    return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'], configure_args=[], make=['make'], make_args='help', cache=True, env_init={}, cache_name_extra='', native=False):
    """Build a third-party test library, or fetch it from the shared cache.

    name           -- library name; also the basis of the cache key.
    generated_libs -- library file(s) the build is expected to produce.
    make_args      -- 'help' is a sentinel meaning "default to -j<cpu count>".
    cache          -- if True, reuse/populate self.library_cache.
    env_init       -- extra environment variables for the build.
    native         -- build natively instead of with emscripten.

    NOTE: the list/dict defaults are shared across calls; they are not
    mutated here so this is currently safe, but do not mutate them.
    """
    if make_args == 'help':
        make_args = ['-j', str(multiprocessing.cpu_count())]
    build_dir = self.get_build_dir()
    output_dir = self.get_dir()
    # The cache key mixes in the compiler test options so that builds made
    # under different configurations do not collide.
    cache_name = name + ','.join([opt for opt in Building.COMPILER_TEST_OPTS if len(opt) < 7]) + '_' + hashlib.md5(str(Building.COMPILER_TEST_OPTS).encode('utf-8')).hexdigest() + cache_name_extra
    # Sanitize to filesystem-safe characters.
    valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
    cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
    if self.library_cache is not None:
        if cache and self.library_cache.get(cache_name):
            print('<load %s from cache> ' % cache_name, file=sys.stderr)
            # Materialize the cached library contents into the build dir.
            generated_libs = []
            for basename, contents in self.library_cache[cache_name]:
                bc_file = os.path.join(build_dir, cache_name + '_' + basename)
                f = open(bc_file, 'wb')
                f.write(contents)
                f.close()
                generated_libs.append(bc_file)
            return generated_libs
        print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
    # Cache miss (or caching disabled): do a real build, which also records
    # its outputs into self.library_cache under cache_name.
    return Building.build_library(name, build_dir, output_dir, generated_libs, configure, configure_args, make, make_args, self.library_cache, cache_name,
                                  copy_project=True, env_init=env_init, native=native)
def clear(self):
    """Delete everything in the test dir, and in the Emscripten temp dir if set."""
    base = self.get_dir()
    for entry in os.listdir(base):
        try_delete(os.path.join(base, entry))
    if EMSCRIPTEN_TEMP_DIR:
        for entry in os.listdir(EMSCRIPTEN_TEMP_DIR):
            try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, entry))
# Shared test code between main suite and others
def setup_runtimelink_test(self):
    """Write header.h and supp.cpp into the test dir and return (main, supp) sources.

    Fix vs. the old version: file writes use `with` so the handles are
    closed deterministically instead of leaking.
    """
    header = r'''
struct point
{
int x, y;
};
'''
    with open(os.path.join(self.get_dir(), 'header.h'), 'w') as f:
        f.write(header)
    supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
    supp_name = os.path.join(self.get_dir(), 'supp.cpp')
    with open(supp_name, 'w') as f:
        f.write(supp)
    main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
    return (main, supp)
def filtered_js_engines(self, js_engines=None):
    """Return `js_engines` (default: shared.JS_ENGINES) minus any engines
    banned for this test via self.banned_js_engines."""
    if js_engines is None:
        js_engines = shared.JS_ENGINES
    for engine in js_engines:
        assert type(engine) == list
    for engine in self.banned_js_engines:
        assert type(engine) == list
    # Ban by the engine binary (first element of the command).
    banned_binaries = [banned[0] for banned in self.banned_js_engines if banned]
    return [engine for engine in js_engines
            if engine and engine[0] not in banned_binaries]
def do_run_from_file(self, src, expected_output, *args, **kwargs):
    """Like do_run, but `src` and `expected_output` are file paths.

    Fix: files are opened with `with` so the handles are closed instead
    of being leaked by bare open(...).read() calls.
    """
    with open(src) as src_f:
        src_text = src_f.read()
    with open(expected_output) as expected_f:
        expected_text = expected_f.read()
    self.do_run(src_text, expected_text, *args, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
           no_build=False, main_file=None, additional_files=[],
           js_engines=None, post_build=None, basename='src.cpp', libraries=[],
           includes=[], force_c=False, build_ll_hook=None,
           assert_returncode=None, assert_identical=False, js_transform=None):
    """Build `src`, run it in the selected JS engines, and check the output.

    `expected_output` is matched with assertContained (or assertIdentical
    when `assert_identical` is set).  Skips if no JS engine is available.
    """
    if self.get_setting('ASYNCIFY') == 1 and self.is_wasm_backend():
        self.skipTest("wasm backend doesn't support ASYNCIFY yet")
    # Fixed operator precedence: the '.c' comparison must be inside the
    # parentheses.  The old form compared the whole `and` expression with
    # '.c', which only produced the right answer by accident.
    if force_c or (main_file is not None and main_file[-2:] == '.c'):
        basename = 'src.c'
        Building.COMPILER = to_cc(Building.COMPILER)
    dirname = self.get_dir()
    filename = os.path.join(dirname, basename)
    if not no_build:
        self.build(src, dirname, filename, main_file=main_file, additional_files=additional_files, libraries=libraries, includes=includes,
                   build_ll_hook=build_ll_hook, post_build=post_build,
                   js_transform=js_transform)
    # Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
    js_engines = self.filtered_js_engines(js_engines)
    js_file = filename + '.o.js'
    if len(js_engines) == 0:
        self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
    if len(js_engines) > 1 and not self.use_all_engines:
        if SPIDERMONKEY_ENGINE in js_engines: # make sure to get asm.js validation checks, using sm
            js_engines = [SPIDERMONKEY_ENGINE]
        else:
            js_engines = js_engines[:1]
    for engine in js_engines:
        # print 'test in', engine
        js_output = self.run_generated_code(engine, js_file, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
        js_output = js_output.replace('\r\n', '\n')
        try:
            if assert_identical:
                self.assertIdentical(expected_output, js_output)
            else:
                self.assertContained(expected_output, js_output)
                self.assertNotContained('ERROR', js_output)
        except Exception as e:
            print('(test did not pass in JS engine: %s)' % engine)
            raise e
    # shutil.rmtree(dirname) # TODO: leave no trace in memory. But for now nice for debugging
    if self.save_JS:
        global test_index
        self.hardcode_arguments(js_file, args)
        shutil.copyfile(js_file, os.path.join(TEMP_DIR, str(test_index) + '.js'))
        test_index += 1
# No building - just process an existing .ll file (or .bc, which we turn into .ll)
def do_ll_run(self, ll_file, expected_output=None, args=[], js_engines=None,
              output_nicerizer=None, js_transform=None, force_recompile=False,
              build_ll_hook=None, assert_returncode=None):
    """Compile an existing .ll/.bc file to JS and check its runtime output."""
    filename = os.path.join(self.get_dir(), 'src.cpp')
    self.prep_ll_run(filename, ll_file, force_recompile, build_ll_hook)
    self.ll_to_js(filename, js_transform)
    # post_build was already done in ll_to_js; this do_run call is just to
    # test the output.
    self.do_run(None, expected_output, args,
                no_build=True,
                js_engines=js_engines,
                output_nicerizer=output_nicerizer,
                assert_returncode=assert_returncode)
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(q, port):
    """Serve the browser test harness page and feed it test URLs from queue `q`.

    Runs forever; the test runner terminates the hosting process.
    Fix vs. the old version: log_request() now takes `self` — previously the
    instance was silently bound to the `code` parameter.
    """
    class TestServerHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            if self.path == '/run_harness':
                self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
            else:
                # Any other request: hand out the next queued test URL, or
                # b'False' if nothing is pending.
                result = b'False'
                if not q.empty():
                    result = q.get()
                self.wfile.write(result)

        def log_request(self, code=0, size=0):
            # don't log; too noisy
            pass

    httpd = HTTPServer(('localhost', port), TestServerHandler)
    httpd.serve_forever() # test runner will kill us
def server_func(dir, q, port):
    """Serve the test's files from `dir` and forward report_* requests into `q`.

    Runs forever; the test runner terminates the hosting process.
    Fixes vs. the old version: log_request() now takes `self`, and the
    Python-2-only `urllib.unquote_plus` (an AttributeError on Python 3) is
    replaced with `urllib.parse.unquote_plus`.
    """
    from urllib.parse import unquote_plus

    class TestServerHandler(SimpleHTTPRequestHandler):
        def do_GET(self):
            if 'report_' in self.path:
                print('[server response:', self.path, ']')
                q.put(self.path)
                # Send a default OK response to the browser.
                self.send_response(200)
                self.send_header("Content-type", "text/plain")
                self.send_header('Cache-Control', 'no-cache, must-revalidate')
                self.send_header('Connection', 'close')
                self.send_header('Expires', '-1')
                self.end_headers()
                self.wfile.write(b'OK')
            elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
                '''
                To get logging to the console from browser tests, add this to
                print/printErr/the exception handler in src/shell.html:
                var xhr = new XMLHttpRequest();
                xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
                xhr.send();
                '''
                print('[server logging:', unquote_plus(self.path), ']')
            else:
                # Use SimpleHTTPServer default file serving operation for GET.
                SimpleHTTPRequestHandler.do_GET(self)

        def log_request(self, code=0, size=0):
            # don't log; too noisy
            pass

    SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
    os.chdir(dir)
    httpd = HTTPServer(('localhost', port), TestServerHandler)
    httpd.serve_forever() # test runner will kill us
class BrowserCore(RunnerCore):
    """RunnerCore variant for browser tests.

    Builds test programs with emcc, serves them over a local HTTP server,
    opens them in a real browser via a shared "harness" page, and collects
    the results the page reports back over HTTP.
    """

    def __init__(self, *args, **kwargs):
        super(BrowserCore, self).__init__(*args, **kwargs)

    @classmethod
    def setUpClass(cls):
        # Ports and browser choice come from the environment so CI can tweak
        # them without code changes.
        super(BrowserCore, cls).setUpClass()
        cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
        cls.test_port = int(os.getenv('EMTEST_BROWSER_TEST_PORT', '8888'))
        cls.harness_port = int(os.getenv('EMTEST_BROWSER_HARNESS_PORT', '9999'))
        if not has_browser():
            return
        if not EMTEST_BROWSER:
            print("Using default system browser")
        else:
            cmd = shlex.split(EMTEST_BROWSER)

            # Route webbrowser.open_new() through the user-chosen browser command.
            def run_in_other_browser(url):
                Popen(cmd + [url])

            webbrowser.open_new = run_in_other_browser
            print("Using Emscripten browser: " + str(cmd))
        cls.browser_timeout = 30
        # The harness server hands queued test URLs to the harness page.
        cls.harness_queue = multiprocessing.Queue()
        cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_queue, cls.harness_port))
        cls.harness_server.start()
        print('[Browser harness server on process %d]' % cls.harness_server.pid)
        webbrowser.open_new('http://localhost:%s/run_harness' % cls.harness_port)

    @classmethod
    def tearDownClass(cls):
        super(BrowserCore, cls).tearDownClass()
        if not has_browser():
            return
        cls.harness_server.terminate()
        print('[Browser harness server terminated]')
        if WINDOWS:
            # On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
            # WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
            time.sleep(0.1)

    def run_browser(self, html_file, message, expectedResult=None, timeout=None):
        """Serve `html_file` and open it in the browser.

        With `expectedResult`, wait (up to `timeout` seconds, default
        self.browser_timeout) for the page to report a result over HTTP and
        assert it matches; pages can report 'skipped:<reason>' to turn a
        browser-side condition into a test skip.  Without `expectedResult`,
        just open the page and ask the user to check it manually.
        """
        if not has_browser():
            return
        print('[browser launch:', html_file, ']')
        if expectedResult is not None:
            try:
                queue = multiprocessing.Queue()
                server = multiprocessing.Process(target=functools.partial(server_func, self.get_dir()), args=(queue, self.test_port))
                server.start()
                # Starting the web page server above is an asynchronous procedure, so before we tell the browser below to navigate to
                # the test page, we need to know that the server has started up and is ready to process the site navigation.
                # Therefore block until we can make a connection to the server.
                for i in range(10):
                    httpconn = HTTPConnection('localhost:%s' % self.test_port, timeout=1)
                    try:
                        httpconn.connect()
                        httpconn.close()
                        break
                    except:
                        time.sleep(1)
                else:
                    raise Exception('[Test harness server failed to start up in a timely manner]')
                self.harness_queue.put(asbytes('http://localhost:%s/%s' % (self.test_port, html_file)))
                output = '[no http server activity]'
                start = time.time()
                if timeout is None:
                    timeout = self.browser_timeout
                # Poll the result queue until the page reports, or time out.
                while time.time() - start < timeout:
                    if not queue.empty():
                        output = queue.get()
                        break
                    time.sleep(0.1)
                if output.startswith('/report_result?skipped:'):
                    self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
                else:
                    self.assertIdentical(expectedResult, output)
            finally:
                server.terminate()
                time.sleep(0.1) # see comment about Windows above
        else:
            webbrowser.open_new(os.path.abspath(html_file))
            print('A web browser window should have opened a page containing the results of a part of this test.')
            print('You need to manually look at the page to see that it works ok: ' + message)
            print('(sleeping for a bit to keep the directory alive for the web browser..)')
            time.sleep(5)
            print('(moving on..)')

    def with_report_result(self, code):
        """Prefix `code` with the report_result include and port define."""
        return '#define EMTEST_PORT_NUMBER %d\n#include "%s"\n' % (self.test_port, path_from_root('tests', 'report_result.h')) + code

    # @manually_trigger If set, we do not assume we should run the reftest when main() is done.
    # Instead, call doReftest() in JS yourself at the right time.
    def reftest(self, expected, manually_trigger=False):
        """Install reftest.js into the test dir.

        The script compares the rendered canvas against the reference image
        `expected` and reports the average per-pixel error back to the test
        server via /report_result.
        """
        # make sure the pngs used here have no color correction, using e.g.
        # pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
        basename = os.path.basename(expected)
        shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
        # NOTE(review): this open(...).write leaks the file handle; left
        # untouched here to keep this a documentation-only change.
        open(os.path.join(self.get_dir(), 'reftest.js'), 'w').write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + wrong);
xhr.send();
if (wrong < 10 /* for easy debugging, don't close window on failure */) setTimeout(function() { window.close() }, 1000);
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (self.test_port, basename, int(manually_trigger)))

    def btest(self, filename, expected=None, reference=None, force_c=False,
              reference_slack=0, manual_reference=False, post_build=None,
              args=[], outfile='test.html', message='.', also_proxied=False,
              url_suffix='', timeout=None, also_asmjs=False,
              manually_trigger_reftest=False):
        """Build a browser test and run it, checking the reported result.

        `filename` may be a path under tests/ or literal source (detected by
        the presence of a newline).  With `reference`, a canvas reftest is
        set up and `expected` becomes the allowed pixel-error values
        0..reference_slack.  May recursively re-run itself for asm.js
        (`also_asmjs`) and proxy-to-worker (`also_proxied`) variants.
        """
        # if we are provided the source and not a path, use that
        filename_is_src = '\n' in filename
        src = filename if filename_is_src else ''
        original_args = args[:]
        if 'USE_PTHREADS=1' in args:
            if EMTEST_WASM_PTHREADS:
                also_asmjs = True
            elif 'WASM=0' not in args:
                args += ['-s', 'WASM=0']
        if 'WASM=0' not in args:
            # Filter out separate-asm, which is implied by wasm
            args = [a for a in args if a != '--separate-asm']
        args += ['-DEMTEST_PORT_NUMBER=%d' % self.test_port, '-include', path_from_root('tests', 'report_result.h')]
        if filename_is_src:
            filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
            with open(filepath, 'w') as f:
                f.write(src)
        else:
            filepath = path_from_root('tests', filename)
        if reference:
            self.reference = reference
            # Any pixel-error count up to reference_slack passes.
            expected = [str(i) for i in range(0, reference_slack + 1)]
            self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
            if not manual_reference:
                args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
        all_args = [PYTHON, EMCC, '-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
        # print('all args:', all_args)
        try_delete(outfile)
        run_process(all_args)
        assert os.path.exists(outfile)
        if post_build:
            post_build()
        if not isinstance(expected, list):
            expected = [expected]
        self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
        # Tests can opt into being run under asmjs as well
        if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
            self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                       args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
        if also_proxied:
            print('proxied...')
            if reference:
                assert not manual_reference
                manual_reference = True
                assert not post_build
                post_build = self.post_manual_reftest
            # run proxied
            self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                       original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def get_zlib_library(runner_core):
    """Build (or fetch from cache) zlib, with Windows-specific build commands."""
    if WINDOWS:
        build_kwargs = dict(
            configure=[path_from_root('emconfigure.bat')],
            configure_args=['cmake', '.', '-DBUILD_SHARED_LIBS=OFF'],
            make=['mingw32-make'],
            make_args=[])
    else:
        build_kwargs = dict(make_args=['libz.a'])
    return runner_core.get_library('zlib', os.path.join('libz.a'), **build_kwargs)
# Both test_core and test_other access the Bullet library, share the access here to avoid duplication.
def get_bullet_library(runner_core, use_cmake):
    """Build (or fetch from cache) Bullet via cmake or autotools."""
    if use_cmake:
        configure_commands = ['cmake', '.']
        configure_args = ['-DBUILD_DEMOS=OFF', '-DBUILD_EXTRAS=OFF', '-DUSE_GLUT=OFF']
        # Depending on whether 'configure' or 'cmake' is used to build, Bullet places output files in different directory structures.
        lib_subdirs = [('src', 'BulletDynamics'), ('src', 'BulletCollision'), ('src', 'LinearMath')]
        generated_libs = [os.path.join(*parts, 'lib%s.a' % parts[-1]) for parts in lib_subdirs]
    else:
        configure_commands = ['sh', './configure']
        # Force a nondefault --host= so that the configure script will interpret that we are doing cross-compilation
        # and skip attempting to run the generated executable with './a.out', which would fail since we are building a .js file.
        configure_args = ['--host=i686-pc-linux-gnu', '--disable-demos', '--disable-dependency-tracking']
        generated_libs = [os.path.join('src', '.libs', 'lib%s.a' % lib)
                          for lib in ('BulletDynamics', 'BulletCollision', 'LinearMath')]
    return runner_core.get_library('bullet', generated_libs,
                                   configure=configure_commands,
                                   configure_args=configure_args,
                                   cache_name_extra=configure_commands[0])
def check_js_engines():
    """Filter shared.JS_ENGINES down to the engines that actually work,
    warning about any that do not."""
    total_engines = len(shared.JS_ENGINES)
    working = [e for e in shared.JS_ENGINES if jsrun.check_engine(e)]
    shared.JS_ENGINES = working
    if not working:
        print('WARNING: None of the JS engines in JS_ENGINES appears to work.')
    elif len(working) < total_engines:
        print('WARNING: Not all the JS engines in JS_ENGINES appears to work, ignoring those.')
    if EMTEST_ALL_ENGINES:
        print('(using ALL js engines)')
    else:
        logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
                       'engines, which is slower but provides more coverage')
def get_and_import_modules():
    """Import every test*.py next to this file; return the module objects."""
    modules = []
    pattern = os.path.join(os.path.dirname(__file__), 'test*.py')
    for filename in glob.glob(pattern):
        module_name = os.path.splitext(os.path.basename(filename))[0]
        __import__(module_name)
        modules.append(sys.modules[module_name])
    return modules
def get_all_tests(modules):
    """List all known tests as 'suite.test_name' strings, for wildcard matching."""
    # Create a list of all known tests so that we can choose from them based on a wildcard search
    all_tests = []
    suites = core_test_modes + non_core_test_modes
    for m in modules:
        for s in suites:
            if hasattr(m, s):
                all_tests.extend(s + '.' + t
                                 for t in dir(getattr(m, s)) if t.startswith('test_'))
    return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
    """Replace each 'skip:suite.test' arg with a skipping stub on its suite.

    Returns the remaining (non-skip) args.
    """
    remaining = []
    for arg in args:
        if not arg.startswith('skip:'):
            remaining.append(arg)
            continue
        which = [arg.split('skip:')[1]]
        print(','.join(which), file=sys.stderr)
        for test in which:
            print('will skip "%s"' % test, file=sys.stderr)
            suite_name, test_name = test.split('.')
            # Patch the first module that has the suite; ignore the rest.
            for m in modules:
                try:
                    suite = getattr(m, suite_name)
                    setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
                    break
                except:
                    pass
    return remaining
def args_for_random_tests(args, modules):
    """If the first arg starts with 'random', replace args with a random test pick."""
    if not args or not args[0].startswith('random'):
        return args
    random_arg = args[0][len('random'):]
    num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
    for m in modules:
        if hasattr(m, base_module):
            base = getattr(m, base_module)
            new_args = choose_random_tests(base, num_tests, relevant_modes)
            print_random_test_statistics(num_tests)
            return new_args
    return args
def get_random_test_parameters(arg):
    """Parse the suffix of a 'random...' CLI arg.

    Returns (num_tests, base_module, relevant_modes); an 'other' or 'browser'
    prefix selects that suite, and any remaining digits give the test count.
    """
    num_tests = 1
    base_module = default_core_test_mode
    relevant_modes = core_test_modes
    if arg:
        num_str = arg
        for prefix in ('other', 'browser'):
            if arg.startswith(prefix):
                base_module = prefix
                relevant_modes = [prefix]
                num_str = arg.replace(prefix, '')
                break
        num_tests = int(num_str)
    return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
    """Randomly pick `num_tests` distinct 'mode.test' names from suite `base`."""
    tests = [t for t in dir(base) if t.startswith('test_')]
    print()
    chosen = set()
    limit = len(tests) * len(relevant_modes)
    while len(chosen) < num_tests:
        # keep the original order of random.choice calls (test, then mode)
        test = random.choice(tests)
        mode = random.choice(relevant_modes)
        candidate = mode + '.' + test
        if candidate not in chosen:
            chosen.add(candidate)
            print('* ' + candidate)
        elif len(chosen) == limit:
            # we may have hit the limit
            print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
            break
    return list(chosen)
def print_random_test_statistics(num_tests):
    """Print the statistical meaning of a random test sample, and repeat it at exit."""
    std = 0.5 / math.sqrt(num_tests)
    expected = 100.0 * (1.0 - std)
    print()
    print(f'running those {num_tests} randomly-selected tests. if they all pass, then there is a '
          f'greater than 95% chance that at least {expected:.2f}% of the test suite will pass')
    print()

    def show():
        print(f'if all tests passed then there is a greater than 95% chance that at least '
              f'{expected:.2f}% of the test suite will pass')

    atexit.register(show)
def load_test_suites(args, modules):
    """Resolve test names against modules and build one suite per module.

    Returns (suites, unmatched_test_names), where suites is a list of
    (module_name, suite) pairs and unmatched_test_names holds any args that
    matched no module.
    """
    loader = unittest.TestLoader()
    unmatched_test_names = set(args)
    suites = []
    for m in modules:
        names_in_module = []
        for name in list(unmatched_test_names):
            try:
                operator.attrgetter(name)(m)
            except AttributeError:
                continue
            names_in_module.append(name)
            unmatched_test_names.remove(name)
        if names_in_module:
            loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
            tests = flattened_tests(loaded_tests)
            suite = suite_for_module(m, tests)
            for test in tests:
                suite.addTest(test)
            suites.append((m.__name__, suite))
    return suites, unmatched_test_names
def flattened_tests(loaded_tests):
    """Flatten a nested iterable of test suites into a single flat list of tests."""
    return [test for subsuite in loaded_tests for test in subsuite]
def suite_for_module(module, tests):
    """Pick a parallel suite for core/other modules on multi-core machines,
    a plain unittest suite otherwise."""
    many_cores = parallel_runner.num_cores() > 1
    use_parallel = (module.__name__ in ('test_core', 'test_other')
                    and len(tests) > 1
                    and many_cores)
    if use_parallel:
        return parallel_runner.ParallelTestSuite()
    return unittest.TestSuite()
def run_tests(options, suites):
    """Run each (name, suite) pair with a text runner and print a summary.

    Returns the number of failures capped at 255, suitable as a process
    exit code.
    """
    result_messages = []
    num_failures = 0
    print('Test suites:')
    print([s[0] for s in suites])
    # Run the discovered tests
    test_runner = unittest.TextTestRunner(verbosity=2)
    for mod_name, suite in suites:
        print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
        res = test_runner.run(suite)
        msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
               (mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
        num_failures += len(res.errors) + len(res.failures)
        result_messages.append(msg)
    if len(result_messages) > 1:
        print('====================')
        print()
        print('TEST SUMMARY')
        for msg in result_messages:
            print(' ' + msg)
    # Return the number of failures as the process exit code for automating success/failure reporting.
    return min(num_failures, 255)
def parse_args(args):
    """Parse command-line arguments.

    `args` is a full argv-style list including the program name (callers
    pass sys.argv).  Fix: the parameter was previously ignored and
    parse_args() silently read sys.argv; now the passed list is parsed
    (skipping its argv[0]), which is equivalent for the existing caller
    but makes the function honest and testable.
    """
    parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
    parser.add_argument('-j', '--js-engine', help='Set JS_ENGINE_OVERRIDE')
    parser.add_argument('tests', nargs='*')
    return parser.parse_args(args[1:])
def main(args):
    """Entry point: parse args, select/expand tests, and run them.

    Returns the process exit code: 0 on success, the failure count (capped)
    from run_tests, or 1 for usage errors.
    """
    options = parse_args(args)
    # Apply any JS engine override before the engine sanity check runs.
    if options.js_engine:
        if options.js_engine == 'SPIDERMONKEY_ENGINE':
            Building.JS_ENGINE_OVERRIDE = SPIDERMONKEY_ENGINE
        elif options.js_engine == 'V8_ENGINE':
            Building.JS_ENGINE_OVERRIDE = V8_ENGINE
        elif options.js_engine == 'NODE_JS':
            Building.JS_ENGINE_OVERRIDE = NODE_JS
        else:
            print('Unknown js engine override: ' + options.js_engine)
            return 1
        print("Overriding JS engine: " + Building.JS_ENGINE_OVERRIDE[0])
    check_js_engines()

    # Bare 'test_*' names default to the core test mode suite.
    def prepend_default(arg):
        if arg.startswith('test_'):
            return default_core_test_mode + '.' + arg
        return arg

    tests = [prepend_default(t) for t in options.tests]
    modules = get_and_import_modules()
    all_tests = get_all_tests(modules)
    # Expand wildcards, strip skip: requests, and handle 'random' selection.
    tests = tests_with_expanded_wildcards(tests, all_tests)
    tests = skip_requested_tests(tests, modules)
    tests = args_for_random_tests(tests, modules)
    suites, unmatched_tests = load_test_suites(tests, modules)
    if unmatched_tests:
        print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
        return 1
    return run_tests(options, suites)
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv))
    except KeyboardInterrupt:
        # Exit with a nonzero code (and no traceback) when the user interrupts the run.
        logger.warning('KeyboardInterrupt')
        sys.exit(1)
|
test_engine_py3k.py | import asyncio
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import delete
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union_all
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import engine as _async_engine
from sqlalchemy.ext.asyncio import exc as asyncio_exc
from sqlalchemy.ext.asyncio.base import ReversibleProxy
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.pool import AsyncAdaptedQueuePool
from sqlalchemy.testing import assertions
from sqlalchemy.testing import async_test
from sqlalchemy.testing import combinations
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.util.concurrency import greenlet_spawn
class AsyncFixture:
    """Fixture exercising async transaction context managers.

    Parametrized over every combination of:
      rollback           -- end the transaction with rollback() vs commit()
      run_second_execute -- try a statement on the already-closed transaction
      begin_nested       -- additionally exercise a SAVEPOINT transaction
    """

    @config.fixture(
        params=[
            (rollback, run_second_execute, begin_nested)
            for rollback in (True, False)
            for run_second_execute in (True, False)
            for begin_nested in (True, False)
        ]
    )
    def async_trans_ctx_manager_fixture(self, request, metadata):
        rollback, run_second_execute, begin_nested = request.param
        from sqlalchemy import Table, Column, Integer, func, select

        t = Table("test", metadata, Column("data", Integer))
        eng = getattr(self, "bind", None) or config.db
        t.create(eng)

        async def run_test(subject, trans_on_subject, execute_on_subject):
            # `subject` is the async engine/connection/session under test;
            # `execute_on_subject` selects whether statements are issued on it
            # or on the transaction object returned by begin().
            async with subject.begin() as trans:
                if begin_nested:
                    if not config.requirements.savepoints.enabled:
                        config.skip_test("savepoints not enabled")
                    if execute_on_subject:
                        nested_trans = subject.begin_nested()
                    else:
                        nested_trans = trans.begin_nested()

                    async with nested_trans:
                        if execute_on_subject:
                            await subject.execute(t.insert(), {"data": 10})
                        else:
                            await trans.execute(t.insert(), {"data": 10})

                        # for nested trans, we always commit/rollback on the
                        # "nested trans" object itself.
                        # only Session(future=False) will affect savepoint
                        # transaction for session.commit/rollback

                        if rollback:
                            await nested_trans.rollback()
                        else:
                            await nested_trans.commit()

                        if run_second_execute:
                            with assertions.expect_raises_message(
                                exc.InvalidRequestError,
                                "Can't operate on closed transaction "
                                "inside context manager. Please complete the "
                                "context manager "
                                "before emitting further commands.",
                            ):
                                if execute_on_subject:
                                    await subject.execute(
                                        t.insert(), {"data": 12}
                                    )
                                else:
                                    await trans.execute(
                                        t.insert(), {"data": 12}
                                    )

                    # outside the nested trans block, but still inside the
                    # transaction block, we can run SQL, and it will be
                    # committed
                    if execute_on_subject:
                        await subject.execute(t.insert(), {"data": 14})
                    else:
                        await trans.execute(t.insert(), {"data": 14})

                else:
                    if execute_on_subject:
                        await subject.execute(t.insert(), {"data": 10})
                    else:
                        await trans.execute(t.insert(), {"data": 10})

                    if trans_on_subject:
                        if rollback:
                            await subject.rollback()
                        else:
                            await subject.commit()
                    else:
                        if rollback:
                            await trans.rollback()
                        else:
                            await trans.commit()

                    if run_second_execute:
                        with assertions.expect_raises_message(
                            exc.InvalidRequestError,
                            "Can't operate on closed transaction inside "
                            "context "
                            "manager. Please complete the context manager "
                            "before emitting further commands.",
                        ):
                            if execute_on_subject:
                                await subject.execute(t.insert(), {"data": 12})
                            else:
                                await trans.execute(t.insert(), {"data": 12})

            # Count how many of the inserts above should have survived.
            expected_committed = 0
            if begin_nested:
                # begin_nested variant, we inserted a row after the nested
                # block
                expected_committed += 1
            if not rollback:
                # not rollback variant, our row inserted in the target
                # block itself would be committed
                expected_committed += 1

            if execute_on_subject:
                eq_(
                    await subject.scalar(select(func.count()).select_from(t)),
                    expected_committed,
                )
            else:
                # NOTE(review): `subject` is async here, so this looks like it
                # should be `async with subject.connect()` — confirm.
                with subject.connect() as conn:
                    eq_(
                        await conn.scalar(select(func.count()).select_from(t)),
                        expected_committed,
                    )

        return run_test
class EngineFixture(AsyncFixture, fixtures.TablesTest):
    """Base fixture providing an async testing engine and a ``users`` table
    pre-populated with 19 rows (user_id 1..19, user_name "name1".."name19")."""

    __requires__ = ("async_dialect",)

    @testing.fixture
    def async_engine(self):
        # transfer_staticpool shares the connection pool with the globally
        # configured sync engine (relevant e.g. for SQLite memory databases).
        return engines.testing_engine(asyncio=True, transfer_staticpool=True)

    @classmethod
    def define_tables(cls, metadata):
        """Create the ``users`` table used by all tests in this fixture."""
        Table(
            "users",
            metadata,
            Column("user_id", Integer, primary_key=True, autoincrement=False),
            Column("user_name", String(20)),
        )

    @classmethod
    def insert_data(cls, connection):
        """Seed ``users`` with rows (1, "name1") .. (19, "name19")."""
        users = cls.tables.users
        connection.execute(
            users.insert(),
            [{"user_id": i, "user_name": "name%d" % i} for i in range(1, 20)],
        )
class AsyncEngineTest(EngineFixture):
    """Tests for the AsyncEngine / AsyncConnection / AsyncTransaction
    facade: attribute proxying, equality, transactions, invalidation,
    pool behavior and error conditions."""

    __backend__ = True

    @testing.fails("the failure is the test")
    @async_test
    async def test_we_are_definitely_running_async_tests(self, async_engine):
        """Meta-test: the deliberately wrong assertion proves the coroutine
        body is actually executed by the async test runner."""
        async with async_engine.connect() as conn:
            eq_(await conn.scalar(text("select 1")), 2)

    @async_test
    async def test_interrupt_ctxmanager_connection(
        self, async_engine, async_trans_ctx_manager_fixture
    ):
        """Run the shared transaction-context-manager interruption matrix
        against an AsyncConnection."""
        fn = async_trans_ctx_manager_fixture

        async with async_engine.connect() as conn:
            await fn(conn, trans_on_subject=False, execute_on_subject=True)

    def test_proxied_attrs_engine(self, async_engine):
        """AsyncEngine exposes url/pool/dialect/name/driver/echo straight
        from the wrapped sync Engine."""
        sync_engine = async_engine.sync_engine
        is_(async_engine.url, sync_engine.url)
        is_(async_engine.pool, sync_engine.pool)
        is_(async_engine.dialect, sync_engine.dialect)
        eq_(async_engine.name, sync_engine.name)
        eq_(async_engine.driver, sync_engine.driver)
        eq_(async_engine.echo, sync_engine.echo)

    @async_test
    async def test_engine_eq_ne(self, async_engine):
        """Equality is based on the wrapped sync engine, not proxy identity."""
        e2 = _async_engine.AsyncEngine(async_engine.sync_engine)
        e3 = testing.engines.testing_engine(
            asyncio=True, transfer_staticpool=True
        )

        eq_(async_engine, e2)
        ne_(async_engine, e3)

        is_false(async_engine == None)

    # NOTE: this test currently causes the test suite to hang; it previously
    # was not actually running the worker thread
    # as the testing_engine() fixture
    # was rejecting the "transfer_staticpool" keyword argument
    @async_test
    async def temporarily_dont_test_no_attach_to_event_loop(
        self, testing_engine
    ):
        """test #6409"""
        import asyncio
        import threading

        errs = []

        def go():
            # Run a fresh event loop in a separate thread; the engine must
            # not remain bound to the loop it was created on.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            engine = testing_engine(asyncio=True, transfer_staticpool=True)

            async def main():
                tasks = [task() for _ in range(2)]
                await asyncio.gather(*tasks)

            async def task():
                async with engine.begin() as connection:
                    result = await connection.execute(select(1))
                    result.all()

            try:
                asyncio.run(main())
            except Exception as err:
                errs.append(err)

        t = threading.Thread(target=go)
        t.start()
        t.join()

        # re-raise any failure captured in the worker thread
        if errs:
            raise errs[0]

    @async_test
    async def test_connection_info(self, async_engine):
        """AsyncConnection.info writes through to the sync connection."""
        async with async_engine.connect() as conn:
            conn.info["foo"] = "bar"

            eq_(conn.sync_connection.info, {"foo": "bar"})

    @async_test
    async def test_connection_eq_ne(self, async_engine):
        """Two proxies over the same sync connection compare equal; distinct
        connections do not."""
        async with async_engine.connect() as conn:
            c2 = _async_engine.AsyncConnection(
                async_engine, conn.sync_connection
            )

            eq_(conn, c2)

            async with async_engine.connect() as c3:
                ne_(conn, c3)

            is_false(conn == None)

    @async_test
    async def test_transaction_eq_ne(self, async_engine):
        """A regenerated transaction proxy compares equal to the original."""
        async with async_engine.connect() as conn:
            t1 = await conn.begin()

            t2 = _async_engine.AsyncTransaction._regenerate_proxy_for_target(
                t1._proxied
            )

            eq_(t1, t2)

            is_false(t1 == None)

    def test_clear_compiled_cache(self, async_engine):
        """clear_compiled_cache() empties the sync engine's compiled cache."""
        async_engine.sync_engine._compiled_cache["foo"] = "bar"
        eq_(async_engine.sync_engine._compiled_cache["foo"], "bar")
        async_engine.clear_compiled_cache()
        assert "foo" not in async_engine.sync_engine._compiled_cache

    def test_execution_options(self, async_engine):
        """execution_options() returns a new AsyncEngine; the original's
        options are untouched."""
        a2 = async_engine.execution_options(foo="bar")
        assert isinstance(a2, _async_engine.AsyncEngine)
        eq_(a2.sync_engine._execution_options, {"foo": "bar"})
        eq_(async_engine.sync_engine._execution_options, {})

        # developer note left in the source (unassigned string, no effect):
        """
        attr uri, pool, dialect, engine, name, driver, echo
        methods clear_compiled_cache, update_execution_options,
        execution_options, get_execution_options, dispose
        """

    @async_test
    async def test_proxied_attrs_connection(self, async_engine):
        """AsyncConnection proxies engine/closed/dialect/isolation level."""
        conn = await async_engine.connect()

        sync_conn = conn.sync_connection

        is_(conn.engine, async_engine)
        is_(conn.closed, sync_conn.closed)
        is_(conn.dialect, async_engine.sync_engine.dialect)
        eq_(conn.default_isolation_level, sync_conn.default_isolation_level)

    @async_test
    async def test_transaction_accessor(self, async_engine):
        """get_transaction()/get_nested_transaction() track outer and
        savepoint transactions through commit/rollback."""
        async with async_engine.connect() as conn:
            is_none(conn.get_transaction())
            is_false(conn.in_transaction())
            is_false(conn.in_nested_transaction())

            trans = await conn.begin()

            is_true(conn.in_transaction())
            is_false(conn.in_nested_transaction())

            is_(
                trans.sync_transaction, conn.get_transaction().sync_transaction
            )

            nested = await conn.begin_nested()

            is_true(conn.in_transaction())
            is_true(conn.in_nested_transaction())

            is_(
                conn.get_nested_transaction().sync_transaction,
                nested.sync_transaction,
            )
            eq_(conn.get_nested_transaction(), nested)

            is_(
                trans.sync_transaction, conn.get_transaction().sync_transaction
            )

            await nested.commit()

            is_true(conn.in_transaction())
            is_false(conn.in_nested_transaction())

            await trans.rollback()

            is_none(conn.get_transaction())
            is_false(conn.in_transaction())
            is_false(conn.in_nested_transaction())

    @testing.requires.queue_pool
    @async_test
    async def test_invalidate(self, async_engine):
        """invalidate() discards the DBAPI connection; the next raw-connection
        fetch yields a fresh, valid one."""
        conn = await async_engine.connect()

        is_(conn.invalidated, False)

        connection_fairy = await conn.get_raw_connection()
        is_(connection_fairy.is_valid, True)
        dbapi_connection = connection_fairy.connection

        await conn.invalidate()

        # asyncpg actually closes the underlying connection on invalidate
        if testing.against("postgresql+asyncpg"):
            assert dbapi_connection._connection.is_closed()

        new_fairy = await conn.get_raw_connection()
        is_not(new_fairy.connection, dbapi_connection)
        is_not(new_fairy, connection_fairy)
        is_(new_fairy.is_valid, True)
        is_(connection_fairy.is_valid, False)

    @async_test
    async def test_get_dbapi_connection_raise(self, async_engine):
        """The sync-style .connection accessor is intentionally unavailable."""
        conn = await async_engine.connect()

        with testing.expect_raises_message(
            exc.InvalidRequestError,
            "AsyncConnection.connection accessor is not "
            "implemented as the attribute",
        ):
            conn.connection

    @async_test
    async def test_get_raw_connection(self, async_engine):
        """get_raw_connection() returns the pooled DBAPI connection wrapper."""
        conn = await async_engine.connect()

        pooled = await conn.get_raw_connection()
        is_(pooled, conn.sync_connection.connection)

    @async_test
    async def test_isolation_level(self, async_engine):
        """get_isolation_level() matches the sync value and reflects
        execution_options(isolation_level=...)."""
        conn = await async_engine.connect()

        sync_isolation_level = await greenlet_spawn(
            conn.sync_connection.get_isolation_level
        )
        isolation_level = await conn.get_isolation_level()

        eq_(isolation_level, sync_isolation_level)

        await conn.execution_options(isolation_level="SERIALIZABLE")
        isolation_level = await conn.get_isolation_level()

        eq_(isolation_level, "SERIALIZABLE")

        await conn.close()

    @testing.requires.queue_pool
    @async_test
    async def test_dispose(self, async_engine):
        """dispose() empties the pool and replaces it with a new one."""
        c1 = await async_engine.connect()
        c2 = await async_engine.connect()

        await c1.close()
        await c2.close()

        p1 = async_engine.pool

        if isinstance(p1, AsyncAdaptedQueuePool):
            eq_(async_engine.pool.checkedin(), 2)

        await async_engine.dispose()

        if isinstance(p1, AsyncAdaptedQueuePool):
            eq_(async_engine.pool.checkedin(), 0)
        is_not(p1, async_engine.pool)

    @testing.requires.independent_connections
    @async_test
    async def test_init_once_concurrency(self, async_engine):
        """Two concurrent first connections must not race engine init."""
        # NOTE(review): passing bare awaitables to asyncio.wait() is
        # deprecated since Python 3.8 and rejected on 3.11+ -- presumably
        # fine for the interpreters this suite targets; confirm.
        c1 = async_engine.connect()
        c2 = async_engine.connect()
        await asyncio.wait([c1, c2])

    @async_test
    async def test_connect_ctxmanager(self, async_engine):
        async with async_engine.connect() as conn:
            result = await conn.execute(select(1))
            eq_(result.scalar(), 1)

    @async_test
    async def test_connect_plain(self, async_engine):
        """Non-context-manager connect requires an explicit close()."""
        conn = await async_engine.connect()
        try:
            result = await conn.execute(select(1))
            eq_(result.scalar(), 1)
        finally:
            await conn.close()

    @async_test
    async def test_connection_not_started(self, async_engine):
        """Using a connect() object before awaiting it is an error."""
        conn = async_engine.connect()
        testing.assert_raises_message(
            asyncio_exc.AsyncContextNotStarted,
            "AsyncConnection context has not been started and "
            "object has not been awaited.",
            conn.begin,
        )

    @async_test
    async def test_transaction_commit(self, async_engine):
        """engine.begin() commits on normal block exit."""
        users = self.tables.users

        async with async_engine.begin() as conn:
            await conn.execute(delete(users))

        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)

    @async_test
    async def test_savepoint_rollback_noctx(self, async_engine):
        """Rolling back a savepoint undoes work done inside it."""
        users = self.tables.users

        async with async_engine.begin() as conn:

            savepoint = await conn.begin_nested()
            await conn.execute(delete(users))
            await savepoint.rollback()

        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)

    @async_test
    async def test_savepoint_commit_noctx(self, async_engine):
        """Committing a savepoint keeps work done inside it."""
        users = self.tables.users

        async with async_engine.begin() as conn:

            savepoint = await conn.begin_nested()
            await conn.execute(delete(users))
            await savepoint.commit()

        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)

    @async_test
    async def test_transaction_rollback(self, async_engine):
        """Explicit begin()/start()/rollback() path undoes the delete."""
        users = self.tables.users

        async with async_engine.connect() as conn:
            trans = conn.begin()
            await trans.start()
            await conn.execute(delete(users))
            await trans.rollback()

        async with async_engine.connect() as conn:
            eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)

    @async_test
    async def test_conn_transaction_not_started(self, async_engine):
        """Operating on a transaction object before awaiting it is an error."""
        async with async_engine.connect() as conn:
            trans = conn.begin()
            with expect_raises_message(
                asyncio_exc.AsyncContextNotStarted,
                "AsyncTransaction context has not been started "
                "and object has not been awaited.",
            ):
                # NOTE(review): the trailing comma makes this a throwaway
                # one-tuple expression; harmless but stray.
                await trans.rollback(),

    @testing.requires.queue_pool
    @async_test
    async def test_pool_exhausted_some_timeout(self, async_engine):
        """With pool_size=1 and a small timeout, a second connect times out."""
        engine = create_async_engine(
            testing.db.url,
            pool_size=1,
            max_overflow=0,
            pool_timeout=0.1,
        )
        async with engine.connect():
            with expect_raises(exc.TimeoutError):
                await engine.connect()

    @testing.requires.queue_pool
    @async_test
    async def test_pool_exhausted_no_timeout(self, async_engine):
        """pool_timeout=0 fails immediately instead of blocking."""
        engine = create_async_engine(
            testing.db.url,
            pool_size=1,
            max_overflow=0,
            pool_timeout=0,
        )
        async with engine.connect():
            with expect_raises(exc.TimeoutError):
                await engine.connect()

    @async_test
    async def test_create_async_engine_server_side_cursor(self, async_engine):
        """server_side_cursors is a sync-only engine flag and is rejected."""
        testing.assert_raises_message(
            asyncio_exc.AsyncMethodRequired,
            "Can't set server_side_cursors for async engine globally",
            create_async_engine,
            testing.db.url,
            server_side_cursors=True,
        )
class AsyncEventTest(EngineFixture):
    """The engine events all run in their normal synchronous context.

    we do not provide an asyncio event interface at this time.
    """

    __backend__ = True

    @async_test
    async def test_no_async_listeners(self, async_engine):
        """event.listen() on the async facade objects is refused with a
        pointer at the .sync_engine / .sync_connection attributes."""
        with testing.expect_raises_message(
            NotImplementedError,
            "asynchronous events are not implemented "
            "at this time. Apply synchronous listeners to the "
            "AsyncEngine.sync_engine or "
            "AsyncConnection.sync_connection attributes.",
        ):
            event.listen(async_engine, "before_cursor_execute", mock.Mock())

        conn = await async_engine.connect()

        with testing.expect_raises_message(
            NotImplementedError,
            "asynchronous events are not implemented "
            "at this time. Apply synchronous listeners to the "
            "AsyncEngine.sync_engine or "
            "AsyncConnection.sync_connection attributes.",
        ):
            event.listen(conn, "before_cursor_execute", mock.Mock())

    @async_test
    async def test_sync_before_cursor_execute_engine(self, async_engine):
        """A listener attached to the sync engine fires for async execution."""
        canary = mock.Mock()

        event.listen(async_engine.sync_engine, "before_cursor_execute", canary)

        async with async_engine.connect() as conn:
            sync_conn = conn.sync_connection
            await conn.execute(text("select 1"))

        eq_(
            canary.mock_calls,
            [mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
        )

    @async_test
    async def test_sync_before_cursor_execute_connection(self, async_engine):
        """Same as above, with the listener added after connecting."""
        canary = mock.Mock()

        async with async_engine.connect() as conn:
            sync_conn = conn.sync_connection

            event.listen(
                async_engine.sync_engine, "before_cursor_execute", canary
            )
            await conn.execute(text("select 1"))

        eq_(
            canary.mock_calls,
            [mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
        )
class AsyncResultTest(EngineFixture):
    """Tests for streamed AsyncResult objects: all()/async-iteration/
    partitions()/one() plus the scalars()/mappings() filter variants."""

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_all(self, async_engine, filter_):
        """result.all() returns every row in the selected filter shape."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))

            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                # column index 1 -> user_name
                result = result.scalars(1)

            all_ = await result.all()

            if filter_ == "mappings":
                eq_(
                    all_,
                    [
                        {"user_id": i, "user_name": "name%d" % i}
                        for i in range(1, 20)
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    all_,
                    ["name%d" % i for i in range(1, 20)],
                )
            else:
                eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_aiter(self, async_engine, filter_):
        """``async for`` over the result yields the same rows as all()."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))

            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars(1)

            rows = []

            async for row in result:
                rows.append(row)

            if filter_ == "mappings":
                eq_(
                    rows,
                    [
                        {"user_id": i, "user_name": "name%d" % i}
                        for i in range(1, 20)
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    rows,
                    ["name%d" % i for i in range(1, 20)],
                )
            else:
                eq_(rows, [(i, "name%d" % i) for i in range(1, 20)])

    @testing.combinations((None,), ("mappings",), argnames="filter_")
    @async_test
    async def test_keys(self, async_engine, filter_):
        """keys() exposes the selected column names before fetching."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))

            if filter_ == "mappings":
                result = result.mappings()

            eq_(result.keys(), ["user_id", "user_name"])

            # explicitly close the streamed (server-side) result
            await result.close()

    @async_test
    async def test_unique_all(self, async_engine):
        """unique() de-duplicates the doubled UNION ALL row stream."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                union_all(select(users), select(users)).order_by(
                    users.c.user_id
                )
            )

            all_ = await result.unique().all()

            eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])

    @async_test
    async def test_columns_all(self, async_engine):
        """columns(1) restricts rows to the user_name column only."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))

            all_ = await result.columns(1).all()

            eq_(all_, [("name%d" % i,) for i in range(1, 20)])

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_partitions(self, async_engine, filter_):
        """partitions(5) yields chunks of 5 rows (last chunk short)."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(select(users))

            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars(1)

            check_result = []

            async for partition in result.partitions(5):
                check_result.append(partition)

            if filter_ == "mappings":
                eq_(
                    check_result,
                    [
                        [
                            {"user_id": i, "user_name": "name%d" % i}
                            for i in range(a, b)
                        ]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )
            elif filter_ == "scalars":
                eq_(
                    check_result,
                    [
                        ["name%d" % i for i in range(a, b)]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )
            else:
                eq_(
                    check_result,
                    [
                        [(i, "name%d" % i) for i in range(a, b)]
                        for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
                    ],
                )

    @testing.combinations(
        (None,), ("scalars",), ("mappings",), argnames="filter_"
    )
    @async_test
    async def test_one_success(self, async_engine, filter_):
        """one() returns the single row for a LIMIT 1 query."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).limit(1).order_by(users.c.user_name)
            )

            if filter_ == "mappings":
                result = result.mappings()
            elif filter_ == "scalars":
                result = result.scalars()

            u1 = await result.one()

            if filter_ == "mappings":
                eq_(u1, {"user_id": 1, "user_name": "name%d" % 1})
            elif filter_ == "scalars":
                eq_(u1, 1)
            else:
                eq_(u1, (1, "name%d" % 1))

    @async_test
    async def test_one_no_result(self, async_engine):
        """one() with zero rows raises NoResultFound."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).where(users.c.user_name == "nonexistent")
            )

            with expect_raises_message(
                exc.NoResultFound, "No row was found when one was required"
            ):
                await result.one()

    @async_test
    async def test_one_multi_result(self, async_engine):
        """one() with several rows raises MultipleResultsFound."""
        users = self.tables.users
        async with async_engine.connect() as conn:
            result = await conn.stream(
                select(users).where(users.c.user_name.in_(["name3", "name5"]))
            )

            with expect_raises_message(
                exc.MultipleResultsFound,
                "Multiple rows were found when exactly one was required",
            ):
                await result.one()
class TextSyncDBAPI(fixtures.TestBase):
    """Behavior when the asyncio facade wraps a driver that is not
    actually async."""

    def test_sync_dbapi_raises(self):
        """create_async_engine() refuses a plain sync driver URL."""
        with expect_raises_message(
            exc.InvalidRequestError,
            "The asyncio extension requires an async driver to be used.",
        ):
            create_async_engine("sqlite:///:memory:")

    @testing.fixture
    def async_engine(self):
        # Build a sync sqlite engine and forcibly mark its dialect as async
        # so the AsyncEngine facade accepts it; actual execution should then
        # fail with AwaitRequired.
        engine = create_engine("sqlite:///:memory:", future=True)
        engine.dialect.is_async = True
        return _async_engine.AsyncEngine(engine)

    @async_test
    @combinations(
        lambda conn: conn.exec_driver_sql("select 1"),
        lambda conn: conn.stream(text("select 1")),
        lambda conn: conn.execute(text("select 1")),
        argnames="case",
    )
    async def test_sync_driver_execution(self, async_engine, case):
        """Async-style execution against the fake-async driver raises
        AwaitRequired for every execution entry point."""
        with expect_raises_message(
            exc.AwaitRequired,
            "The current operation required an async execution but none was",
        ):
            async with async_engine.connect() as conn:
                await case(conn)

    @async_test
    async def test_sync_driver_run_sync(self, async_engine):
        """run_sync() is the supported escape hatch for sync drivers."""
        async with async_engine.connect() as conn:
            res = await conn.run_sync(
                lambda conn: conn.scalar(text("select 1"))
            )
            assert res == 1
            assert await conn.run_sync(lambda _: 2) == 2
class AsyncProxyTest(EngineFixture, fixtures.TestBase):
    """Tests for the ReversibleProxy registry that maps sync objects back to
    their async facade objects, including garbage-collection behavior."""

    @async_test
    async def test_get_transaction(self, async_engine):
        """get_transaction() returns the live AsyncTransaction proxy."""
        async with async_engine.connect() as conn:
            async with conn.begin() as trans:
                is_(trans.connection, conn)
                is_(conn.get_transaction(), trans)

    @async_test
    async def test_get_nested_transaction(self, async_engine):
        """get_nested_transaction() tracks the innermost live savepoint."""
        async with async_engine.connect() as conn:
            async with conn.begin() as trans:
                n1 = await conn.begin_nested()

                is_(conn.get_nested_transaction(), n1)

                n2 = await conn.begin_nested()

                is_(conn.get_nested_transaction(), n2)

                await n2.commit()

                is_(conn.get_nested_transaction(), n1)

                is_(conn.get_transaction(), trans)

    @async_test
    async def test_get_connection(self, async_engine):
        """The proxy registry maps a sync connection back to its facade."""
        async with async_engine.connect() as conn:
            is_(
                AsyncConnection._retrieve_proxy_for_target(
                    conn.sync_connection
                ),
                conn,
            )

    def test_regenerate_connection(self, connection):
        """Repeated retrievals for the same sync connection return the same
        proxy, with a usable engine attribute."""
        async_connection = AsyncConnection._retrieve_proxy_for_target(
            connection
        )

        a2 = AsyncConnection._retrieve_proxy_for_target(connection)
        is_(async_connection, a2)
        is_not(async_connection, None)

        is_(async_connection.engine, a2.engine)
        is_not(async_connection.engine, None)

    @testing.requires.predictable_gc
    @async_test
    # BUG FIX: this coroutine test was missing the @async_test decorator
    # (compare test_gc_conn below), so the test runner never awaited it and
    # it silently passed without executing its body.
    async def test_gc_engine(self, testing_engine):
        """An AsyncEngine leaves the proxy registry once dereferenced."""
        ReversibleProxy._proxy_objects.clear()

        eq_(len(ReversibleProxy._proxy_objects), 0)

        async_engine = AsyncEngine(testing.db)

        eq_(len(ReversibleProxy._proxy_objects), 1)

        del async_engine

        eq_(len(ReversibleProxy._proxy_objects), 0)

    @testing.requires.predictable_gc
    @async_test
    async def test_gc_conn(self, testing_engine):
        """Connection and transaction proxies are released as each
        reference goes out of scope."""
        ReversibleProxy._proxy_objects.clear()

        async_engine = AsyncEngine(testing.db)

        eq_(len(ReversibleProxy._proxy_objects), 1)

        async with async_engine.connect() as conn:
            eq_(len(ReversibleProxy._proxy_objects), 2)

            async with conn.begin() as trans:
                eq_(len(ReversibleProxy._proxy_objects), 3)

            del trans

        del conn

        eq_(len(ReversibleProxy._proxy_objects), 1)

        del async_engine

        eq_(len(ReversibleProxy._proxy_objects), 0)

    def test_regen_conn_but_not_engine(self, async_engine):
        """Regenerating a connection proxy reuses the existing engine proxy."""
        sync_conn = async_engine.sync_engine.connect()
        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
        async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn)

        is_(async_conn, async_conn2)
        is_(async_conn.engine, async_engine)

    def test_regen_trans_but_not_conn(self, async_engine):
        """A transaction begun on the sync side is visible through the
        existing connection proxy, and repeat lookups are stable."""
        sync_conn = async_engine.sync_engine.connect()
        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)

        trans = sync_conn.begin()

        async_t1 = async_conn.get_transaction()

        is_(async_t1.connection, async_conn)
        is_(async_t1.sync_transaction, trans)

        async_t2 = async_conn.get_transaction()
        is_(async_t1, async_t2)
|
rl-sim.py | # -*- coding:utf8 -*-
# File : rl-sim.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 4/22/17
#
# This file is part of TensorArtist.
import argparse
import pickle
import time
import threading
import os.path as osp
from tartist import image
from tartist.app.rl.simulator import Controller, Pack, __quit_action__
from tartist.core import get_logger, io
from tartist.core.utils.imp import load_source
# Module-level CLI setup: parsed at import time so that main() and the
# __main__ block below share the same options namespace.
logger = get_logger(__file__)

parser = argparse.ArgumentParser()
parser.add_argument('cfg')  # path to a python config file loaded via load_source()
parser.add_argument('-fps', '--fps', dest='fps', default=24, type=int, help='Controller fps, 0 if wait-keyboard')
parser.add_argument('-rfps', '--render-fps', dest='rfps', default=100, type=int, help='Render fps')
parser.add_argument('-winsize', '--window-size', dest='winsize', default=None, type=int, nargs=2,
                    help='Window size, of format (min_dim, max_dim)')
parser.add_argument('-record', '--record', dest='record', action='store_true', help='Whether to record the play')
args = parser.parse_args()
def vis_and_action(args, cfg, controller, observation):
    """Render the current game state and poll the controller for an action.

    Returns the chosen action index, or ``__quit_action__`` when the quit
    key is pressed.
    """
    # The "observation" passed in is actually a raw game state; apply the
    # config's observe() transform when one is provided.
    if hasattr(cfg, 'observe'):
        observation = cfg.observe(observation)

    display = image.resize_minmax(observation, *args.winsize, interpolation='NEAREST')
    controller.update(display)

    action = 0
    if args.fps > 0:
        # Realtime mode: scan currently-pressed keys (highest index wins),
        # then pace the loop to the requested fps.
        for idx in reversed(range(len(cfg.action_keys))):
            if controller.test_key(cfg.action_keys[idx]):
                action = idx
                break
        if controller.test_key(cfg.quit_action_key):
            action = __quit_action__
        time.sleep(1.0 / args.fps)
    else:
        # Turn-based mode: act on the last key event only.
        pressed = controller.get_last_key()
        for idx in reversed(range(len(cfg.action_keys))):
            if pressed == cfg.action_keys[idx]:
                action = idx
                break
        if pressed == cfg.quit_action_key:
            action = __quit_action__
    return action
def dump_pack(cfg, pack):
    """Serialize a recorded replay to ``replays/<name>-<ts>.replay.pkl``."""
    payload = pack.make_pickleable()
    # filename: "<config name>-<YYYYmmdd-HHMMSS>.replay.pkl"
    name = '{}-{}.replay.pkl'.format(cfg.name, time.strftime('%Y%m%d-%H%M%S'))
    io.mkdir('replays')
    with open(osp.join('replays', name), 'wb') as f:
        pickle.dump(payload, f, pickle.HIGHEST_PROTOCOL)
    logger.critical('replay written to replays/{}'.format(name))
def main(args, controller):
    """Drive the play loop: load the game config, render frames, apply
    player actions, and optionally record a replay pack."""
    cfg = load_source(args.cfg, 'cfg')
    if args.winsize is None:
        args.winsize = cfg.default_winsize

    controller.update_title(cfg.name)

    game = cfg.make()
    is_over = False

    if args.record:
        # Pack accumulates (action, state, reward, is_over) steps for replay.
        pack = Pack(cfg)
        pack.reset(game.current_state)
    else:
        pack = None

    while True:
        action = vis_and_action(args, cfg, controller, game.current_state)
        if action == __quit_action__:
            break

        if not is_over:
            reward, is_over = game.action(action)
            if args.record:
                pack.step(action, game.current_state, reward, is_over)

            # In realtime mode (fps > 0) logging every no-op action is
            # spammy; honor the config's mute_noaction flag there.
            if not cfg.mute_noaction or args.fps == 0 or action != 0:
                logger.info('Perform action: {}, reward={}.'.format(cfg.action_names[action], reward))

            if is_over:
                logger.critical('Game ended, press q (quit key) to exit.')

    logger.critical('Quit action detected: about to exit.')
    controller.quit()

    if args.record:
        dump_pack(cfg, pack)
if __name__ == '__main__':
    # The GUI controller must own the process main loop, so the game logic
    # runs in a worker thread while controller.mainloop() blocks here.
    controller = Controller(fps=args.rfps)
    thread = threading.Thread(target=main, args=(args, controller))
    thread.start()
    controller.mainloop()
|
views.py | import hashlib
import hmac
import json
import logging
import shlex
import subprocess
import threading
from django.http import JsonResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from core import models
logger = logging.getLogger(__name__)
def _verify_signature(client_secret, raw_response, x_hub_signature):
digest = hmac.new(client_secret.encode('utf-8'),
msg=raw_response.encode('utf-8'),
digestmod=hashlib.sha1
).hexdigest()
return 'sha1={}'.format(digest) == x_hub_signature
def create_request_dict(request):
    """Build a JSON-serializable snapshot of an incoming HTTP request.

    Captures query/POST params, cookies, path, method, encoding, the
    request-related META headers (HTTP_*/REMOTE_*) and the decoded body;
    when the body is valid JSON its parsed form is added as ``body_json``.

    :param request: a Django HttpRequest
    :return: dict suitable for storing in a HookLog record
    """
    meta = {}
    try:
        body = request.body.decode('utf-8')
    except UnicodeDecodeError as e:
        # FIX: logger.warn is a deprecated alias; also fixed the "faild" typo
        logger.warning('failed to load body as utf-8. %s', e)
        body = None
    # Keep only request-related META entries; the rest are server internals.
    for k, v in request.META.items():
        if k.startswith(('HTTP_', 'REMOTE_')):
            meta[k.lower()] = v
    req = {
        'datetime': timezone.localtime(timezone.now()).isoformat(),
        'get': dict(request.GET),
        'post': dict(request.POST),
        'body': body,
        'cookies': dict(request.COOKIES),
        'path': request.path,
        'meta': meta,
        'encoding': request.encoding,
        'method': request.method,
    }
    # If the POST body is JSON, also store the deserialized form.
    try:
        req['body_json'] = json.loads(body)
    except (TypeError, ValueError) as e:
        logger.debug('failed to load as json. %s', e)
    return req
@csrf_exempt
def webhook_github(request):
    """Entry point for GitHub webhooks.

    Logs every delivery to HookLog, then dispatches on the
    ``X-Github-Event`` header. Always returns a JSON response.
    """
    req = create_request_dict(request)
    models.HookLog.objects.create(data=req)

    event = request.META.get('HTTP_X_GITHUB_EVENT')
    body = request.body.decode('utf-8')
    try:
        # json.JSONDecodeError is a ValueError subclass
        body_json = json.loads(body)
    except ValueError:
        return JsonResponse({'error': 'cannot decode body like json'})

    ret = {}
    # BUG FIX: the original condition was ``event == 'push' or True`` -- a
    # debugging leftover that ran the push handler for *every* event type.
    if event == 'push':
        ret = github_push(request, body_json)
    elif event == 'release':
        github_release(request, body_json)
    elif event == 'deployment_status':
        # not handled yet
        pass
    return JsonResponse(ret)
def github_push(request, payload):
    """Handle a GitHub ``push`` event: verify the HMAC signature and run
    every deploy setting configured for the pushed branch.

    :param request: the incoming Django request (for headers and raw body)
    :param payload: the decoded JSON webhook payload
    :return: {'data': ''} on success or {'error': <reason>} on failure
    """
    try:
        repo_fullname = payload['repository']['full_name']
    except (KeyError, TypeError):
        # FIX: was a bare ``except:`` -- narrow to the malformed-payload cases
        return {'error': 'payload repo_fullname'}
    try:
        repo = models.Repository.objects.get(hub='github', full_name=repo_fullname)
    except models.Repository.DoesNotExist:
        return {'error': 'repository not exist'}

    # X-Hub-Signature: reject requests not signed with this repo's secret.
    signature = request.META.get('HTTP_X_HUB_SIGNATURE')
    if not _verify_signature(repo.secret, request.body.decode('utf-8'), signature):
        return {'error': 'incorrect signature'}

    # X-Github-Delivery
    # delivery = request.META.get('HTTP_X_GITHUB_DELIVERY')

    # "refs/heads/<branch>" -> "<branch>"; FIX: payload may lack "ref"
    # entirely (the original crashed on None.replace).
    branch = (payload.get('ref') or '').replace('refs/heads/', '')
    deploy_settings = repo.deploysetting_set.filter(branch=branch)
    if not deploy_settings:
        return {'error': 'no deploy settings'}
    for deploy in deploy_settings:
        exec_command(deploy)
    return {'data': ''}
def update_log(p1):
    """Wait for the deploy subprocess to finish and persist its output
    and return code as a DeployLog record."""
    stdout, _stderr = p1.communicate()
    models.DeployLog.objects.create(log=stdout, return_code=p1.returncode)
def exec_command(deploy):
    """Run the deploy's ansible-playbook command in the background.

    The subprocess output is collected by a worker thread (update_log) so
    the webhook response is not blocked on the playbook run.

    :param deploy: a DeploySetting whose ``command`` holds the playbook args
    """
    cmd = ['ansible-playbook']
    cmd.extend(shlex.split(deploy.command))
    try:
        p1 = subprocess.Popen(cmd)
        t = threading.Thread(target=update_log, args=(p1,))
        t.start()
    except Exception:
        # FIX: was a silent bare ``except: pass`` -- keep the webhook
        # best-effort but leave a trace of the failure (e.g. the
        # ansible-playbook binary missing).
        logger.exception('failed to start deploy command: %s', cmd)
def github_release(request, payload):
    """Handle a GitHub ``release`` event (currently a no-op placeholder)."""
    if not payload.get('prerelease'):
        return
    # prerelease handling not implemented yet
|
train.py | #!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
import torch.distributed as dist
import torch.multiprocessing as mp
from tqdm import tqdm
from torch.multiprocessing import Process, Queue, Pool
import config_debug
from core.dbs import datasets
from core.utils import stdout_to_tqdm
from core.config import SystemConfig
from core.sample import data_sampling_func
from core.nnet.py_factory import NetworkFactory
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
    """Parse command-line options for the training script.

    Defaults for --cfg_file and --workers come from the config_debug module.
    """
    ap = argparse.ArgumentParser(description="Training Script")
    ap.add_argument("--cfg_file", default=config_debug.cfg_file, help="config file", type=str)
    ap.add_argument("--iter", dest="start_iter",
                    help="train at iteration i",
                    default=0, type=int)
    ap.add_argument("--workers", default=config_debug.workers, type=int)
    ap.add_argument("--initialize", action="store_true")
    ap.add_argument("--distributed", action="store_true")
    ap.add_argument("--world-size", default=-1, type=int,
                    help="number of nodes of distributed training")
    ap.add_argument("--rank", default=0, type=int,
                    help="node rank for distributed training")
    ap.add_argument("--dist-url", default=None, type=str,
                    help="url used to set up distributed training")
    ap.add_argument("--dist-backend", default="nccl", type=str)
    return ap.parse_args()
def prefetch_data(system_config, db, queue, sample_data, data_aug):
    """Worker loop: repeatedly sample training data and push it onto *queue*.

    Runs forever (it is used as a daemon-process target); any sampling error
    is printed and re-raised so the worker dies loudly.

    :param system_config: SystemConfig forwarded to the sampler
    :param db: dataset handle to sample from
    :param queue: multiprocessing queue receiving sampled batches
    :param sample_data: callable (system_config, db, ind, data_aug=...) ->
        (data, next_ind)
    :param data_aug: whether to apply data augmentation
    """
    ind = 0
    print("start prefetching data...")
    # Seed per-process so parallel workers don't produce identical samples.
    np.random.seed(os.getpid())
    while True:
        try:
            data, ind = sample_data(system_config, db, ind, data_aug=data_aug)
            queue.put(data)
        except Exception:
            traceback.print_exc()
            # FIX: bare ``raise`` re-raises with the original traceback
            # (the original ``raise e`` re-raised from the handler).
            raise
def _pin_memory(ts):
if type(ts) is list:
return [t.pin_memory() for t in ts]
return ts.pin_memory()
def pin_memory(data_queue, pinned_data_queue, sema):
    """Worker loop: move batches from *data_queue* to *pinned_data_queue*,
    pinning every tensor to page-locked host memory on the way.

    Processes one batch per iteration and then polls *sema*; releasing the
    semaphore tells the worker to exit after the batch in flight.
    """
    while True:
        batch = data_queue.get()

        batch["xs"] = [_pin_memory(item) for item in batch["xs"]]
        batch["ys"] = [_pin_memory(item) for item in batch["ys"]]

        pinned_data_queue.put(batch)

        # non-blocking poll: success means shutdown was requested
        if sema.acquire(blocking=False):
            return
def init_parallel_jobs(system_config, dbs, queue, fn, data_aug):
    """Spawn one daemon prefetch process per dataset handle.

    :return: the list of started Process objects (for later termination)
    """
    workers = []
    for db in dbs:
        proc = Process(target=prefetch_data, args=(system_config, db, queue, fn, data_aug))
        workers.append(proc)
    for proc in workers:
        proc.daemon = True
        proc.start()
    return workers
def terminate_tasks(tasks):
    """Terminate every data-fetching worker process in *tasks*."""
    for worker in tasks:
        worker.terminate()
def train(training_dbs, validation_db, system_config, model, args):
    """Run the training loop.

    Spawns prefetch worker processes plus pin-memory threads, optionally
    restores a pretrained/previous snapshot, then iterates training with
    periodic validation, snapshotting and learning-rate decay.

    :param training_dbs: list of dataset handles, one per prefetch worker
    :param validation_db: dataset handle used for periodic validation
    :param system_config: SystemConfig carrying the hyperparameters
    :param model: network module passed to NetworkFactory
    :param args: parsed CLI namespace (start_iter, distributed, gpu, rank, ...)
    """
    # reading arguments from command
    start_iter = args.start_iter
    distributed = args.distributed
    initialize = args.initialize
    gpu = args.gpu
    rank = args.rank

    # reading arguments from json file
    batch_size = system_config.batch_size
    learning_rate = system_config.learning_rate
    max_iteration = system_config.max_iter
    pretrained_model = system_config.pretrain
    snapshot = system_config.snapshot
    val_iter = system_config.val_iter
    display = system_config.display
    decay_rate = system_config.decay_rate
    stepsize = system_config.stepsize

    # debug override: evaluate a released snapshot with a shortened schedule
    if config_debug.validation:
        if config_debug.cfg_file == 'CornerNet_ifp_Squeeze':
            pretrained_model = "./cache/nnet/CornerNet_Squeeze/CornerNet_Squeeze_500000.pkl"
        elif config_debug.cfg_file == 'CornerNet_ifp_Saccade':
            pretrained_model = "./cache/nnet/CornerNet_Saccade/CornerNet_Saccade_500000.pkl"
        max_iteration = 100000
        stepsize = 100000

    print("Process {}: building model...".format(rank))
    nnet = NetworkFactory(system_config, model, distributed=distributed, gpu=gpu)
    if initialize:
        # just write an initial snapshot and quit
        nnet.save_params(0)
        exit(0)

    # queues storing data for training
    training_queue = Queue(system_config.prefetch_size)
    validation_queue = Queue(5)

    # queues storing pinned data for training
    pinned_training_queue = queue.Queue(system_config.prefetch_size)
    pinned_validation_queue = queue.Queue(5)

    # allocating resources for parallel reading
    training_tasks = init_parallel_jobs(system_config, training_dbs, training_queue, data_sampling_func, True)
    if val_iter:
        validation_tasks = init_parallel_jobs(system_config, [validation_db], validation_queue, data_sampling_func, False)

    # the semaphores start acquired; releasing them later signals the
    # pin-memory threads to exit (see pin_memory())
    training_pin_semaphore = threading.Semaphore()
    validation_pin_semaphore = threading.Semaphore()
    training_pin_semaphore.acquire()
    validation_pin_semaphore.acquire()

    training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
    training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
    training_pin_thread.daemon = True
    training_pin_thread.start()

    validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
    validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
    validation_pin_thread.daemon = True
    validation_pin_thread.start()

    if pretrained_model is not None:
        if not os.path.exists(pretrained_model):
            raise ValueError("pretrained model does not exist")
        print("Process {}: loading from pretrained model".format(rank))
        nnet.load_pretrained_params(pretrained_model)

    if start_iter:
        # resume: restore the snapshot and replay the lr decay schedule
        nnet.load_params(start_iter)
        learning_rate /= (decay_rate ** (start_iter // stepsize))
        nnet.set_lr(learning_rate)
        print("Process {}: training starts from iteration {} with learning_rate {}".format(rank, start_iter + 1, learning_rate))
    else:
        nnet.set_lr(learning_rate)

    if rank == 0:
        print("training start...")
    nnet.cuda()
    nnet.train_mode()
    with stdout_to_tqdm() as save_stdout:
        for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
            training = pinned_training_queue.get(block=True)
            training_loss = nnet.train(**training)

            if display and iteration % display == 0:
                print("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
            del training_loss

            if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
                nnet.eval_mode()
                validation = pinned_validation_queue.get(block=True)
                validation_loss = nnet.validate(**validation)
                print("Process {}: validation loss at iteration {}: {}".format(rank, iteration, validation_loss.item()))
                nnet.train_mode()

            if iteration % snapshot == 0 and rank == 0:
                nnet.save_params(iteration)

            if iteration % stepsize == 0:
                learning_rate /= decay_rate
                nnet.set_lr(learning_rate)

    # sending signal to kill the pin-memory threads
    training_pin_semaphore.release()
    validation_pin_semaphore.release()

    # terminating data fetching processes
    terminate_tasks(training_tasks)
    # BUG FIX: validation_tasks only exists when val_iter is truthy; the
    # original referenced it unconditionally and raised NameError whenever
    # validation was disabled.
    if val_iter:
        terminate_tasks(validation_tasks)
def main(gpu, ngpus_per_node, args):
    """Per-process training entry point.

    Sets up (optionally distributed) process state, loads the JSON config
    named after ``args.cfg_file``, builds the model module of the same name,
    creates one training DB per data-loading worker plus one validation DB,
    and hands everything to ``train``.
    """
    args.gpu = gpu
    if args.distributed:
        # Global rank = node rank * gpus-per-node + local gpu index.
        args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(
            backend=args.dist_backend,
            init_method=args.dist_url,
            world_size=args.world_size,
            rank=args.rank,
        )
    rank = args.rank

    cfg_file = os.path.join("./configs", args.cfg_file + ".json")
    with open(cfg_file, "r") as f:
        config = json.load(f)
    config["system"]["snapshot_name"] = args.cfg_file
    system_config = SystemConfig().update_config(config["system"])

    # The model module is looked up by the same name as the config file.
    model_module = importlib.import_module("core.models.{}".format(args.cfg_file))
    model = model_module.model()

    train_split = system_config.train_split
    val_split = system_config.val_split

    print("Process {}: loading all datasets...".format(rank))
    dataset = system_config.dataset
    workers = args.workers
    print("Process {}: using {} workers".format(rank, workers))
    # One independent training DB instance per prefetch worker.
    training_dbs = [
        datasets[dataset](config["db"], split=train_split, sys_config=system_config)
        for _ in range(workers)
    ]
    validation_db = datasets[dataset](config["db"], split=val_split, sys_config=system_config)

    if rank == 0:
        print("system config...")
        pprint.pprint(system_config.full)
        print("db config...")
        pprint.pprint(training_dbs[0].configs)
        print("len of db: {}".format(len(training_dbs[0].db_inds)))
        print("distributed: {}".format(args.distributed))

    train(training_dbs, validation_db, system_config, model, args)
if __name__ == "__main__":
    args = parse_args()

    distributed = args.distributed
    world_size = args.world_size
    # NOTE(review): the message says "greater than 0" but only negative
    # values are rejected — confirm whether world_size == 0 should raise too.
    if distributed and world_size < 0:
        raise ValueError("world size must be greater than 0 in distributed training")

    ngpus_per_node = torch.cuda.device_count()
    if distributed:
        # Total world size = processes per node * number of nodes.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main(None, ngpus_per_node, args)
|
test_telnetlib.py | import socket
import selectors
import telnetlib
import time
import contextlib
from test import support
import unittest
threading = support.import_module('threading')
HOST = support.HOST
def server(evt, serv):
    """Accept (and immediately drop) one connection, then close the socket.

    Sets *evt* once the server is listening so the test thread can proceed.
    A socket timeout while waiting for the client is tolerated.
    """
    serv.listen()
    evt.set()
    try:
        conn, _addr = serv.accept()
    except socket.timeout:
        pass
    else:
        conn.close()
    finally:
        serv.close()
class GeneralTests(unittest.TestCase):
    """Connection and timeout tests run against a throwaway local server
    thread (see ``server``)."""

    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. Look issue 11812
        self.port = support.bind_port(self.sock)
        self.thread = threading.Thread(target=server, args=(self.evt, self.sock))
        # FIX: Thread.setDaemon() is deprecated since Python 3.10;
        # set the daemon attribute directly instead.
        self.thread.daemon = True
        self.thread.start()
        self.evt.wait()

    def tearDown(self):
        self.thread.join()
        del self.thread  # Clear out any dangling Thread objects.

    def testBasic(self):
        # connects
        telnet = telnetlib.Telnet(HOST, self.port)
        telnet.sock.close()

    def testTimeoutDefault(self):
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutNone(self):
        # None, having other default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(telnet.sock.gettimeout() is None)
        telnet.sock.close()

    def testTimeoutValue(self):
        telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutOpen(self):
        telnet = telnetlib.Telnet()
        telnet.open(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testGetters(self):
        # Test telnet getter methods
        telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
        t_sock = telnet.sock
        self.assertEqual(telnet.get_socket(), t_sock)
        self.assertEqual(telnet.fileno(), t_sock.fileno())
        telnet.sock.close()
class SocketStub(object):
    '''A socket stand-in that records writes and replays queued reads.'''

    def __init__(self, reads=()):
        self.reads = list(reads)  # defensive copy of the queued chunks
        self.writes = []
        self.block = False

    def sendall(self, data):
        self.writes.append(data)

    def recv(self, size):
        # Drain queued chunks until we have at least `size` bytes or run dry.
        collected = []
        total = 0
        while self.reads and total < size:
            chunk = self.reads.pop(0)
            collected.append(chunk)
            total += len(chunk)
        out = b''.join(collected)
        if total > size:
            # Push the surplus back onto the front of the queue.
            self.reads.insert(0, out[size:])
            out = out[:size]
        return out
class TelnetAlike(telnetlib.Telnet):
    """Telnet variant for tests: no real file descriptor, no-op close,
    readiness driven by the stub socket's ``block`` flag, and ``msg()``
    output captured into ``_messages`` instead of stdout."""

    def fileno(self):
        raise NotImplementedError()

    def close(self):
        pass

    def sock_avail(self):
        return not self.sock.block

    def msg(self, msg, *args):
        with support.captured_stdout() as out:
            telnetlib.Telnet.msg(self, msg, *args)
        self._messages += out.getvalue()
class MockSelector(selectors.BaseSelector):
    """Minimal selector stub: readiness is decided by the registered
    TelnetAlike's stub socket ``block`` flag rather than real I/O."""

    def __init__(self):
        self.keys = {}

    @property
    def resolution(self):
        return 1e-3

    def register(self, fileobj, events, data=None):
        key = selectors.SelectorKey(fileobj, 0, events, data)
        self.keys[fileobj] = key
        return key

    def unregister(self, fileobj):
        return self.keys.pop(fileobj)

    def select(self, timeout=None):
        # If the (first) registered telnet's stub socket is blocking,
        # nothing is ready; otherwise every registered key is ready.
        blocked = False
        for fileobj in self.keys:
            if isinstance(fileobj, TelnetAlike):
                blocked = fileobj.sock.block
                break
        if blocked:
            return []
        return [(key, key.events) for key in self.keys.values()]

    def get_map(self):
        return self.keys
@contextlib.contextmanager
def test_socket(reads):
    """Temporarily patch socket.create_connection to hand out a SocketStub
    preloaded with *reads*; always restores the original on exit."""
    def fake_create_connection(*ignored):
        return SocketStub(reads)
    saved = socket.create_connection
    socket.create_connection = fake_create_connection
    try:
        yield None
    finally:
        socket.create_connection = saved
def test_telnet(reads=(), cls=TelnetAlike):
    ''' return a telnetlib.Telnet object that uses a SocketStub with
    reads queued up to be read '''
    for chunk in reads:
        assert type(chunk) is bytes, chunk
    with test_socket(reads):
        conn = cls('dummy', 0)
    conn._messages = ''  # collected debuglevel output
    return conn
class ExpectAndReadTestCase(unittest.TestCase):
    """Base class that makes telnetlib use MockSelector for each test."""

    def setUp(self):
        # Swap the module-level selector class for the mock version.
        self.old_selector = telnetlib._TelnetSelector
        telnetlib._TelnetSelector = MockSelector

    def tearDown(self):
        # Restore the real selector so other tests are unaffected.
        telnetlib._TelnetSelector = self.old_selector
class ReadTests(ExpectAndReadTestCase):
    """Exercise the Telnet read_*() variants against queued stub data."""

    def test_read_until(self):
        """
        read_until(expected, timeout=None)
        test the blocking version of read_util
        """
        want = [b'xxxmatchyyy']
        telnet = test_telnet(want)
        data = telnet.read_until(b'match')
        self.assertEqual(data, b'xxxmatch',
                         msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))

        reads = [b'x' * 50, b'match', b'y' * 50]
        expect = b''.join(reads[:-1])
        telnet = test_telnet(reads)
        data = telnet.read_until(b'match')
        self.assertEqual(data, expect)

    def test_read_all(self):
        """
        read_all()
        Read all data until EOF; may block.
        """
        reads = [b'x' * 500, b'y' * 500, b'z' * 500]
        expect = b''.join(reads)
        telnet = test_telnet(reads)
        data = telnet.read_all()
        self.assertEqual(data, expect)

    def test_read_some(self):
        """
        read_some()
        Read at least one byte or EOF; may block.
        """
        # test 'at least one byte'
        telnet = test_telnet([b'x' * 500])
        data = telnet.read_some()
        self.assertTrue(len(data) >= 1)
        # test EOF
        telnet = test_telnet()
        data = telnet.read_some()
        self.assertEqual(b'', data)

    def _read_eager(self, func_name):
        """
        read_*_eager()
        Read all data available already queued or on the socket,
        without blocking.
        """
        want = b'x' * 100
        telnet = test_telnet([want])
        func = getattr(telnet, func_name)
        telnet.sock.block = True
        self.assertEqual(b'', func())
        telnet.sock.block = False
        data = b''
        while True:
            try:
                data += func()
            except EOFError:
                break
        self.assertEqual(data, want)

    def test_read_eager(self):
        # read_eager and read_very_eager make the same guarantees
        # (they behave differently but we only test the guarantees)
        self._read_eager('read_eager')
        self._read_eager('read_very_eager')
        # NB -- we need to test the IAC block which is mentioned in the
        # docstring but not in the module docs

    def test_read_very_lazy(self):
        # FIX: this method was named `read_very_lazy`, so unittest never
        # discovered or ran it; the `test_` prefix restores discovery.
        want = b'x' * 100
        telnet = test_telnet([want])
        self.assertEqual(b'', telnet.read_very_lazy())
        while telnet.sock.reads:
            telnet.fill_rawq()
        data = telnet.read_very_lazy()
        self.assertEqual(want, data)
        self.assertRaises(EOFError, telnet.read_very_lazy)

    def test_read_lazy(self):
        want = b'x' * 100
        telnet = test_telnet([want])
        self.assertEqual(b'', telnet.read_lazy())
        data = b''
        while True:
            try:
                read_data = telnet.read_lazy()
                data += read_data
                if not read_data:
                    telnet.fill_rawq()
            except EOFError:
                break
            self.assertTrue(want.startswith(data))
        self.assertEqual(data, want)
class nego_collector(object):
    """Records option-negotiation callbacks for later inspection."""

    def __init__(self, sb_getter=None):
        self.seen = b''
        self.sb_getter = sb_getter
        self.sb_seen = b''

    def do_nego(self, sock, cmd, opt):
        """Accumulate cmd+opt; on SE, drain subnegotiation via sb_getter."""
        self.seen = self.seen + cmd + opt
        if cmd == tl.SE and self.sb_getter:
            self.sb_seen = self.sb_seen + self.sb_getter()
tl = telnetlib
class WriteTests(unittest.TestCase):
    '''The only thing that write does is replace each tl.IAC for
    tl.IAC+tl.IAC'''

    def test_write(self):
        samples = [
            b'data sample without IAC',
            b'data sample with' + tl.IAC + b' one IAC',
            b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
            tl.IAC,
            b'',
        ]
        for payload in samples:
            conn = test_telnet()
            conn.write(payload)
            sent = b''.join(conn.sock.writes)
            self.assertEqual(payload.replace(tl.IAC, tl.IAC + tl.IAC), sent)
class OptionTests(unittest.TestCase):
    """IAC command dispatch and subnegotiation handling."""

    # RFC 854 commands
    cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]

    def _test_command(self, data):
        """ helper for testing IAC + cmd """
        conn = test_telnet(data)
        data_len = len(b''.join(data))
        collector = nego_collector()
        conn.set_option_negotiation_callback(collector.do_nego)
        txt = conn.read_all()
        cmd = collector.seen
        self.assertTrue(len(cmd) > 0)  # we expect at least one command
        self.assertIn(cmd[:1], self.cmds)
        self.assertEqual(cmd[1:2], tl.NOOPT)
        self.assertEqual(data_len, len(txt + cmd))
        collector.sb_getter = None  # break the nego => telnet cycle

    def test_IAC_commands(self):
        for cmd in self.cmds:
            self._test_command([tl.IAC, cmd])
            self._test_command([b'x' * 100, tl.IAC, cmd, b'y' * 100])
            self._test_command([b'x' * 10, tl.IAC, cmd, b'y' * 10])
        # all commands in one shot
        self._test_command([tl.IAC + cmd for cmd in self.cmds])

    def test_SB_commands(self):
        # RFC 855, subnegotiations portion
        send = [
            tl.IAC + tl.SB + tl.IAC + tl.SE,
            tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
            tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
            tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
            tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
        ]
        conn = test_telnet(send)
        collector = nego_collector(conn.read_sb_data)
        conn.set_option_negotiation_callback(collector.do_nego)
        txt = conn.read_all()
        self.assertEqual(txt, b'')
        want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
        self.assertEqual(collector.sb_seen, want_sb_data)
        self.assertEqual(b'', conn.read_sb_data())
        collector.sb_getter = None  # break the nego => telnet cycle

    def test_debuglevel_reads(self):
        # test all the various places that self.msg(...) is called
        given_a_expect_b = [
            # Telnet.fill_rawq
            (b'a', ": recv b''\n"),
            # Telnet.process_rawq
            (tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
            (tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
            (tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
            (tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
            (tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
        ]
        for given, expected in given_a_expect_b:
            conn = test_telnet([given])
            conn.set_debuglevel(1)
            conn.read_all()
            self.assertIn(expected, conn._messages)

    def test_debuglevel_write(self):
        conn = test_telnet()
        conn.set_debuglevel(1)
        conn.write(b'xxx')
        self.assertIn("send b'xxx'\n", conn._messages)

    def test_debug_accepts_str_port(self):
        # Issue 10695
        with test_socket([]):
            conn = TelnetAlike('dummy', '0')
            conn._messages = ''
        conn.set_debuglevel(1)
        conn.msg('test')
        self.assertRegex(conn._messages, r'0.*test')
class ExpectTests(ExpectAndReadTestCase):
    def test_expect(self):
        """
        expect(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.
        """
        chunks = [b'x' * 10, b'match', b'y' * 10]
        conn = test_telnet(chunks)
        _, _, data = conn.expect([b'match'])
        self.assertEqual(data, b''.join(chunks[:-1]))
if __name__ == '__main__':
    # FIX: dropped the redundant re-import of unittest - it is already
    # imported at the top of the module.
    unittest.main()
|
zip_streaming.py | #!/usr/bin/python3.9
# SPDX-License-Identifier: MIT
#
# Streaming zip files via http with naive block cache.
#
__author__ = "Amir Heinisch <mail@amir-heinisch.de>"
import os, re
from urllib.request import Request, urlopen
class URLCache:
    """File-like, seekable, read-only view of a remote URL.

    Bytes are fetched lazily over HTTP ``Range`` requests in fixed-size
    blocks and kept in a naive in-memory block cache, so e.g.
    ``zipfile.ZipFile`` can read a remote archive without downloading it all.
    """

    # Cache block size in bytes (10 MiB).
    CBLKSIZE = 10 * 1024 * 1024

    def __init__(self, url):
        self.url = url
        self.cursor = 0
        # Ask the webserver for the total file size via a HEAD request.
        with urlopen(Request(url, method="HEAD")) as res:
            if res.status != 200:
                # FIX: previously a non-200 response left self.size unset,
                # causing a confusing AttributeError below.
                raise OSError("HEAD {} failed with status {}".format(url, res.status))
            self.size = int(res.getheader("Content-Length"))
        # One cache slot per block; the last block may be partial.
        rest = 0 if self.size % self.CBLKSIZE == 0 else 1
        self.cache = [None] * (self.size // self.CBLKSIZE + rest)

    def seekable(self):
        return True

    def get_size(self):
        return self.size

    def download_range(self, start, end):
        """Fetch bytes [start, end] (inclusive) of the remote file."""
        print("Download: (", start, ",", end, ")")
        headers = {
            # TODO: multiple ranges in one request are possible.
            "Range": f"bytes={start}-{end}"
        }
        # BUG FIX: this used the bare global name `url` (only set by the
        # demo script's __main__ block) instead of the instance's URL.
        req = Request(self.url, headers=headers)
        with urlopen(req) as res:
            return res.read()

    def seek(self, offset, whence=os.SEEK_SET):
        """Move the cursor like io.IOBase.seek (unknown whence is ignored)."""
        if whence == os.SEEK_SET:
            self.cursor = offset
        elif whence == os.SEEK_CUR:
            self.cursor += offset
        elif whence == os.SEEK_END:
            self.cursor = self.get_size() + offset
        else:
            pass

    def tell(self):
        return self.cursor

    def read(self, count=None):
        """Read up to *count* bytes from the cursor (all remaining if None).

        Returns b"" for a zero-length or out-of-range read.
        """
        if count is None:
            count = self.get_size() - self.cursor
        if (self.cursor + count - 1) >= self.get_size() or count == 0:
            return b""
        first_blk = self.cursor // self.CBLKSIZE
        last_blk = (self.cursor + count) // self.CBLKSIZE
        chunks = []
        for blk_idx in range(first_blk, last_blk + 1):
            if self.cache[blk_idx] is None:
                blk_start = blk_idx * self.CBLKSIZE
                self.cache[blk_idx] = self.download_range(
                    blk_start, blk_start + self.CBLKSIZE - 1)
            chunks.append(self.cache[blk_idx])
        # Trim leading bytes of the first block up to the cursor position.
        trim = self.cursor - (first_blk * self.CBLKSIZE)
        chunks[0] = chunks[0][trim:]
        data = b"".join(chunks)[:count]
        assert len(data) == count
        self.cursor += len(data)
        return data
if __name__ == '__main__':
    import sys, zipfile

    # Input is either a URL or a local zip filename.
    target = sys.argv[1]
    if target.startswith("http"):
        url = target
        # Start local http server in background thread.
        from http.server import HTTPServer
        from threading import Thread
        # Std implementation doesn't implement range header see here:
        # - https://github.com/danvk/RangeHTTPServer/blob/master/RangeHTTPServer/__init__.py
        import RangeHTTPRequestHandler
        httpd = HTTPServer(('127.0.0.1', 8000), RangeHTTPRequestHandler)
        server_thread = Thread(target=httpd.serve_forever, daemon=True)
        server_thread.start()
        zf = zipfile.ZipFile(URLCache(url))
    else:
        if not zipfile.is_zipfile(target):
            print("No valid zip given.")
            sys.exit()
        zf = zipfile.ZipFile(target)

    print(zf.namelist())
    with zf.open("test.txt") as f:
        print(f.read())
    zf.close()
|
moto_services.py | # Copyright 2019-2022 Darren Weber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
import threading
import time
import moto.backends
import moto.server
import urllib3
import werkzeug.serving
from pytest_aiomoto.utils import AWS_HOST
from pytest_aiomoto.utils import get_free_tcp_port
_PYCHARM_HOSTED = os.environ.get("PYCHARM_HOSTED") == "1"
CONNECT_TIMEOUT = 90 if _PYCHARM_HOSTED else 10
def moto_service_reset(service_name: str):
    """
    Reset a moto service backend, for all regions.
    Each service can have multiple regional backends.
    """
    for _region, backend in moto.backends.get_backend(service_name).items():
        backend.reset()
def moto_service_app(service_name: str):
    """Build a debug-enabled moto WSGI dispatcher app for one service."""
    dispatcher = moto.server.DomainDispatcherApplication(
        moto.server.create_backend_app, service=service_name
    )
    dispatcher.debug = True
    return dispatcher
class MotoService:
    """Will Create MotoService.
    Service is ref-counted so there will only be one per process. Real Service will
    be returned by `__enter__`."""

    # Process-wide registry of live services: {name: instance}
    _services = dict()  # {name: instance}

    def __init__(self, service_name: str, port: int = None):
        self._service_name = service_name
        if port:
            self._socket = None
            self._port = port
        else:
            # Reserve a free port; the held socket is released just before
            # the server thread binds it (see _server_entry / __exit__).
            self._socket, self._port = get_free_tcp_port()
        self._thread = None
        self._logger = logging.getLogger(self.__class__.__name__)
        self._refcount = 0
        self._ip_address = AWS_HOST
        self._server = None

    @property
    def endpoint_url(self):
        # Base URL clients use to reach this service instance.
        return "http://{}:{}".format(self._ip_address, self._port)

    def reset(self):
        # Clear all regional backends of this service.
        moto_service_reset(service_name=self._service_name)

    def __call__(self, func):
        # Decorator form: start the service around each call of `func`.
        def wrapper(*args, **kwargs):
            self._start()
            try:
                result = func(*args, **kwargs)
            finally:
                self._stop()
            return result
        functools.update_wrapper(wrapper, func)
        wrapper.__wrapped__ = func
        return wrapper

    def __enter__(self):
        # Ref-counted singleton per service name: the first entry starts a
        # server thread, later entries reuse (and ref-count) that instance.
        svc = self._services.get(self._service_name)
        if svc is None:
            self._services[self._service_name] = self
            self._refcount = 1
            self._start()
            return self
        else:
            svc._refcount += 1
            return svc

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._refcount -= 1
        if self._socket:
            self._socket.close()
            self._socket = None
        if self._refcount == 0:
            # Last user gone: drop from the registry and shut the server down.
            del self._services[self._service_name]
            self._stop()

    def _server_entry(self):
        # Thread target: build the WSGI app and serve until shutdown().
        self._main_app = moto_service_app(service_name=self._service_name)
        if self._socket:
            self._socket.close()  # release right before we use it
            self._socket = None
        self._server = werkzeug.serving.make_server(
            self._ip_address, self._port, self._main_app, True
        )
        self._server.serve_forever()

    def _start(self):
        self._thread = threading.Thread(target=self._server_entry, daemon=True)
        self._thread.start()
        http = urllib3.PoolManager()
        start = time.time()
        # Poll for up to ~10s until the server answers an HTTP request.
        # NOTE(review): a dead server thread also breaks out of this loop
        # without raising - confirm whether that should be an error too.
        while time.time() - start < 10:
            if not self._thread.is_alive():
                break
            try:
                resp = http.request(
                    "GET", self.endpoint_url + "/static", timeout=CONNECT_TIMEOUT
                )
                break
            except (
                urllib3.exceptions.NewConnectionError,
                urllib3.exceptions.MaxRetryError,
            ):
                time.sleep(0.2)
        else:
            # Loop exhausted without a successful request: clean up and fail.
            self._stop()  # pytest.fail doesn't call stop_process
            raise Exception("Cannot start {}: {}".format(self.__class__.__name__, self._service_name))

    def _stop(self):
        if self._server:
            self._server.shutdown()
        self._thread.join()
|
conftest.py | import multiprocessing
import platform
import pytest
def dummy_func():
    """No-op target used only to exercise process spawning."""
    pass
@pytest.fixture(scope='session', autouse=True)
def scope_session():
    """Once per session: force the 'forkserver' start method (Python 3+)
    and warm it up by spawning a single no-op process."""
    if int(platform.python_version_tuple()[0]) >= 3:
        multiprocessing.set_start_method('forkserver')
        proc = multiprocessing.Process(target=dummy_func)
        proc.start()
        proc.join()
    yield
|
test_retry_eintr.py | # -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import signal
import time
from threading import Thread
from pytest import mark
import zmq
from zmq.tests import BaseZMQTestCase, SkipTest, skip_pypy
from zmq.utils.strtypes import b
# Partially based on EINTRBaseTest from CPython 3.5 eintr_tester
class TestEINTRSysCall(BaseZMQTestCase):
    """ Base class for EINTR tests. """

    # delay for initial signal delivery
    signal_delay = 0.1
    # timeout for tests. Must be > signal_delay
    timeout = 0.25
    timeout_ms = int(timeout * 1e3)

    def alarm(self, t=None):
        """start a timer to fire only once
        like signal.alarm, but with better resolution than integer seconds.
        """
        if not hasattr(signal, 'setitimer'):
            raise SkipTest('EINTR tests require setitimer')
        if t is None:
            t = self.signal_delay
        self.timer_fired = False
        # Install our one-shot handler, remembering the previous one so
        # stop_timer can restore it.
        self.orig_handler = signal.signal(signal.SIGALRM, self.stop_timer)
        # signal_period ignored, since only one timer event is allowed to fire
        signal.setitimer(signal.ITIMER_REAL, t, 1000)

    def stop_timer(self, *args):
        # SIGALRM handler: record the firing, cancel the timer and restore
        # the original handler so at most one signal interrupts the test.
        self.timer_fired = True
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
        signal.signal(signal.SIGALRM, self.orig_handler)

    @mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
    def test_retry_recv(self):
        # recv with RCVTIMEO must retry after EINTR and still raise Again.
        pull = self.socket(zmq.PULL)
        pull.rcvtimeo = self.timeout_ms
        self.alarm()
        self.assertRaises(zmq.Again, pull.recv)
        assert self.timer_fired

    @mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
    def test_retry_send(self):
        # send with SNDTIMEO must retry after EINTR and still raise Again.
        push = self.socket(zmq.PUSH)
        push.sndtimeo = self.timeout_ms
        self.alarm()
        self.assertRaises(zmq.Again, push.send, b('buf'))
        assert self.timer_fired

    @mark.flaky(reruns=3)
    def test_retry_poll(self):
        # poll() interrupted by the signal must be retried and still see
        # the message delivered after the signal.
        x, y = self.create_bound_pair()
        poller = zmq.Poller()
        poller.register(x, zmq.POLLIN)
        self.alarm()
        def send():
            # Deliver well after the signal interrupts poll().
            time.sleep(2 * self.signal_delay)
            y.send(b('ping'))
        t = Thread(target=send)
        t.start()
        evts = dict(poller.poll(2 * self.timeout_ms))
        t.join()
        assert x in evts
        assert self.timer_fired
        x.recv()

    def test_retry_term(self):
        # context.destroy() (with linger pending) must survive the signal.
        push = self.socket(zmq.PUSH)
        push.linger = self.timeout_ms
        push.connect('tcp://127.0.0.1:5555')
        push.send(b('ping'))
        time.sleep(0.1)
        self.alarm()
        self.context.destroy()
        assert self.timer_fired
        assert self.context.closed

    def test_retry_getsockopt(self):
        raise SkipTest("TODO: find a way to interrupt getsockopt")

    def test_retry_setsockopt(self):
        raise SkipTest("TODO: find a way to interrupt setsockopt")
|
base.py | """Worker pool executor base classes."""
import abc
import inspect
import logging
import numbers
import os
import psutil
import threading
import time
from schema import Or, And
import six
from testplan.common.config import ConfigOption, validate_func
from testplan.common import entity
from testplan.common.utils.thread import interruptible_join
from testplan.common.utils.exceptions import format_trace
from testplan.common.utils.strings import Color
from testplan.common.utils.timing import wait_until_predicate
from testplan.common.utils import logger
from .communication import Message
from testplan.runners.base import Executor, ExecutorConfig
from .tasks import Task, TaskResult
class Transport(logger.Loggable):
    """
    Transport layer for communication between a pool and a worker.
    Worker send messages, pool receives and send back responses.
    :param recv_sleep: Sleep duration in msg receive loop.
    :type recv_sleep: ``float``
    """

    def __init__(self, recv_sleep=0.05):
        super(Transport, self).__init__()
        self._recv_sleep = recv_sleep
        # In-memory queues: the worker appends to requests, the pool
        # appends responses for the worker to pick up.
        self.requests = []
        self.responses = []
        # Set to False on shutdown to unblock the receive() loop.
        self.active = True

    def send(self, message):
        """
        Worker sends a message.
        :param message: Message to be sent.
        :type message: :py:class:`~testplan.runners.pools.communication.Message`
        """
        self.requests.append(message)

    def receive(self):
        """
        Worker receives the response to the message sent.
        :return: Response to the message sent.
        :type: :py:class:`~testplan.runners.pools.communication.Message`
        """
        # Poll (with sleep) until a response arrives or the transport is
        # deactivated; returns None implicitly when deactivated while empty.
        while self.active:
            try:
                return self.responses.pop()
            except IndexError:
                time.sleep(self._recv_sleep)

    def accept(self):
        """
        Pool receives message sent by worker.
        :return: Message pool received.
        :type: :py:class:`~testplan.runners.pools.communication.Message`
        """
        # Raises IndexError when no request is queued; callers handle it.
        return self.requests.pop()

    def respond(self, message):
        """
        Used by :py:class:`~testplan.runners.pools.base.Pool` to respond to
        worker request.
        :param message: Respond message.
        :type message: :py:class:`~testplan.runners.pools.communication.Message`
        """
        self.responses.append(message)

    def send_and_receive(self, message, expect=None):
        """
        Send and receive shortcut. optionally assert that the response is
        of the type expected. I.e For a TaskSending message, an Ack is expected.
        :param message: Message sent.
        :type message: :py:class:`~testplan.runners.pools.communication.Message`
        :param expect: Assert message received command is the expected.
        :type expect: ``NoneType`` or
            :py:class:`~testplan.runners.pools.communication.Message`
        :return: Message received.
        :rtype: ``object``
        """
        if not self.active:
            return None
        try:
            self.send(message)
        except Exception as exc:
            self.logger.exception('Hit exception on transport send.')
            raise RuntimeError('On transport send - {}.'.format(exc))
        try:
            received = self.receive()
        except Exception as exc:
            self.logger.exception('Hit exception on transport receive.')
            raise RuntimeError('On transport receive - {}.'.format(exc))
        # Only validate the response type if still active; receive() may
        # return None after deactivation.
        if self.active and expect is not None:
            if received is None:
                raise RuntimeError('Received None when {} was expected.'.format(
                    expect))
            assert received.cmd == expect
        return received
@six.add_metaclass(abc.ABCMeta)
class ConnectionManager(entity.Resource):
    """
    Abstract base class for classes that manage connections between Pools and
    workers.
    """

    def __init__(self):
        super(ConnectionManager, self).__init__()
        self._workers = []

    @property
    def workers(self):
        """Returns workers this object manages connections for."""
        return self._workers

    def starting(self):
        """
        Perform any connection setup - in the base class no setup is required.
        """
        self.status.change(self.status.STARTED)

    def stopping(self):
        """Unregister workers when stopping."""
        self._unregister_workers()
        self.status.change(self.status.STOPPED)

    def aborting(self):
        """Abort policy - no abort actions are required in the base class."""

    def register(self, worker):
        """
        Register a new worker. Workers should be registered after the
        connection manager is started and will be automatically unregistered
        when it is stopped.

        :raises RuntimeError: if not started or the worker is already known.
        """
        if self.status.tag != self.status.STARTED:
            raise RuntimeError(
                'Can only register workers when started. Current state is {}'
                .format(self.status.tag))
        if worker in self._workers:
            raise RuntimeError('Worker {} already in ConnectionManager'
                               .format(worker))
        self._workers.append(worker)

    @abc.abstractmethod
    def accept(self):
        """
        Accepts a new message from worker. This method should not block - if
        no message is queued for receiving it should return None.
        :return: Message received from worker transport, or None.
        :rtype: ``NoneType`` or
            :py:class:`~testplan.runners.pools.communication.Message`
        """
        raise NotImplementedError

    def _unregister_workers(self):
        """Remove workers from this connection manager."""
        self._workers = []
class RoundRobinConnManager(ConnectionManager):
    """Manages workers and performs round robin communication with each."""

    def __init__(self):
        super(RoundRobinConnManager, self).__init__()
        # 1-based count of accept() calls; drives the round-robin index.
        self._current = 1

    def accept(self):
        """
        Accepts a new message from the next worker, increments the current
        worker index. Doesn't block if no message is queued for receiving.
        :return: Message received from worker transport, or None.
        :rtype: ``NoneType`` or
            :py:class:`~testplan.runners.pools.communication.Message`
        """
        if not self._workers:
            return None
        msg = None
        try:
            # Maps the running counter onto a worker index in [-1, n-2],
            # cycling through all workers over successive calls.
            idx = (self._current % len(self._workers)) - 1
            msg = self._workers[idx].transport.accept()
        except IndexError:
            # Selected worker had no queued request - leave msg as None.
            pass
        finally:
            # Advance to the next worker regardless of the outcome.
            self._current += 1
        return msg
class WorkerConfig(entity.ResourceConfig):
    """
    Configuration object for
    :py:class:`~testplan.runners.pools.base.Worker` resource entity.
    :param index: Worker index id.
    :type index: ``int`` or ``str``
    :param transport: Transport communication class definition.
    :type transport: :py:class:`~testplan.runners.pools.base.Transport`
    Also inherits all :py:class:`~testplan.common.entity.base.ResourceConfig`
    options.
    """

    @classmethod
    def get_options(cls):
        """
        Schema for options validation and assignment of default values.
        """
        return {
            'index': Or(int, str),
            # Note: the default is the Transport class itself (not an
            # instance); each Worker instantiates its own transport.
            ConfigOption('transport', default=Transport): object,
        }
class Worker(entity.Resource):
    """
    Worker resource that pulls tasks from the transport provided, executes them
    and sends back task results.
    """

    CONFIG = WorkerConfig

    def __init__(self, **options):
        super(Worker, self).__init__(**options)
        self._metadata = None
        self._transport = self.cfg.transport()
        self._loop_handler = None
        self.last_heartbeat = None
        self.assigned = set()  # uids of tasks currently assigned to this worker
        self.requesting = 0

    @property
    def transport(self):
        """Worker communication transport."""
        return self._transport

    @property
    def metadata(self):
        """Worker metadata information."""
        # Built lazily so `thread` reflects the thread that first asks.
        if not self._metadata:
            self._metadata = {'thread': threading.current_thread(),
                              'index': self.cfg.index}
        return self._metadata

    @property
    def outfile(self):
        """Stdout file."""
        return os.path.join(self.parent.runpath,
                            '{}_startup'.format(self.cfg.index))

    def uid(self):
        """Worker unique index."""
        return self.cfg.index

    def starting(self):
        """Starts the daemonic worker loop."""
        self.make_runpath_dirs()
        self._loop_handler = threading.Thread(
            target=self._loop, args=(self._transport,))
        self._loop_handler.daemon = True
        self._loop_handler.start()

    def stopping(self):
        """Stops the worker."""
        # Deactivate the transport first so any blocking receive() returns.
        self._transport.active = False
        if self._loop_handler:
            interruptible_join(self._loop_handler)
        self._loop_handler = None

    def aborting(self):
        """Aborting logic, will not wait running tasks."""
        self._transport.active = False

    @property
    def is_alive(self):
        """Poll the loop handler thread to check it is running as expected."""
        return self._loop_handler.is_alive()

    def _loop(self, transport):
        # Worker main loop: repeatedly pull a task batch from the pool,
        # execute it and push the results back, until stopped or told to stop.
        message = Message(**self.metadata)
        while self.active:
            received = transport.send_and_receive(message.make(
                message.TaskPullRequest, data=1))
            if received is None or received.cmd == Message.Stop:
                break
            elif received.cmd == Message.TaskSending:
                results = []
                for item in received.data:
                    results.append(self.execute(item))
                transport.send_and_receive(message.make(
                    message.TaskResults, data=results), expect=message.Ack)
            elif received.cmd == Message.Ack:
                pass
            time.sleep(self.cfg.active_loop_sleep)

    def execute(self, task):
        """
        Executes a task and return the associated task result.
        :param task: Task that worker pulled for execution.
        :type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
        :return: Task result.
        :rtype: :py:class:`~testplan.runners.pools.tasks.base.TaskResult`
        """
        try:
            target = task.materialize()
            if isinstance(target, entity.Runnable):
                # Attach the runnable to this worker's hierarchy if loose.
                if not target.parent:
                    target.parent = self
                if not target.cfg.parent:
                    target.cfg.parent = self.cfg
                result = target.run()
            elif callable(target):
                result = target()
            else:
                result = target.run()
        except BaseException as exc:
            # Any failure is captured into a failed TaskResult with the
            # formatted traceback rather than propagating out of the loop.
            task_result = TaskResult(
                task=task, result=None, status=False,
                reason=format_trace(inspect.trace(), exc))
        else:
            task_result = TaskResult(task=task, result=result, status=True)
        return task_result

    def respond(self, msg):
        """
        Method that the pull uses to respond with a message to the worker.
        :param msg: Response message.
        :type msg: :py:class:`~testplan.runners.pools.communication.Message`
        """
        self._transport.respond(msg)

    def __repr__(self):
        return '{}[{}]'.format(self.__class__.__name__, self.cfg.index)
def default_check_reschedule(pool, task_result):
    """
    Determines if a task should be rescheduled based on the task result info.

    Default policy: never reschedule; both arguments are accepted but unused.
    """
    del pool, task_result  # unused by the default policy
    return False
class PoolConfig(ExecutorConfig):
    """
    Configuration object for
    :py:class:`~testplan.runners.pools.base.Pool` executor resource entity.
    :param name: Pool name.
    :type name: ``str``
    :param size: Pool workers size. Default: 4
    :type size: ``int``
    :param worker_type: Type of worker to be initialized.
    :type worker_type: :py:class:`~testplan.runners.pools.base.Worker`
    :param worker_heartbeat: Worker heartbeat period.
    :type worker_heartbeat: ``int`` or ``float`` or ``NoneType``
    :param heartbeat_init_window: Allowed seconds of missing heartbeats from
        workers.
    :type heartbeat_init_window: ``int``
    :param heartbeats_miss_limit: Worker heartbeat period.
    :type heartbeats_miss_limit: ``int``
    :param task_retries_limit: Maximum times a task can be re-assigned to pool.
    :type task_retries_limit: ``int``
    :param max_active_loop_sleep: Maximum value for delay logic in active sleep.
    :type max_active_loop_sleep: ``int`` or ``float``
    Also inherits all :py:class:`~testplan.runners.base.ExecutorConfig`
    options.
    """

    @classmethod
    def get_options(cls):
        """
        Schema for options validation and assignment of default values.
        """
        return {
            'name': str,
            # Pool must have at least one worker.
            ConfigOption('size', default=4): And(int, lambda x: x > 0),
            ConfigOption('worker_type', default=Worker): object,
            # None disables the heartbeat mechanism entirely.
            ConfigOption('worker_heartbeat', default=None):
                Or(int, float, None),
            ConfigOption('heartbeat_init_window', default=1800): int,
            ConfigOption('worker_inactivity_threshold', default=300): int,
            ConfigOption('heartbeats_miss_limit', default=3): int,
            ConfigOption('task_retries_limit', default=3): int,
            ConfigOption('max_active_loop_sleep', default=5): numbers.Number}
class Pool(Executor):
    """
    Pool task executor object that initializes workers and dispatches tasks.
    """

    # Overridable hooks for subclasses (e.g. process/remote pools).
    CONFIG = PoolConfig
    CONN_MANAGER = RoundRobinConnManager

    def __init__(self, **options):
        super(Pool, self).__init__(**options)
        self.unassigned = []  # unassigned tasks
        self.task_assign_cnt = {}  # uid: times_assigned
        # Reschedule policy; replaceable via set_reschedule_check().
        self.should_reschedule = default_check_reschedule
        self._workers = entity.Environment(parent=self)
        # worker -> timestamp of last received task result.
        self._workers_last_result = {}
        self._conn = self.CONN_MANAGER()
        self._conn.parent = self
        # Guards pool state shared between the work loop, request handling
        # and the worker monitoring thread.
        self._pool_lock = threading.Lock()
        self._metadata = {}
        # runpath must exist before being advertised to workers via metadata.
        self.make_runpath_dirs()
        self._metadata['runpath'] = self.runpath
        self._add_workers()
        # Methods for handling different Message types. These are expected to
        # take the worker, request and response objects as the only required
        # positional args.
        self._request_handlers = {
            Message.ConfigRequest: self._handle_cfg_request,
            Message.TaskPullRequest: self._handle_taskpull_request,
            Message.TaskResults: self._handle_taskresults,
            Message.Heartbeat: self._handle_heartbeat,
            Message.SetupFailed: self._handle_setupfailed}

    def uid(self):
        """Pool name."""
        return self.cfg.name

    def add(self, task, uid):
        """
        Add a task for execution.

        :param task: Task to be scheduled to workers.
        :type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
        :param uid: Task uid.
        :type uid: ``str``
        :raises ValueError: if ``task`` is not a Task instance.
        """
        if not isinstance(task, Task):
            raise ValueError('Task was expected, got {} instead.'.format(
                type(task)))
        super(Pool, self).add(task, uid)
        self.unassigned.append(uid)

    def set_reschedule_check(self, check_reschedule):
        """
        Sets callable with custom rules to determine if a task should be
        rescheduled. It must accept the pool object and the task result,
        and based on these it returns if the task should be rescheduled
        (i.e due to a known rare system error).

        :param check_reschedule: Custom callable for task reschedule.
        :type check_reschedule: ``callable`` that takes
            ``pool``, ``task_result`` arguments.
        :return: True if Task should be rescheduled else False.
        :rtype: ``bool``
        """
        # validate_func raises if the callable's signature does not match.
        validate_func('pool', 'task_result')(check_reschedule)
        self.should_reschedule = check_reschedule

    def _loop(self):
        """
        Main executor work loop - runs in a seperate thread when the Pool is
        started.
        """
        # No heartbeat means no fault tolerance for worker.
        if self.cfg.worker_heartbeat:
            self.logger.debug('Starting worker monitor thread.')
            worker_monitor = threading.Thread(target=self._workers_monitoring)
            worker_monitor.daemon = True
            worker_monitor.start()
        while self.active:
            # Only the state transition / message handling runs under the
            # pool lock; sleeping happens outside so the monitor thread can
            # acquire the lock in between iterations.
            with self._pool_lock:
                should_continue = self._loop_process_work(self.status.tag)
            if not should_continue:
                break
            else:
                time.sleep(self.cfg.active_loop_sleep)

    def _loop_process_work(self, curr_status):
        """
        Poll for work based on the current pool state and process the next item
        if there is one.

        :param curr_status: Current pool status tag.
        :return: Whether to continue the main work loop.
        :rtype: ``bool``
        :raises RuntimeError: if the pool is in an unexpected state.
        """
        if curr_status == self.status.STARTING:
            self.status.change(self.status.STARTED)
        elif curr_status == self.status.STOPPING:
            self.status.change(self.status.STOPPED)
            return False  # Indicate to break from the main work loop.
        elif curr_status != self.status.STARTED:
            raise RuntimeError('Pool in unexpected state {}'
                               .format(curr_status))
        else:
            # STARTED: accept and dispatch at most one worker message.
            msg = self._conn.accept()
            if msg:
                try:
                    self.logger.debug('Received message from worker: %s.',
                                      msg)
                    self.handle_request(msg)
                except Exception as exc:
                    # Log and keep looping - a single bad message must not
                    # take the pool down.
                    self.logger.error(format_trace(inspect.trace(), exc))
        # The main work loop can continue.
        return True

    def handle_request(self, request):
        """
        Handles a worker request. I.e TaskPull, TaskResults, Heartbeat etc.

        :param request: Worker request.
        :type request: :py:class:`~testplan.runners.pools.communication.Message`
        """
        sender_index = request.sender_metadata['index']
        worker = self._workers[sender_index]
        if not worker.active:
            self.logger.critical(
                'Ignoring message {} - {} from inactive worker {}'.format(
                    request.cmd, request.data, worker))
            # TODO check whether should we respond.
            worker.respond(Message(**self._metadata).make(Message.Ack))
            return
        else:
            # Any message doubles as a liveness signal.
            worker.last_heartbeat = time.time()
        self.logger.debug('Pool {} request received by {} - {}, {}'.format(
            self.cfg.name, worker, request.cmd, request.data))
        response = Message(**self._metadata)
        if not self.active or self.status.tag == self.STATUS.STOPPING:
            # Shutting down: tell the worker to stop instead of serving it.
            worker.respond(response.make(Message.Stop))
        elif request.cmd in self._request_handlers:
            self._request_handlers[request.cmd](worker, request, response)
        else:
            self.logger.error('Unknown request: {} {} {} {}'.format(
                request, dir(request), request.cmd, request.data))
            worker.respond(response.make(Message.Ack))

    def _handle_cfg_request(self, worker, _, response):
        """Handle a ConfigRequest from a worker.

        Sends the denormalized config chain (this pool's config and all of
        its parents) so the worker can reconstruct its environment.
        """
        options = []
        cfg = self.cfg
        while cfg:
            try:
                options.append(cfg.denormalize())
            except Exception as exc:
                self.logger.error('Could not denormalize: {} - {}'.format(
                    cfg, exc))
            cfg = cfg.parent
        worker.respond(response.make(Message.ConfigSending,
                                     data=options))

    def _handle_taskpull_request(self, worker, request, response):
        """Handle a TaskPullRequest from a worker.

        ``request.data`` is the number of tasks the worker wants. Tasks
        exceeding the retry limit are discarded instead of assigned.
        """
        tasks = []
        if self.status.tag == self.status.STARTED:
            for _ in range(request.data):
                try:
                    uid = self.unassigned.pop(0)
                except IndexError:
                    # No more unassigned tasks.
                    break
                if uid not in self.task_assign_cnt:
                    self.task_assign_cnt[uid] = 0
                if self.task_assign_cnt[uid] >= self.cfg.task_retries_limit:
                    self._discard_task(
                        uid, '{} already reached max retries: {}'.format(
                            self._input[uid], self.cfg.task_retries_limit))
                    continue
                else:
                    self.task_assign_cnt[uid] += 1
                    task = self._input[uid]
                    self.logger.test_info(
                        'Scheduling {} to {}'.format(task, worker))
                    worker.assigned.add(uid)
                    tasks.append(task)
            if tasks:
                worker.respond(response.make(
                    Message.TaskSending, data=tasks))
                # Remember how many tasks the worker is still short of.
                worker.requesting = request.data - len(tasks)
                return
        worker.requesting = request.data
        worker.respond(response.make(Message.Ack))

    def _handle_taskresults(self, worker, request, response):
        """Handle a TaskResults message from a worker.

        De-assigns each finished task and either stores its result or puts
        it back on the unassigned queue when the reschedule policy says so.
        """
        for task_result in request.data:
            uid = task_result.task.uid()
            worker.assigned.remove(uid)
            if worker not in self._workers_last_result:
                self._workers_last_result[worker] = time.time()
            self.logger.test_info('De-assign {} from {}'.format(
                task_result.task, worker))
            if self.should_reschedule(self, task_result):
                if self.task_assign_cnt[uid] >= self.cfg.task_retries_limit:
                    self.logger.test_info(
                        'Will not reschedule %(input)s again as it '
                        'reached max retries %(retries)d',
                        {'input': self._input[uid],
                         'retries': self.cfg.task_retries_limit})
                else:
                    self.logger.test_info(
                        'Rescheduling {} due to '
                        'should_reschedule() cfg option of {}'.format(
                            task_result.task, self))
                    self.unassigned.append(uid)
                    # Rescheduled: do not record this result as final.
                    continue
            self._print_test_result(task_result)
            self._results[uid] = task_result
            self.ongoing.remove(uid)
        worker.respond(response.make(Message.Ack))

    def _handle_heartbeat(self, worker, request, response):
        """Handle a Heartbeat message received from a worker."""
        worker.last_heartbeat = time.time()
        self.logger.debug(
            'Received heartbeat from {} at {} after {}s.'.format(
                worker, request.data, time.time() - request.data))
        worker.respond(response.make(Message.Ack,
                                     data=worker.last_heartbeat))

    def _handle_setupfailed(self, worker, request, response):
        """Handle a SetupFailed message received from a worker."""
        self.logger.test_info('Worker {} setup failed:{}{}'.format(
            worker, os.linesep, request.data))
        worker.respond(response.make(Message.Ack))
        self._deco_worker(
            worker, 'Aborting {}, setup failed.')

    def _deco_worker(self, worker, message):
        """Decomission a worker.

        Re-queues all tasks still assigned to the worker, then aborts it.
        """
        self.logger.critical(message.format(worker))
        if os.path.exists(worker.outfile):
            self.logger.critical('\tlogfile: {}'.format(worker.outfile))
        while worker.assigned:
            uid = worker.assigned.pop()
            self.logger.test_info(
                'Re-assigning {} from {} to {}.'.format(
                    self._input[uid], worker, self))
            self.unassigned.append(uid)
        worker.abort()

    def _workers_handler_monitoring(self, worker, workers_last_killed={}):
        # NOTE(review): the mutable default arg appears to be intentional
        # here, acting as a per-pool memo of when each worker was last
        # restarted across calls - confirm before "fixing" it.
        # Restarts a worker whose child processes are all zombies and which
        # has produced no result within the inactivity threshold.
        inactivity_threshold = self.cfg.worker_inactivity_threshold
        if worker not in workers_last_killed:
            workers_last_killed[worker] = time.time()
        worker_last_killed = workers_last_killed[worker]
        if not worker.assigned or\
                time.time() - worker_last_killed < inactivity_threshold:
            return
        try:
            proc = psutil.Process(worker.handler.pid)
            children = list(proc.children(recursive=True))
            worker_last_result = self._workers_last_result.get(worker, 0)
            if all(item.status() == 'zombie' for item in children) and\
                    time.time() - worker_last_result > inactivity_threshold:
                workers_last_killed[worker] = time.time()
                try:
                    # Put the worker's tasks back before restarting it.
                    while worker.assigned:
                        uid = worker.assigned.pop()
                        self.logger.test_info(
                            'Re-assigning {} from {} to {}.'.format(
                                self._input[uid], worker, self))
                        self.unassigned.append(uid)
                    self.logger.test_info(
                        'Restarting worker: {}'.format(worker))
                    worker.stop()
                    worker.start()
                except Exception as exc:
                    self.logger.critical(
                        'Worker {} failed to restart: {}'.format(worker, exc))
                    self._deco_worker(
                        worker, 'Aborting {}, due to defunct child process.')
        except psutil.NoSuchProcess:
            # Worker process already gone; nothing to inspect.
            pass

    def _workers_monitoring(self):
        """
        Monitor the health of workers in a loop. Executes in a separate thread.
        """
        if not self.cfg.worker_heartbeat:
            raise RuntimeError(
                'Cannot monitor workers with no heartbeat configured.')
        monitor_started = time.time()
        # A worker is deemed dead after missing heartbeats_miss_limit
        # consecutive heartbeat periods.
        loop_sleep = self.cfg.worker_heartbeat * self.cfg.heartbeats_miss_limit
        while self._loop_handler.is_alive():
            w_total = set()
            w_uninitialized = set()
            w_active = set()
            w_inactive = set()
            monitor_alive = time.time() - monitor_started
            # Grace period for workers that have not sent any heartbeat yet.
            init_window = monitor_alive <= self.cfg.heartbeat_init_window
            with self._pool_lock:
                for worker in self._workers:
                    if getattr(worker, 'handler', None):
                        self._workers_handler_monitoring(worker)
                    w_total.add(worker)
                    if not worker.active:
                        w_inactive.add(worker)
                    elif worker.last_heartbeat is None:
                        w_uninitialized.add(worker)
                        if not init_window:
                            self._deco_worker(
                                worker, 'Aborting {}, could not initialize.')
                    elif time.time() - worker.last_heartbeat > loop_sleep:
                        w_inactive.add(worker)
                        self._deco_worker(
                            worker, 'Aborting {}, failed to send heartbeats.')
                    else:
                        w_active.add(worker)
            if w_total:
                if len(w_inactive) == len(w_total):
                    # No worker left alive - the pool cannot make progress.
                    self.logger.critical(
                        'All workers of {} are inactive.'.format(self))
                    self.abort()
                    break
            try:
                # For early finish of worker monitoring thread.
                wait_until_predicate(lambda: not self._loop_handler.is_alive(),
                                     timeout=loop_sleep, interval=0.05)
            except RuntimeError:
                break

    def _discard_task(self, uid, reason):
        """Record a failed TaskResult for a task that will not be run."""
        self.logger.critical('Discard task {} of {} - {}.'.format(
            self._input[uid], self, reason))
        self._results[uid] = TaskResult(
            task=self._input[uid], status=False,
            reason='Task discarded by {} - {}.'.format(self, reason))
        self.ongoing.remove(uid)

    def _discard_pending_tasks(self):
        """Mark all still-ongoing tasks as failed (used on abort)."""
        self.logger.critical('Discard pending tasks of {}.'.format(self))
        while self.ongoing:
            uid = self.ongoing[0]
            self._results[uid] = TaskResult(
                task=self._input[uid], status=False,
                reason='Task [{}] discarding due to {} abort.'.format(
                    self._input[uid]._target, self))
            self.ongoing.pop(0)

    def _print_test_result(self, task_result):
        """Log a one-line Pass/Fail summary for a finished task, if possible."""
        if (not isinstance(task_result.result, entity.RunnableResult)) or (
                not hasattr(task_result.result, 'report')):
            return
        # Currently prints report top level result and not details.
        name = task_result.result.report.name
        if task_result.result.report.passed is True:
            self.logger.test_info('{} -> {}'.format(name, Color.green('Pass')))
        else:
            self.logger.test_info('{} -> {}'.format(name, Color.red('Fail')))

    def _add_workers(self):
        """Initialise worker instances."""
        # Worker uids/indices are stringified integers: '0', '1', ...
        for idx in (str(i) for i in range(self.cfg.size)):
            worker = self.cfg.worker_type(index=idx)
            worker.parent = self
            worker.cfg.parent = self.cfg
            self._workers.add(worker, uid=idx)
            self.logger.debug('Added worker %(index)s (outfile = %(outfile)s)',
                              {'index': idx, 'outfile': worker.outfile})

    def starting(self):
        """Starting the pool and workers."""
        with self._pool_lock:
            self._conn.start()
            for worker in self._workers:
                self._conn.register(worker)
            self._workers.start()
            if self._workers.start_exceptions:
                for msg in self._workers.start_exceptions.values():
                    self.logger.error(msg)
                self._workers.stop()
                raise RuntimeError('All workers of {} failed to start.'.format(
                    self))
        super(Pool, self).starting()
        self.logger.debug('%s started.', self.__class__.__name__)

    def workers_requests(self):
        """Count how many tasks workers are requesting."""
        return sum(worker.requesting for worker in self._workers)

    def stopping(self):
        """Stop connections and workers."""
        # Stop workers before stopping the connection manager.
        with self._pool_lock:
            self._workers.stop()
            self._conn.stop()
        super(Pool, self).stopping()
        self.logger.debug('Stopped %s', self.__class__.__name__)

    def abort_dependencies(self):
        """Empty generator to override parent implementation."""
        return
        yield

    def aborting(self):
        """Aborting logic."""
        self.logger.debug('Aborting pool {}'.format(self))
        for worker in self._workers:
            worker.abort()
        self._conn.abort()
        self._discard_pending_tasks()
        self.logger.debug('Aborted pool {}'.format(self))
|
net.py | """
About: ComNetsEmu Network Module.
"""
import http.server
import json
import os
import os.path
import shutil
import threading
from functools import partial
from time import sleep
import docker
from comnetsemu.cli import spawnXtermDocker
from comnetsemu.node import APPContainer, DockerHost
from mininet.log import debug, error, info
from mininet.net import Mininet
from mininet.term import cleanUpScreens, makeTerms
from mininet.util import BaseString
# ComNetsEmu version: should be consistent with README and LICENSE
VERSION = "0.1.11"
APPCONTAINERMANGER_MOUNTED_DIR = "/tmp/comnetsemu/appcontainermanger"
class Containernet(Mininet):
    """Network emulation with containerized network nodes."""

    def __init__(self, **params):
        """Create a Containernet object with the same parameters provided by
        Mininet.
        """
        # Names of APP containers registered by APPContainerManager.
        self._appcontainers = list()
        Mininet.__init__(self, **params)

    def addDockerHost(self, name: str, **params):  # pragma: no cover
        """Wrapper for addHost method that adds a Docker container as a host.

        :param name: Name of the host.
        :type name: str
        """
        return self.addHost(name, cls=DockerHost, **params)

    def startTerms(self):  # pragma: no cover
        "Start a terminal for each node."
        if "DISPLAY" not in os.environ:
            error("Error starting terms: Cannot connect to display\n")
            return
        info("*** Running terms on %s\n" % os.environ["DISPLAY"])
        cleanUpScreens()
        self.terms += makeTerms(self.controllers, "controller")
        self.terms += makeTerms(self.switches, "switch")
        # Partition hosts: Docker hosts get their own xterm helper, the
        # rest go through the standard Mininet terminal factory.
        docker_hosts = []
        plain_hosts = []
        for host in self.hosts:
            if isinstance(host, DockerHost):
                docker_hosts.append(host)
            else:
                plain_hosts.append(host)
        for host in docker_hosts:
            self.terms.append(spawnXtermDocker(host.name))
        self.terms += makeTerms(plain_hosts, "host")

    def addLinkNamedIfce(self, src, dst, *args, **kwargs):  # pragma: no cover
        """Add a link with two named interfaces.

        - Name of interface 1: src-dst
        - Name of interface 2: dst-src
        """
        # Accept node objects or names.
        if isinstance(src, BaseString):
            src = self[src]
        if isinstance(dst, BaseString):
            dst = self[dst]
        self.addLink(
            src,
            dst,
            intfName1="-".join((src.name, dst.name)),
            intfName2="-".join((dst.name, src.name)),
            *args,
            **kwargs,
        )
class APPContainerManagerRequestHandler(http.server.BaseHTTPRequestHandler):
    """Basic implementation of a REST API for app containers.

    Python's built-in http server only does basic security checks and this class
    has basic and limited sanity checks on the requests. Designed only for
    teaching.
    """

    # Single REST resource: GET lists, POST creates, DELETE removes.
    _container_resource_path = "/containers"

    def __init__(self, appcontainermanager, enable_log=True, *args, **kargs):
        # Bound with functools.partial in APPContainerManager._runHTTPServer,
        # so the manager/flag arrive before the base handler's own args.
        self.mgr = appcontainermanager
        self.enable_log = enable_log
        super(APPContainerManagerRequestHandler, self).__init__(*args, **kargs)

    def _send_bad_request(self):
        """Reply with an empty 400 response."""
        self.send_response(400)
        self.end_headers()

    def log_message(self, format, *args):
        # Suppress the default stderr access log unless logging is enabled.
        if not self.enable_log:
            return
        else:  # pragma: no cover
            super(APPContainerManagerRequestHandler, self).log_message(format, *args)

    def do_GET(self):
        """GET /containers -> JSON list of all managed container names."""
        if self.path == self._container_resource_path:
            self.send_response(200)
            self.end_headers()
            ret = json.dumps(self.mgr.getAllContainers()).encode("utf-8")
            self.wfile.write(ret)
        else:
            self._send_bad_request()

    @staticmethod
    def _post_sanity_check(post_dict):
        # Check for essential keys.
        # (for/else: the else branch runs only if no key was missing.)
        for k in ["name", "dhost", "dimage", "dcmd", "docker_args"]:
            if k not in post_dict:
                return False
        else:
            return True

    def do_POST(self):
        """Create a new APP container."""
        if self.path == self._container_resource_path:
            content_length = int(self.headers.get("content-length", 0))
            if content_length == 0:
                self._send_bad_request()
            else:
                post_data = self.rfile.read(content_length).decode("utf-8")
                container_para = json.loads(post_data)
                if not self._post_sanity_check(container_para):
                    self._send_bad_request()
                else:
                    # The JSON keys map 1:1 to addContainer's parameters.
                    self.mgr.addContainer(**container_para)
                    self.send_response(200)
                    self.end_headers()
        else:
            self._send_bad_request()

    def _delete_sanity_check(self, container_name):
        # Check if container exists.
        c = self.mgr.getContainerInstance(container_name, None)
        return True if c else False

    def do_DELETE(self):
        """DELETE /containers/<name> -> remove the named container."""
        # os.path.split("/containers/foo") -> ("/containers", "foo").
        paths = os.path.split(self.path)
        if len(paths) == 2 and paths[0] == self._container_resource_path:
            container_name = paths[1]
            if not self._delete_sanity_check(container_name):
                self._send_bad_request()
            else:
                self.mgr.removeContainer(container_name)
                self.send_response(200)
                self.end_headers()
        else:
            self._send_bad_request()
class APPContainerManager:
    """Manager for application containers (sibling containers) deployed on
    Mininet hosts.

    - To make is simple. It uses docker-py APIs to manage internal containers
      from host system.
    - Internal methods (starts with an underscore) should be documented after
      tests and before stable releases.

    Ref:
      [1] https://docker-py.readthedocs.io/en/stable/containers.html
    """

    # Arguments forced onto every created container (they override any
    # caller-supplied values of the same keys in _createContainer).
    docker_args_default = {
        "init": True,
        "tty": True,  # -t
        "detach": True,  # -d
        # Used for cleanups
        "labels": {"comnetsemu": "dockercontainer"},
        # Required for CRIU checkpoint
        "security_opt": ["seccomp:unconfined"],
    }

    docker_volumes_default = {
        # Shared directory in host OS
        APPCONTAINERMANGER_MOUNTED_DIR: {
            "bind": APPCONTAINERMANGER_MOUNTED_DIR,
            "mode": "rw",
        }
    }

    # Default delay between tries for Docker API
    retry_delay_secs = 0.1

    def __init__(self, net: Mininet):
        """Init the APPContainerManager.

        :param net (Mininet): The mininet object, used to manage hosts via
            Mininet's API.
        """
        self.net = net
        self.dclt = docker.from_env()
        # Following resources can be shared by main and httpd threads.
        # A simple lock is used.
        self._container_queue_lock = threading.Lock()
        self._container_queue = list()
        # Fast search for added containers.
        self._name_container_map = dict()
        self._http_server_started = False
        self._http_server_thread = None
        os.makedirs(APPCONTAINERMANGER_MOUNTED_DIR, exist_ok=True)

    def _createContainer(self, name, dhost, dimage, dcmd, docker_args):
        """Create a Docker container.

        NOTE: mutates the passed ``docker_args`` dict in place (volumes are
        merged, default args and name/image/command/network keys are set).
        """
        if "volumes" in docker_args:
            debug(
                f"Update the default volumes {self.docker_volumes_default} to the already given volumes config.\n"
            )
            docker_args["volumes"].update(self.docker_volumes_default)

        # Override the essential parameters
        for key in self.docker_args_default:
            if key in docker_args:  # pragma: no cover
                error(
                    f"Given argument: {key} will be overridden by the default "
                    f"value: {self.docker_args_default[key]}\n"
                )
        docker_args.update(self.docker_args_default)

        docker_args["name"] = name
        docker_args["image"] = dimage
        # Share the cgroup and network namespace of the DockerHost instance,
        # making this a "sibling" container of the host's container.
        docker_args["cgroup_parent"] = "/docker/{}".format(dhost.dins.id)
        docker_args["command"] = dcmd
        docker_args["network_mode"] = "container:{}".format(dhost.dins.id)

        ret = self.dclt.containers.create(**docker_args)
        return ret

    def _waitContainerStart(self, name):  # pragma: no cover
        """Wait for container to start up running"""
        while not self._getDockerIns(name):
            debug("Failed to get container:%s" % (name))
            sleep(self.retry_delay_secs)
        dins = self._getDockerIns(name)
        while not dins.attrs["State"]["Running"]:
            sleep(self.retry_delay_secs)
            dins.reload()  # refresh information in 'attrs'

    def _waitContainerRemoved(self, name):  # pragma: no cover
        """Wait for container to be removed"""
        while self._getDockerIns(name):
            sleep(self.retry_delay_secs)

    def _getDockerIns(self, name):
        """Get the APPContainer instance by name.

        :param name (str): Name of the container
        :return: docker-py container object, or None if not found.
        """
        try:
            dins = self.dclt.containers.get(name)
        except docker.errors.NotFound:
            return None
        return dins

    def getContainerInstance(self, name: str, default=None) -> APPContainer:
        """Get the APPContainer instance with the given name.

        :param name: The name of the given container.
        :type name: str
        :param default: The default return value if not found.
        :rtype: APPContainer
        """
        with self._container_queue_lock:
            # for/else: the else branch runs if no queue entry matched.
            for c in self._container_queue:
                if c.name == name:
                    return c
            else:
                return default

    def getContainersDhost(self, dhost: str) -> list:
        """Get containers deployed on the given DockerHost.

        :param dhost: Name of the DockerHost.
        :type dhost: str
        :return: A list of container names on given DockerHost.
        :rtype: list
        """
        with self._container_queue_lock:
            clist = [c.name for c in self._container_queue if c.dhost == dhost]
            return clist

    def getAllContainers(self) -> list:
        """Get a list of names of all containers in current container queue.

        :rtype: list
        """
        with self._container_queue_lock:
            return [c.name for c in self._container_queue]

    def addContainer(
        self,
        name: str,
        dhost: str,
        dimage: str,
        dcmd: str,
        docker_args: dict,
        wait: bool = True,
    ) -> APPContainer:
        """Create and run a new container inside a Mininet DockerHost.

        :param name: Name of the container.
        :type name: str
        :param dhost: Name of the host used for deployment.
        :type dhost: str
        :param dimage: Name of the docker image.
        :type dimage: str
        :param dcmd: Command to run after the creation.
        :type dcmd: str
        :param docker_args: All other keyword arguments supported by Docker-py.
            e.g. CPU and memory related limitations.
            Some parameters are overriden for APPContainerManager's functionalities.
            Check cls.docker_args_default.
        :type docker_args: dict
        :param wait: Wait until the container has the running state if True.
        :type wait: bool
        :return: Added APPContainer instance or None if the creation process failed.
        :rtype: APPContainer
        """
        container = None
        # Resolve the host name to the DockerHost node object.
        dhost = self.net.get(dhost)
        with self._container_queue_lock:
            dins = self._createContainer(name, dhost, dimage, dcmd, docker_args)
            dins.start()
            if wait:
                self._waitContainerStart(name)
            container = APPContainer(name, dhost.name, dimage, dins)
            self._container_queue.append(container)
            self._name_container_map[container.name] = container
            self.net._appcontainers.append(name)
        return container

    def removeContainer(self, name: str, wait: bool = True):
        """Remove the APP container with the given name.

        :param name: Name of the to be removed container.
        :type name: str
        :param wait: Wait until the container is fully removed if True.
        :type wait: bool
        :raise ValueError: container is not found
        """
        with self._container_queue_lock:
            container = self._name_container_map.get(name, None)
            if not container:
                # NOTE(review): message interpolates `container` (None here);
                # it should interpolate `name` to be informative.
                raise ValueError(f"Can not find container with name: {container}")
            container.dins.remove(force=True)
            if wait:
                self._waitContainerRemoved(container.name)
            self._container_queue.remove(container)
            self.net._appcontainers.remove(name)
            del self._name_container_map[name]

    @staticmethod
    def _calculate_cpu_percent(stats):
        """Calculate the CPU usage in percent with given stats JSON data"""
        # NOTE(review): assumes `percpu_usage` is present in the stats
        # payload (true for cgroup v1 docker stats) - confirm on newer hosts.
        cpu_count = len(stats["cpu_stats"]["cpu_usage"]["percpu_usage"])
        cpu_percent = 0.0
        cpu_delta = float(stats["cpu_stats"]["cpu_usage"]["total_usage"]) - float(
            stats["precpu_stats"]["cpu_usage"]["total_usage"]
        )
        system_delta = float(stats["cpu_stats"]["system_cpu_usage"]) - float(
            stats["precpu_stats"]["system_cpu_usage"]
        )
        if system_delta > 0.0:
            cpu_percent = cpu_delta / system_delta * 100.0 * cpu_count
        # Clamp: multi-core scaling can overshoot 100%.
        if cpu_percent > 100:  # pragma: no cover
            cpu_percent = 100
        return cpu_percent

    def monResourceStats(
        self, name: str, sample_num: int = 3, sample_period: float = 1.0
    ) -> list:
        """Monitor the resource stats of a container within the given name.

        This function measure the CPU and memory usages sample_num times and
        sleep for sample_period between each time. All measurement results are
        returned as a list.

        :param name: Name of the container
        :type name: str
        :param sample_num: Number of samples.
        :type sample_num: int
        :param sample_period: Sleep period for each sample
        :type sample_period: float
        :return: A list of resource usages. Each item is a tuple (cpu_usg, mem_usg)
        :rtype: list
        :raise ValueError: container is not found
        """
        container = self._name_container_map.get(name, None)
        if not container:
            # NOTE(review): message interpolates `container` (None here);
            # it should interpolate `name` to be informative.
            raise ValueError(f"Can not found container with name: {container}")
        n = 0
        usages = list()
        while n < sample_num:
            stats = container.getCurrentStats()
            mem_stats = stats["memory_stats"]
            # Memory usage reported in MiB.
            mem_usg = mem_stats["usage"] / (1024 ** 2)
            cpu_usg = self._calculate_cpu_percent(stats)
            usages.append((cpu_usg, mem_usg))
            sleep(sample_period)
            n += 1
        return usages

    # BUG: Checkpoint inside container breaks the networking of outside
    # container if container networking mode is used.
    # def checkpoint(self, container: str) -> str:
    #     container = self._name_container_map.get(container, None)
    #     if not container:
    #         raise ValueError(f"Can not found container with name: {container}")
    #     ckpath = os.path.join(APPCONTAINERMANGER_MOUNTED_DIR, f"{container.name}")
    #     # MARK: Docker-py does not provide API for checkpoint and restore,
    #     # Docker CLI is directly used with subprocess as a temp workaround.
    #     subprocess.run(split(
    #         f"docker checkpoint create --checkpoint-dir={ckpath} {container.name} {container.name}"
    #     ),
    #         check=True,
    #         stdout=subprocess.DEVNULL,
    #         stderr=subprocess.DEVNULL)
    #     return ckpath

    def _runHTTPServer(self, ip_addr, port, enable_log):
        """Generate HTTPServer with partial parameters and run it forever."""
        handler = partial(APPContainerManagerRequestHandler, self, enable_log)
        httpd = http.server.HTTPServer((ip_addr, port), handler)
        info(f"Start REST API server on address: {ip_addr}:{port}.\n")
        httpd.serve_forever()

    def runRESTServerThread(
        self, ip: str, port: int = 8000, enable_log: bool = True
    ) -> None:
        """Run the REST API server in a separate daemon thread.

        threading is used to avoid blocking the main thread in the emulation
        scripts. Since to be performed operation is more IO-bounded and this
        server should have no impact on the concurrency approach of main thread,
        the threading with a Lock is used instead of multi-processing or
        asyncio.

        :param ip: Listening IP address.
        :type ip: str
        :param port: Port number.
        :type port: int
        :param enable_log: Print logs using stdout if True.
        :type enable_log: bool
        """
        self._http_server_started = True
        self._http_server_thread = threading.Thread(
            target=self._runHTTPServer, args=(ip, port, enable_log)
        )
        # It will die if all non-daemon threads (including main) exist.
        self._http_server_thread.daemon = True
        self._http_server_thread.start()

    def stop(self):
        """Stop the APPContainerManager.

        Terminates and force-removes every queued container, closes the
        docker client and deletes the shared mount directory.
        """
        if len(self._container_queue) > 0:
            info(
                "Stop {} containers in the App container queue: {}\n".format(
                    len(self._container_queue),
                    ", ".join((c.name for c in self._container_queue)),
                )
            )
            # Avoid missing delete internal containers manually before stop
            for c in self._container_queue:
                c._terminate()
                c.dins.remove(force=True)
        self.dclt.close()
        shutil.rmtree(APPCONTAINERMANGER_MOUNTED_DIR)
class VNFManager(APPContainerManager):
    """App container for Virtualized Network Functions"""
    # Currently identical to APPContainerManager; kept as a semantic alias.
    pass
|
views.py | from datetime import datetime
from threading import Thread
import requests
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, reverse, redirect
from random import randint
from django.conf import settings
from os import path, listdir
from account.permissions import is_admin_or_root
from blog.models import Blog
from django.views.generic import TemplateView
from submission.statistics import get_accept_problem_count
from utils import random_string
from utils.site_settings import is_site_closed, site_settings_get
from utils.upload import save_uploaded_file_to
def file_manager(request):
    """Admin-only file manager: handle uploads and list the upload directory.

    POST uploads a file (its name slugified); GET (and POST after upload)
    renders the directory listing with mtime and size per file.
    """
    import re

    def slugify(text):
        # Replace filesystem/URL-hostile characters with underscores.
        return re.sub(r'[ /"#!:]+', '_', text)

    if not is_admin_or_root(request.user):
        raise PermissionDenied
    if request.method == 'POST':
        try:
            file = request.FILES['file']
            save_uploaded_file_to(file, settings.UPLOAD_DIR, filename=slugify(file.name))
        except Exception as e:
            raise PermissionDenied(repr(e))
    entries = []
    for fname in listdir(settings.UPLOAD_DIR):
        full = path.join(settings.UPLOAD_DIR, fname)
        if not path.isfile(full):
            continue
        entries.append({
            'name': fname,
            'modified_time': datetime.fromtimestamp(path.getmtime(full)).
                strftime(settings.DATETIME_FORMAT_TEMPLATE),
            'size': str(path.getsize(full) // 1024) + "K"
        })
    return render(request, 'filemanager.jinja2', context={
        'file_list': entries
    })
def proxy_file_downloader(request):
    """Admin-only view: fetch a remote URL into UPLOAD_DIR in the background.

    The download runs in a fire-and-forget thread; the view redirects back
    to the file manager immediately.
    """
    if not is_admin_or_root(request.user):
        raise PermissionDenied

    def download_file(url):
        # Derive a file name from the URL; fall back to a random one.
        local_filename = url.split('/')[-1]
        if not local_filename:
            local_filename = random_string()
        r = requests.get(url, stream=True, timeout=30)
        with open(path.join(settings.UPLOAD_DIR, local_filename), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if not chunk:
                    continue
                f.write(chunk)

    if request.method == 'POST':
        try:
            url = request.POST['url']
            Thread(target=download_file, args=(url,)).start()
        except Exception as e:
            raise PermissionDenied(repr(e))
    return redirect(reverse('filemanager'))
def home_view(request):
    """Render the logged-in dashboard, or the anonymous landing page."""
    if not request.user.is_authenticated:
        # Anonymous visitors get a random background image.
        return render(request, 'home.jinja2', context={'bg': '/static/image/bg/%d.jpg' % randint(1, 14), })
    ctx = {
        'solved': get_accept_problem_count(request.user.pk),
        'bulletin': site_settings_get('BULLETIN', ''),
    }
    if not is_site_closed():
        recommended = (Blog.objects.with_likes()
                       .with_likes_flag(request.user)
                       .select_related("author")
                       .order_by("-create_time")
                       .filter(visible=True, recommend=True))
        ctx['blog_list'] = recommended[:15]
    return render(request, 'home_logged_in.jinja2', context=ctx)
def forbidden_view(request, exception):
    """Custom 403 handler: pass the triggering exception to the template."""
    template = 'error/403.jinja2'
    return render(request, template, context={"exception": exception})
def not_found_view(request):
    """Custom 404 handler."""
    template = 'error/404.jinja2'
    return render(request, template)
def server_error_view(request):
    """Custom 500 handler."""
    template = 'error/500.jinja2'
    return render(request, template)
def faq_view(request):
    """Render the static FAQ page."""
    template = 'faq.jinja2'
    return render(request, template)
class TestView(TemplateView):
    """Static test page rendered via Django's generic TemplateView."""
    # Template served as-is; no extra context is injected.
    template_name = 'test.jinja2'
|
base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tools
import time
import re
import db
import threading
class Source(object):
    """Scrape an IPTV playlist page and persist playable stream sources.

    Relies on project helpers: ``tools.Tools`` (HTTP fetch, title parsing,
    playability/CORS checks) and ``db.DataBase`` (persistence).
    """

    def __init__(self):
        self.T = tools.Tools()
        # Timestamp (ms) captured once at construction; stored as `udTime`.
        self.now = int(time.time() * 1000)

    def getSource(self):
        """Fetch the playlist page and probe every entry in its own thread.

        NOTE(review): always returns an empty list - `urlList` is never
        populated; results are only persisted via `addData`.
        """
        urlList = []
        url = 'https://www.jianshu.com/p/2499255c7e79'
        req = [
            'user-agent: Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Mobile Safari/537.36',
        ]
        res = self.T.getPage(url, req)
        if res['code'] == 200:
            # Extract <code> blocks, then (title, url) pairs from the
            # M3U-style "#EXTINF" entries of the first two blocks.
            pattern = re.compile(r"<code(.*?)</code>", re.I | re.S)
            tmp = pattern.findall(res['body'])
            pattern = re.compile(r"#EXTINF:0,(.*?)\n#EXTVLCOPT:network-caching=1000\n(.*?)\n", re.I | re.S)
            sourceList = pattern.findall(tmp[0])
            sourceList = sourceList + pattern.findall(tmp[1])
            # One daemon thread per source; join them all before returning.
            threads = []
            for item in sourceList:
                thread = threading.Thread(target=self.detectData, args=(item[0], item[1],), daemon=True)
                thread.start()
                threads.append(thread)
            for t in threads:
                t.join()
        else:
            pass  # MAYBE later :P
        return urlList

    def detectData(self, title, url):
        """Probe one (title, url) entry and persist it when playable."""
        info = self.T.fmtTitle(title)
        # chkPlayable returns a positive delay value when the stream plays.
        netstat = self.T.chkPlayable(url)
        if netstat > 0:
            cros = 1 if self.T.chkCros(url) else 0
            data = {
                'title': str(info['id']) if info['id'] != '' else str(info['title']),
                'url': str(url),
                'quality': str(info['quality']),
                'delay': netstat,
                'level': str(info['level']),
                'cros': cros,
                'online': 1,
                'udTime': self.now,
            }
            self.addData(data)
            self.T.logger('正在分析[ %s ]: %s' % (str(info['id']) + str(info['title']), url))
        else:
            pass  # MAYBE later :P

    def addData(self, data):
        """Insert the record, or update the existing row with the same url.

        NOTE(review): SQL is built via string interpolation - injection-prone
        if `url` is untrusted; prefer parameterized queries.
        """
        DB = db.DataBase()
        sql = "SELECT * FROM %s WHERE url = '%s'" % (DB.table, data['url'])
        result = DB.query(sql)
        if len(result) == 0:
            data['enable'] = 1
            DB.insert(data)
        else:
            # First column of the matched row is assumed to be its id.
            id = result[0][0]
            DB.edit(id, data)
|
thread_objects_02.py | import threading
from time import sleep
from tqdm import tqdm
def target_function(name: str = 'someone') -> None:
    """Greet *name*, report which thread is running, then tick a progress
    bar for roughly five seconds (one tqdm step per second)."""
    print(f'hello, {name}!')
    print(f'you are in the thread {threading.current_thread().name}')
    for _tick in tqdm(range(5)):
        sleep(1)
# Demo: run target_function on a named thread and wait for it to finish.
thread_01 = threading.Thread(name='t1', target=target_function, args=['Bruno'])
thread_01.start()
# This print races with the thread's output — ordering is nondeterministic.
print('executed in an incertain moment (after or along thread exection)')
thread_01.join()
print('executed after join thread 01')
|
ServiceMLFlask.py | #!flask/bin/python
import sys
import logging
from threading import Thread
import requests
from flask import Flask
from flask import jsonify
from flask import request
sys.path.append('./vgg19')
from Persistancy import Persistancy
from ModelVgg19 import ModelVgg19
app = Flask(__name__, static_folder='./html/')
# @app.route('/mlflask/1.0/health', methods=['get'])
# def health():
# return jsonify(status='OK')
# @app.route('/mlflask/1.0/vgg19start', methods=['get'])
# def health():
# return jsonify(status='OK')
# @app.route('/mlflask/1.0/vgg19status', methods=['get'])
# def health():
# return jsonify(status='OK')
#
# @app.route('/mlflask/1.0/vgg19stop', methods=['get'])
# def health():
# return jsonify(status='OK')
@app.route('/mlflask/1.0/index', methods=['get'])
def index():
    """Serve the static landing page from the app's static folder."""
    return app.send_static_file('index.html')
@app.route('/mlflask/1.0/imageresultforurl', methods=['post'])
def imageresultforurl():
    """Classify the image at the URL given in the JSON request body.

    Expects a payload of the form {"url": ...}. Downloads the image,
    queues it for the background VGG19 worker, then polls the persistence
    layer until a result appears and returns it as JSON. Responds with
    HTTP 500 when the payload is missing or lacks the 'url' key.
    """
    # BUG FIX: `time` was used below but never imported at module level,
    # so the polling loop raised NameError; import it locally.
    import time

    jsonObject = request.get_json()
    # validate data
    if jsonObject is None:
        return jsonify('no json payload'), 500
    if 'url' not in jsonObject:
        return jsonify('missing: url'), 500
    url = jsonObject['url']
    downloadImage(persistancy, url)
    model.enqueue(url)
    # Poll until the background worker has stored a result for this URL.
    # NOTE(review): spins forever if the worker dies — consider a timeout.
    result = persistancy.loadImageResult(url)
    while result is None:
        time.sleep(0.1)
        result = persistancy.loadImageResult(url)
    return jsonify(result)
def downloadImage(persistancy, url):
    """Fetch *url* (following redirects) and store the raw bytes at the
    local path the persistence layer assigns to that URL."""
    target_path = persistancy.getImageFilePath(url)
    payload = requests.get(url, allow_redirects=True).content
    with open(target_path, 'wb') as image_file:
        image_file.write(payload)
# Path to the pre-trained VGG19 weights file.
modelFilePath = './data/vgg19.npy'
persistancy = Persistancy('./data/')
model = ModelVgg19(persistancy, modelFilePath)
# Run the model's work loop on a background daemon thread so it dies
# together with the main Flask process.
t = Thread(target=model.start)
t.daemon = True
t.start()
if __name__ == '__main__':
    # logging.basicConfig(filename='error.log',level=logging.DEBUG)
    # NOTE(review): debug=True enables the interactive debugger — unsafe
    # outside local development.
    app.run(debug=True)
|
main.py | from trade_client import *
from store_order import *
from load_config import *
from collections import defaultdict
from datetime import datetime, time
import time
import threading
import json
import os.path
# loads local configuration
config = load_config('config.yml')
def get_all_coins():
    """
    Returns all coins from Binance
    """
    # `client` is expected to come from the `from trade_client import *`
    # wildcard import — presumably a Binance API client; confirm there.
    return client.get_all_tickers()
def generate_coin_seen_dict(all_coins):
    """Build the initial 'already seen' lookup for the polling loop.

    Every symbol present at startup maps to True; any symbol first
    encountered later defaults to False (courtesy of defaultdict(bool)),
    which is how new listings are detected.
    """
    return defaultdict(bool, {coin['symbol']: True for coin in all_coins})
def get_new_coins(coin_seen_dict, all_coins_recheck):
    """Return the coins in *all_coins_recheck* not yet marked in
    *coin_seen_dict*, marking each one as seen so it is never reported
    twice."""
    fresh = []
    for candidate in all_coins_recheck:
        symbol = candidate['symbol']
        if coin_seen_dict[symbol]:
            continue
        # Flag the symbol immediately so later polls skip it.
        coin_seen_dict[symbol] = True
        fresh.append(candidate)
    return fresh
def get_price(coin, pairing):
    """
    Get the latest price for a coin
    """
    # 'lastPrice' presumably comes back as a string from the ticker
    # endpoint; callers float() it themselves — confirm in trade_client.
    return client.get_ticker(symbol=coin+pairing)['lastPrice']
def add_updated_all_coins_to_queue(queue):
    """Fetch a fresh snapshot of all coins and push it onto the shared
    queue (a plain list consumed by main's polling loop)."""
    queue.append(get_all_coins())
def make_threads_to_request_all_coins(queue, interval=0.1, max_amount_of_threads=20, max_queue_length=20):
    """
    This method creates threads for new requests to get all coins.
    A new thread is created every interval.
    If there are more threads than max_amount_of_threads no new threads will be created, after every 1 second there
    will be a new attempt to create a thread.
    The amount of threads can increase if the response from Binance to get all coins increases.
    If the queue length gets bigger than max_queue_length no new threads will be created, after every 1 second there
    will be a new attempt to create a thread.
    The amount of elements in the queue can increase if the while loop in main takes long to handle.
    """
    # Infinite producer loop: intended to run on its own daemon-ish thread.
    while True:
        time.sleep(interval)
        # checks if the amount of threads is bigger than max_amount_of_threads
        # (threading.enumerate() counts ALL live threads, not just ours)
        if len(threading.enumerate()) > max_amount_of_threads:
            print("Too many threads, waiting 1 second to attempt to create a new thread.")
            time.sleep(1)
        # checks if the queue isn't getting too big
        elif len(queue) > max_queue_length:
            print("Queue length too big, waiting 1 second to attempt to create a new thread.")
            time.sleep(1)
        else:
            threading.Thread(target=add_updated_all_coins_to_queue, args=(queue,)).start()
def main():
    """
    Sells, adjusts TP and SL according to trailing values
    and buys new coins.

    Runs forever: one background thread keeps refreshing the full ticker
    list into a queue, while this loop (a) manages open positions stored in
    order.json — trailing TP/SL updates and sells — and (b) buys any newly
    listed coin that matches the configured pairing.

    BUG FIX: the sell call previously passed `coin['volume']`, but `coin`
    is a string key of the order dict, so indexing it raised TypeError;
    the correct value is the `volume` already read from the order.
    """
    # store config deets
    tp = config['TRADE_OPTIONS']['TP']
    sl = config['TRADE_OPTIONS']['SL']
    enable_tsl = config['TRADE_OPTIONS']['ENABLE_TSL']
    tsl = config['TRADE_OPTIONS']['TSL']
    ttp = config['TRADE_OPTIONS']['TTP']
    pairing = config['TRADE_OPTIONS']['PAIRING']
    qty = config['TRADE_OPTIONS']['QUANTITY']
    frequency = config['TRADE_OPTIONS']['RUN_EVERY']
    test_mode = config['TRADE_OPTIONS']['TEST']
    all_coins = get_all_coins()
    coin_seen_dict = generate_coin_seen_dict(all_coins)
    # this list will work as a queue, if a new updated all_coins is received it will be added to this queue
    queue_of_updated_all_coins = []
    # start a thread to run the make_threads_to_request_all_coins method
    threading.Thread(target=make_threads_to_request_all_coins, args=(queue_of_updated_all_coins,)).start()
    # this is just used to calculate the amount of time between getting updated all_coins
    t0 = time.time()
    while True:
        try:
            # check if the order file exists and load the current orders
            # basically the sell block and update TP and SL logic
            if os.path.isfile('order.json'):
                order = load_order('order.json')
                for coin in list(order):
                    # store some necesarry trade info for a sell
                    stored_price = float(order[coin]['price'])
                    coin_tp = order[coin]['tp']
                    coin_sl = order[coin]['sl']
                    volume = order[coin]['volume']
                    symbol = coin.split(pairing)[0]
                    last_price = get_price(symbol, pairing)
                    # update stop loss and take profit values if threshold is reached
                    if float(last_price) > stored_price + (stored_price*coin_tp /100) and enable_tsl:
                        # increase as absolute value for TP
                        new_tp = float(last_price) + (float(last_price)*ttp /100)
                        # convert back into % difference from when the coin was bought
                        new_tp = float( (new_tp - stored_price) / stored_price*100)
                        # same deal as above, only applied to trailing SL
                        new_sl = float(last_price) - (float(last_price)*tsl /100)
                        new_sl = float((new_sl - stored_price) / stored_price*100)
                        # new values to be added to the json file
                        order[coin]['tp'] = new_tp
                        order[coin]['sl'] = new_sl
                        store_order('order.json', order)
                        print(f'updated tp: {round(new_tp, 3)} and sl: {round(new_sl, 3)}')
                    # close trade if tsl is reached or trail option is not enabled
                    # (SL always sells; TP only sells when trailing is off)
                    elif float(last_price) < stored_price - (stored_price*sl /100) or float(last_price) > stored_price + (stored_price*tp /100) and not enable_tsl:
                        try:
                            # sell for real if test mode is set to false
                            if not test_mode:
                                # BUG FIX: was `create_order(coin, coin['volume'], 'SELL')`
                                # — `coin` is a str, so `coin['volume']` raised TypeError.
                                sell = create_order(coin, volume, 'SELL')
                            print(f"sold {coin} at {(float(last_price) - stored_price) / float(stored_price)*100}")
                            # remove order from json file
                            order.pop(coin)
                            store_order('order.json', order)
                        except Exception as e:
                            print(e)
                        # store sold trades data
                        else:
                            if os.path.isfile('sold.json'):
                                sold_coins = load_order('sold.json')
                            else:
                                sold_coins = {}
                            if not test_mode:
                                sold_coins[coin] = sell
                                store_order('sold.json', sold_coins)
                            else:
                                sold_coins[coin] = {
                                    'symbol':coin,
                                    'price':last_price,
                                    'volume':volume,
                                    'time':datetime.timestamp(datetime.now()),
                                    'profit': float(last_price) - stored_price,
                                    'relative_profit': round((float(last_price) - stored_price) / stored_price*100, 3)
                                    }
                                store_order('sold.json', sold_coins)
            else:
                order = {}
            # check if a new all_coins_updated is on the queue
            if len(queue_of_updated_all_coins) > 0:
                # get the first updated coins from the queue
                all_coins_updated = queue_of_updated_all_coins.pop(0)
                # check if new coins are listed
                new_coins = get_new_coins(coin_seen_dict, all_coins_updated)
                print("time to get updated list of coins: ", time.time() - t0)
                print("current amount of threads: ", len(threading.enumerate()))
                print("current queue length: ", len(queue_of_updated_all_coins))
                t0 = time.time()
            else:
                # if no new all_coins_updated is on the queue, new_coins should be empty
                new_coins = []
            # the buy block and logic pass
            if len(new_coins) > 0:
                print(f'New coins detected: {new_coins}')
                for coin in new_coins:
                    # buy if the coin hasn't already been bought
                    if coin['symbol'] not in order and pairing in coin['symbol']:
                        symbol_only = coin['symbol'].split(pairing)[0]
                        print(f"Preparing to buy {coin['symbol']}")
                        price = get_price(symbol_only, pairing)
                        volume = convert_volume(coin['symbol'], qty, price)
                        try:
                            # Run a test trade if true
                            if config['TRADE_OPTIONS']['TEST']:
                                order[coin['symbol']] = {
                                    'symbol':symbol_only+pairing,
                                    'price':price,
                                    'volume':volume,
                                    'time':datetime.timestamp(datetime.now()),
                                    'tp': tp,
                                    'sl': sl
                                    }
                                print('PLACING TEST ORDER')
                            # place a live order if False
                            else:
                                order[coin['symbol']] = create_order(symbol_only+pairing, volume, 'BUY')
                                order[coin['symbol']]['tp'] = tp
                                order[coin['symbol']]['sl'] = sl
                        except Exception as e:
                            print(e)
                        else:
                            print(f"Order created with {volume} on {coin['symbol']}")
                            store_order('order.json', order)
                    else:
                        print(f"New coin detected, but {coin['symbol']} is currently in portfolio, or {pairing} does not match")
            else:
                pass
        except Exception as e:
            print(e)
if __name__ == '__main__':
    # Script entry point: run the trading loop forever.
    print('working...')
    main()
|
fuwuduan-udp.py | import socket
import threading
import time
# UDP echo demo: bind on localhost:9999 and answer every datagram from a
# fresh thread.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('127.0.0.1', 9999))
print('Bind UDP on 9999...')
def tcplink(data,addr):
    """Reply to one datagram (misnamed: this is UDP, not TCP)."""
    print('Received from %s:%s.' % addr)
    # bytes %-formatting works on Python 3.5+
    s.sendto(b'Hello, %s!' % data, addr)
# Serve forever: one handler thread per incoming datagram.
while True:
    data, addr = s.recvfrom(1024)
    t = threading.Thread(target=tcplink, args=(data, addr))
    t.start()
|
server.py | import socket
import threading
import struct
IP = '127.0.0.1'
PORT = 1613
# Datagram size:
# each packet carries 1024 bytes of payload
# plus 24 bytes for the sequence number, ack number and end-of-file flag.
_BUFFER_SIZE = 1024 + 24
# Transfer-packet layout, defined with Python's struct module:
# sequence number, ack number, end-of-file flag, 1024 bytes of data.
# pkt_value = (int seq, int ack, int end_flag, 1024-byte bytes data)
pkt_struct = struct.Struct('III1024s')
# Feedback packet: (ack'd sequence number, remaining receive window).
fb_struct = struct.Struct('II')
def receiver(s, cli_address, file_name):
    """Receive *file_name* from the client over UDP socket *s*.

    Packets follow pkt_struct = (seq, ack, end_flag, 1024B data); every
    packet is answered with fb_struct = (seq, rwnd). Packets are buffered
    in temp_data while the receive window (rwnd) is open, then flushed to
    disk until the end-of-file flag is seen.

    BUG FIXES: the out-of-order branch sent its NAK to the undefined name
    `sev_address` (NameError) — it must go back to cli_address; and the
    final `f.close` was never *called*, leaking the file handle.
    """
    print('服务器正在接受', file_name, '从客户端', cli_address)
    # Open the target file in binary write mode: an existing file is
    # truncated, a missing one is created.
    f = open(file_name, 'wb')
    # packet counter (next expected packet number)
    packet_count = 1
    # receive window size
    rwnd = 110
    # buffered, not-yet-written packets
    temp_data = []
    while True:
        # read one datagram
        data, cli_address = s.recvfrom(_BUFFER_SIZE)
        # unpack the packet; anything that doesn't fit the layout ends the loop
        try:
            unpkt_data = pkt_struct.unpack(data)
        except struct.error:
            break
        packet_count += 1
        if rwnd > 0:
            # a sequence number of 0 is a probe — ack it and skip
            if unpkt_data[0] == 0:
                s.sendto(fb_struct.pack(*(unpkt_data[0], rwnd)), cli_address)
                continue
            # out-of-order packet: drop it and re-ack the previous one
            if unpkt_data[1] != packet_count - 1:
                print('包序号错误!-> ', unpkt_data[0])
                # BUG FIX: was sent to undefined `sev_address`
                s.sendto(fb_struct.pack(*(unpkt_data[1] - 1, rwnd)), cli_address)
                continue
            temp_data.append(unpkt_data)
            rwnd -= 1
            # accepted — return the ACK with the shrunken window
            s.sendto(fb_struct.pack(*(unpkt_data[0], rwnd)), cli_address)
        # window exhausted: still ack so the client sees rwnd == 0
        else:
            s.sendto(fb_struct.pack(*(unpkt_data[0], rwnd)), cli_address)
        print('服务器已经接受第',unpkt_data[0],'个包')
        # flush buffered packets to disk, reopening the window as we go
        while len(temp_data) > 0:
            unpkt_data = temp_data[0]
            seq = unpkt_data[0]
            ack = unpkt_data[1]
            end = unpkt_data[2]
            data = unpkt_data[3]
            # consume the packet, then grow the window by one
            del temp_data[0]
            rwnd += 1
            # write data until the end-of-file flag (end == 1)
            if end != 1:
                f.write(data)
            else:
                break
        if unpkt_data[2] == 1:
            break
    print('传输完毕'+str(packet_count), '个包')
    # BUG FIX: `f.close` (no parentheses) never closed the file
    f.close()
def server_child_thread(data, cli_address):
    """Handle one client request on its own thread.

    *data* is expected to be '<command>,<file name>' encoded as UTF-8;
    only the 'lsend' command (client uploads a file) is implemented.
    """
    # Extract the command and the file name from the request;
    # bail out silently on any malformed payload.
    try:
        command = data.decode('utf-8').split(',')[0]
        file_name = data.decode('utf-8').split(',')[1]
    except Exception as e:
        return
    # Dedicated UDP socket for this transfer
    # (socket.SOCK_DGRAM -> datagram/UDP communication; the OS picks a port).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Enlarge the socket send/receive buffers to 64 KB.
    # (values to be tuned)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024 * 64)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024 * 64)
    if command == 'lsend':
        # Ask the client to confirm the connection ("发起链接?" = "initiate link?").
        s.sendto('发起链接?'.encode('utf-8'), cli_address)
        # Wait for the client's acknowledgement.
        data, cli_address = s.recvfrom(_BUFFER_SIZE)
        print('来自', cli_address,'的数据是:', data.decode('utf-8'))
        # Receive the actual file.
        receiver(s, cli_address, file_name)
        # Connection teardown — a four-way handshake mirroring TCP's close.
        print('\n开始断开连接(四次握手)')
        data, sev_address = s.recvfrom(_BUFFER_SIZE)
        print(data.decode('utf-8'))
        data = '我服务器同意断开连接'
        s.sendto(data.encode('utf-8'), cli_address)
        print(data)
        data = '我服务器请求断开连接'
        s.sendto(data.encode('utf-8'), cli_address)
        print(data)
        data, sev_address = s.recvfrom(_BUFFER_SIZE)
        print(data.decode('utf-8'))
        print('结束链接')
        s.close()
def main():
    """Listen on (IP, PORT) and spawn one handler thread per request."""
    # Create the listening socket
    # (socket.SOCK_DGRAM -> datagram/UDP communication).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind the socket to the configured address.
    s.bind((IP, PORT))
    while True:
        # The client first sends its command plus address here.
        data, cli_address = s.recvfrom(_BUFFER_SIZE)
        # For every request, start a thread running server_child_thread;
        # args carries the function's arguments.
        child_thread = threading.Thread(target=server_child_thread, args=(data, cli_address))
        # Launch it.
        child_thread.start()
if __name__ == "__main__":
main() |
dropbox.py | #!/usr/bin/python
#
# Copyright (c) Dropbox, Inc.
#
# dropbox
# Dropbox frontend script
# This file is part of nautilus-dropbox 2015.02.12.
#
# nautilus-dropbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nautilus-dropbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nautilus-dropbox. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import with_statement
import errno
import locale
import optparse
import os
import platform
import shutil
import socket
import StringIO
import subprocess
import sys
import tarfile
import tempfile
import threading
import thread
import time
import traceback
import urllib2
try:
import gpgme
except ImportError:
gpgme = None
from contextlib import closing, contextmanager
from posixpath import curdir, sep, pardir, join, abspath, commonprefix
# User-facing message strings and install locations (Python 2 unicode
# literals; this installer script is Python 2 only).
INFO = u"Dropbox is the easiest way to share and store your files online. Want to learn more? Head to"
LINK = u"https://www.dropbox.com/"
WARNING = u"In order to use Dropbox, you must download the proprietary daemon."
GPG_WARNING = u"Note: python-gpgme is not installed, we will not be able to verify binary signatures."
ERROR_CONNECTING = u"Trouble connecting to Dropbox servers. Maybe your internet connection is down, or you need to set your http_proxy environment variable."
ERROR_SIGNATURE = u"Downloaded binary does not match Dropbox signature, aborting install."
# Download endpoints, parameterized by the platform string from plat().
DOWNLOAD_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s"
SIGNATURE_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s&signature=1"
DOWNLOADING = u"Downloading Dropbox... %d%%"
UNPACKING = u"Unpacking Dropbox... %d%%"
PARENT_DIR = os.path.expanduser("~")
DROPBOXD_PATH = "%s/.dropbox-dist/dropboxd" % PARENT_DIR
DESKTOP_FILE = u"/usr/share/applications/dropbox.desktop"
# Console output encoding, taken from the user's locale.
enc = locale.getpreferredencoding()
# Available from https://linux.dropbox.com/fedora/rpm-public-key.asc
DROPBOX_PUBLIC_KEY = """
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.0
mQENBEt0ibEBCACv4hZRPqwtpU6z8+BB5YZU1a3yjEvg2W68+a6hEwxtCa2U++4dzQ+7EqaU
q5ybQnwtbDdpFpsOi9x31J+PCpufPUfIG694/0rlEpmzl2GWzY8NqfdBFGGm/SPSSwvKbeNc
FMRLu5neo7W9kwvfMbGjHmvUbzBUVpCVKD0OEEf1q/Ii0Qcekx9CMoLvWq7ZwNHEbNnij7ec
nvwNlE2MxNsOSJj+hwZGK+tM19kuYGSKw4b5mR8IyThlgiSLIfpSBh1n2KX+TDdk9GR+57TY
vlRu6nTPu98P05IlrrCP+KF0hYZYOaMvQs9Rmc09tc/eoQlN0kkaBWw9Rv/dvLVc0aUXABEB
AAG0MURyb3Bib3ggQXV0b21hdGljIFNpZ25pbmcgS2V5IDxsaW51eEBkcm9wYm94LmNvbT6J
ATYEEwECACAFAkt0ibECGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRD8kYszUESRLi/z
B/wMscEa15rS+0mIpsORknD7kawKwyda+LHdtZc0hD/73QGFINR2P23UTol/R4nyAFEuYNsF
0C4IAD6y4pL49eZ72IktPrr4H27Q9eXhNZfJhD7BvQMBx75L0F5gSQwuC7GdYNlwSlCD0AAh
Qbi70VBwzeIgITBkMQcJIhLvllYo/AKD7Gv9huy4RLaIoSeofp+2Q0zUHNPl/7zymOqu+5Ox
e1ltuJT/kd/8hU+N5WNxJTSaOK0sF1/wWFM6rWd6XQUP03VyNosAevX5tBo++iD1WY2/lFVU
JkvAvge2WFk3c6tAwZT/tKxspFy4M/tNbDKeyvr685XKJw9ei6GcOGHD
=5rWG
-----END PGP PUBLIC KEY BLOCK-----
"""
# Futures
def methodcaller(name, *args, **kwargs):
    """Backport of operator.methodcaller: return a callable that invokes
    the method *name* on its argument with the captured arguments."""
    def call_method(target):
        bound = getattr(target, name)
        return bound(*args, **kwargs)
    return call_method
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    # Python 2 backport ("Futures" section) of os.path.relpath. Unicode and
    # byte-string paths take separate branches because os.getcwd() returns
    # a str and must not be mixed with unicode (see unicode_abspath).
    if not path:
        raise ValueError("no path specified")
    if type(start) is unicode:
        start_list = unicode_abspath(start).split(sep)
    else:
        start_list = abspath(start).split(sep)
    if type(path) is unicode:
        path_list = unicode_abspath(path).split(sep)
    else:
        path_list = abspath(path).split(sep)
    # Work out how much of the filepath is shared by start and path.
    i = len(commonprefix([start_list, path_list]))
    # Climb out of the unshared part of start, then descend into path.
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
# End Futures
def console_print(st=u"", f=sys.stdout, linebreak=True):
    """Write the unicode string *st* to *f*, encoded in the user's
    preferred locale encoding (Python 2 byte-oriented output)."""
    global enc
    # Callers must pass unicode, never byte strings.
    assert type(st) is unicode
    f.write(st.encode(enc))
    if linebreak: f.write(os.linesep)
def console_flush(f=sys.stdout):
    """Flush any buffered console output pending on *f*."""
    f.flush()
def yes_no_question(question):
    """Prompt the user with *question* until an answer starting with
    'y' (True) or 'n' (False) is typed; re-asks on anything else.
    Python 2 only: uses raw_input()."""
    while True:
        console_print(question, linebreak=False)
        console_print(u" [y/n] ", linebreak=False)
        console_flush()
        text = raw_input()
        if text.lower().startswith("y"):
            return True
        elif text.lower().startswith("n"):
            return False
        else:
            console_print(u"Sorry, I didn't understand that. Please type yes or no.")
def plat():
    """Return the Dropbox download platform tag ('lnx.x86' or
    'lnx.x86_64') for the current machine.

    NOTE(review): FatalVisibleError is defined elsewhere in this file and
    presumably terminates the process — if it returned instead, the
    fall-through here would raise (no return / unbound `plat`); confirm.
    """
    if sys.platform.lower().startswith('linux'):
        arch = platform.machine()
        # i386 / i486 / i586 / i686 -> generic 32-bit x86
        if (arch[0] == 'i' and
            arch[1].isdigit() and
            arch[2:4] == '86'):
            plat = "x86"
        elif arch == 'x86_64':
            plat = arch
        else:
            FatalVisibleError("Platform not supported")
        return "lnx.%s" % plat
    else:
        FatalVisibleError("Platform not supported")
def is_dropbox_running():
    """Best-effort check: True when the PID recorded in
    ~/.dropbox/dropbox.pid belongs to a live process whose /proc cmdline
    mentions 'dropbox'. Any failure (missing pidfile, dead PID, bad
    contents) is treated as 'not running'."""
    pidfile = os.path.expanduser("~/.dropbox/dropbox.pid")
    cmdline = ""
    try:
        with open(pidfile, "r") as pid_fh:
            pid = int(pid_fh.read())
        with open("/proc/%d/cmdline" % pid, "r") as cmd_fh:
            cmdline = cmd_fh.read().lower()
    except:
        # deliberately broad: any problem means "not running"
        cmdline = ""
    return "dropbox" in cmdline
def unicode_abspath(path):
    """Return the absolute form of a unicode *path* (Python 2)."""
    global enc
    assert type(path) is unicode
    # shouldn't pass unicode to this craphead, it appends with os.getcwd() which is always a str
    # — so round-trip through the filesystem encoding instead.
    return os.path.abspath(path.encode(sys.getfilesystemencoding())).decode(sys.getfilesystemencoding())
@contextmanager
def gpgme_context(keys):
    """Context manager yielding a gpgme.Context with *keys* (open key
    files) imported as signers, inside a throwaway GNUPGHOME that is
    removed on exit.

    NOTE(review): the finally-block `del os.environ['GNUPGHOME']` raises
    KeyError if GNUPGHOME was unset and setup failed before the try body's
    first line ran — benign here since the assignment is that first line,
    and any pre-existing GNUPGHOME value is not restored.
    """
    gpg_conf_contents = ''
    _gpghome = tempfile.mkdtemp(prefix='tmp.gpghome')
    try:
        # point gpg at the temporary, empty home directory
        os.environ['GNUPGHOME'] = _gpghome
        fp = open(os.path.join(_gpghome, 'gpg.conf'), 'wb')
        fp.write(gpg_conf_contents)
        fp.close()
        ctx = gpgme.Context()
        loaded = []
        for key_file in keys:
            # import each key and fetch it back by fingerprint
            result = ctx.import_(key_file)
            key = ctx.get_key(result.imports[0][0])
            loaded.append(key)
        ctx.signers = loaded
        yield ctx
    finally:
        del os.environ['GNUPGHOME']
        shutil.rmtree(_gpghome, ignore_errors=True)
class SignatureVerifyError(Exception):
    """Raised when the downloaded tarball fails GPG signature verification."""
def verify_signature(key_file, sig_file, plain_file):
    """Verify *plain_file* against the detached signature *sig_file* using
    the key(s) read from *key_file*; return True when the first signature
    is valid (gpgme reports a status of None on success)."""
    with gpgme_context([key_file]) as ctx:
        sigs = ctx.verify(sig_file, plain_file, None)
        # `is None`, not `== None`: identity is the correct (PEP 8) test
        return sigs[0].status is None
def download_file_chunk(url, buf):
    """Generator that downloads *url* into the file-like *buf*, yielding
    (fraction_complete, made_progress) after each chunk.

    Python 2 only (urllib2, `except OSError, e` syntax). Chunk size is
    ~0.5% of the total, floored at 4 KB, so callers get ~200 progress
    ticks per download.
    """
    opener = urllib2.build_opener()
    opener.addheaders = [('User-Agent', "DropboxLinuxDownloader/2015.02.12")]
    sock = opener.open(url)
    size = int(sock.info()['content-length'])
    bufsize = max(size / 200, 4096)
    progress = 0
    with closing(sock) as f:
        # initial tick so the caller can draw 0% immediately
        yield (0, True)
        while True:
            try:
                chunk = f.read(bufsize)
                progress += len(chunk)
                buf.write(chunk)
                yield (float(progress)/size, True)
                if progress == size:
                    break
            except OSError, e:
                if hasattr(e, 'errno') and e.errno == errno.EAGAIN:
                    # nothing left to read
                    yield (float(progress)/size, False)
                else:
                    raise
class DownloadState(object):
    """Holds the in-memory download of the Dropbox daemon tarball and
    knows how to verify and unpack it (Python 2: StringIO buffer)."""
    def __init__(self):
        # entire tarball is buffered in memory
        self.local_file = StringIO.StringIO()
    def copy_data(self):
        """Return the progress generator that fills local_file from the
        platform-specific download URL."""
        return download_file_chunk(DOWNLOAD_LOCATION_FMT % plat(), self.local_file)
    def unpack(self):
        """Generator: verify the GPG signature (when gpgme is available)
        and extract the tarball into PARENT_DIR, yielding
        (member_name, index, total_members) per extracted member.
        Raises SignatureVerifyError on a bad signature."""
        # download signature
        signature = StringIO.StringIO()
        for _ in download_file_chunk(SIGNATURE_LOCATION_FMT % plat(), signature):
            pass
        signature.seek(0)
        self.local_file.seek(0)
        # signature check is skipped entirely when python-gpgme is missing
        if gpgme:
            if not verify_signature(StringIO.StringIO(DROPBOX_PUBLIC_KEY), signature, self.local_file):
                raise SignatureVerifyError()
        self.local_file.seek(0)
        archive = tarfile.open(fileobj=self.local_file, mode='r:gz')
        total_members = len(archive.getmembers())
        for i, member in enumerate(archive.getmembers()):
            archive.extract(member, PARENT_DIR)
            yield member.name, i, total_members
        archive.close()
    def cancel(self):
        """Abort the download and release the in-memory buffer."""
        if not self.local_file.closed:
            self.local_file.close()
def load_serialized_images():
global box_logo_pixbuf, window_icon
import gtk
box_logo_pixbuf = gtk.gdk.pixbuf_new_from_data('\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00
\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x08\x00\\\x9ef\x00\\\x9ej\x00\\\x9e\x04\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eZ\x00[\x9er\x00\\\x9e\x14\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\
x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e8\x00Y\x9c\xc2\x00X\x9b\xff\x00X\x9b\xff\x00[\x9d\xaa\x00\\\x9e\r\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x02\x00\\\x9e\x8e\x00Y\x9b\xff\x00Y\x9b\xff\x00Y\x9b\xd5\x00\\\x9eM\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x12\x00[\x9d\x8b\x00U\x99\xfa\x0fe\xa5\xff]\xa2\xd3\xffM\x95\xc9\xff\x00X\x9b\xff\x00Y\x9c\xc9\x00\\\x9e\x1e\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0f\x00[\x9d\xb1\x00V\x99\xff4\x85\xc1\xffZ\xa3\xda\xff\x17m\xab\xff\x00V\x99\xff\x00Z\x9d\xa2\x00\\\x9e 
\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\\\x00W\x9a\xde\x00Y\x9c\xff:\x87\xbf\xff\x83\xbf\xeb\xff\x98\xce\xf6\xff\x9b\xd0\xf6\xffa\xa3\xd3\xff\x05]\x9e\xff\x00X\x9b\xda\x00\\\x9e/\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x1c\x00Z\x9c\xc5\x01Y\x9b\xff?\x90\xca\xff|\xc1\xf4\xff\x82\xc4\xf6\xff}\xbf\xf0\xffD\x90\xc8\xff\x05]\x9e\xff\x00V\x9a\xed\x00\\\x9es\x00\\\x9e\x07\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e4\x00Z\x9c\xba\x00V\x99\xff\x1dq\xae\xffd\xaa\xdd\xff\x8e\xc9\xf5\xff\x8e\xc7\xf3\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\xa1\xd2\xf6\xffw\xb3\xde\xff\x0fd\xa3\xff\x00V\x9a\xed\x00\\\x9eL\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e4\x00X\x9b\xdd\x05^\x9f\xffM\x9d\xd6\xffy\xc1\xf6\xffw\xbe\xf2\xffz\xbe\xf1\xff\x80\xc1\xf2\xff\x89\xc8\xf6\xffq\xb3\xe3\xff*z\xb5\xff\x00W\x9a\xff\x00X\x9b\xcd\x00\\\x9eG\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x
00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0e\x00[\x9d\x86\x00V\x99\xfa\x0cc\xa4\xffK\x96\xce\xff\x81\xc2\xf2\xff\x89\xc7\xf5\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\xa4\xd3\xf6\xff\x85\xbb\xe4\xff\x18k\xa8\xff\x00U\x99\xfc\x00\\\x9en\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eS\x00W\x9a\xf1\x0bb\xa3\xffT\xa3\xdd\xffv\xc0\xf7\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x8a\xc7\xf4\xff\x8f\xc9\xf4\xff`\xa3\xd5\xff\x15i\xa8\xff\x00U\x98\xff\x00[\x9d\x9c\x00\\\x9e\x1a\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eU\x00X\x9b\xd9\x00Z\x9c\xff1\x83\xbf\xffp\xb6\xea\xff\x84\xc5\xf6\xff\x80\xc2\xf2\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\xa6\xd3\xf5\xff\x96\xc7\xeb\xff*y\xb2\xff\x00T\x98\xff\x00\\\x9e\x90\x00\\\x9e\x02\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eu\x00V\x99\xfe\x14k\xac\xff\\\xac\xe6\xffr\xbd\xf6\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8b\xc5\xf1\xff\x95\xcc\xf6\xff\x8c\xc5\xee\xffH\x90\xc5\xff\x04]\x9e\xff\x00V\x9a\xe7\x00\\\x9ej\x00\\\x9e\x03\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e.\x00Z\x9c\xb3\x00V\x99\xff\x17m\xad\xffV\xa3\xdc\xff{\xc2\xf6\xff|\xbf\xf3\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x8
7\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa6\xd3\xf4\xff\xa4\xd1\xf1\xff@\x88\xbd\xff\x00U\x99\xff\x00[\x9d\xb0\x00\\\x9e\x0c\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x02\x00[\x9d\x97\x00V\x98\xff\x1fv\xb6\xffa\xb1\xed\xffl\xbb\xf4\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x93\xcb\xf2\xff\x9e\xd1\xf6\xff|\xb7\xe1\xff(w\xb2\xff\x00U\x99\xff\x00Y\x9c\xc6\x00\\\x9e?\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00[\x9e\x86\x00V\x99\xf6\ta\xa2\xff=\x8f\xcc\xffm\xb9\xf1\xffu\xbf\xf5\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa8\xd3\xf3\xff\xae\xd8\xf4\xffX\x99\xc9\xff\x00X\x9b\xff\x00Y\x9c\xc2\x00\\\x9e\x1b\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\r\x00[\x9d\xab\x00W\x99\xff,\x82\xc1\xffe\xb5\xf2\xffh\xb7\xf3\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x9f\xd1\xf5\xff\xa0\xcf\xf3\xffe\xa3\xd1\xff\x12f\xa5\xff\x00U\x98\xff\x00[\x9d\x9b\x00\\\x9e\x16\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eN\x00Y\x9b\xd3\x00Y\x9c\xff\'}\xbc\xff]\xad\xe8\xffp\xbe\xf6\xffn\xba\xf2\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa9\xd4\xf2\xff\xb5\xdb\xf6\xffq\xaa\xd4\xff\x04[\x9e\xff\x00X\
x9b\xdc\x00\\\x9e>\x00\\\x9e0\x00Z\x9c\xc9\x00Z\x9b\xff8\x8d\xcd\xffe\xb7\xf5\xffc\xb4\xf2\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9d\xce\xf2\xff\xa9\xd5\xf6\xff\x99\xc9\xec\xffI\x8e\xc1\xff\x03[\x9d\xff\x00V\x9a\xe1\x00\\\x9ea\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e(\x00[\x9d\xab\x00V\x98\xff\x13j\xab\xffK\x9e\xdc\xffi\xb9\xf6\xffj\xb8\xf3\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xb9\xdc\xf6\xff\x80\xb5\xda\xff\rb\xa2\xff\x00W\x9a\xff\x00Y\x9b\xfe\x04]\x9f\xff>\x94\xd4\xffd\xb6\xf6\xff`\xb3\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa6\xd2\xf3\xff\xb0\xd9\xf6\xff\x87\xbb\xe0\xff\'u\xaf\xff\x00T\x98\xff\x00Y\x9c\xbd\x00\\\x9e7\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x08\x00\\\x9e~\x00W\x99\xf2\x05^\x9f\xff3\x89\xc9\xff^\xb1\xf0\xffe\xb7\xf5\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xad\xd4\xf1\xff\xbb\xdd\xf6\xff\x96\xc3\xe4\xff\x18i\xa7\xff\x01]\xa2\xffH\x9e\xde\xffa\xb6\xf6\xff^\xb1\xf1\xff`\xb3\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf
1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xb0\xd8\xf5\xff\xad\xd5\xf1\xfff\xa2\xce\xff\rb\xa2\xff\x00U\x99\xfb\x00\\\x9e\x92\x00\\\x9e\x11\x00\\\x9e\x9b\x02\\\x9e\xff\x1ct\xb5\xffM\xa3\xe3\xffb\xb7\xf6\xff`\xb3\xf2\xffa\xb3\xf1\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xad\xd4\xf1\xff\xae\xd5\xf1\xff\xb7\xdb\xf4\xff\xaa\xcf\xe8\xffm\xb3\xe6\xffX\xb2\xf4\xffX\xae\xf1\xff^\xb1\xf1\xff`\xb3\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xae\xd5\xf2\xff\xba\xdd\xf7\xff\x9b\xc7\xe6\xff<\x83\xb8\xff\x06^\x9f\xff\x00[\x9d\xb6\x00Z\x9c\xdd\x0cd\xa6\xffR\xa9\xe9\xffb\xb7\xf8\xff\\\xb1\xf1\xff_\xb2\xf1\xffa\xb3\xf1\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xaa\xd2\xf0\xff\xb2\xd7\xf1\xff\xce\xe5\xf6\xff\xe9\xf5\xfd\xff\xd0\xeb\xfe\xff\xa1\xd2\xf7\xffg\xb6\xf2\xffW\xad\xf0\xff_\xb2\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf
1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xae\xd4\xf1\xff\xbf\xe0\xf7\xff\xac\xd2\xee\xff\x1eo\xaa\xff\x00X\x9b\xeb\x00\\\x9eR\x00Y\x9b\xf6\x0ce\xa6\xffH\x9e\xde\xffb\xb6\xf6\xff_\xb2\xf1\xffa\xb3\xf1\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa5\xd2\xf1\xff\xa8\xd2\xf0\xff\xbe\xdd\xf4\xff\xdd\xee\xfa\xff\xe9\xf3\xfc\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xce\xe8\xfb\xff\xc3\xe2\xfa\xff\x89\xc6\xf5\xff]\xb1\xf1\xff]\xb1\xf0\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xba\xdd\xf6\xff\x93\xc1\xe3\xff\x1fo\xaa\xff\x00W\x9b\xff\x00\\\x9eo\x00\\\x9e\x00\x00\\\x9e;\x00Y\x9b\xdf\x03\\\x9e\xff;\x90\xd0\xffd\xb6\xf5\xffb\xb4\xf2\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa0\xcf\xf1\xff\xa1\xcf\xf0\xff\xae\xd6\xf2\xff\xcf\xe6\xf8\xff\xe4\xf2\xfb\xff\xe5\xf2\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xcb\xe7\xfb\xff\xd1\xe9\xfb\xff\xb3\xda\xf9\xffx\xbe\xf3\xff^\xb1\xf0\xfff\xb6\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xaa\xd4\xf2\xff\xb7\xdb\xf6\xffx\xaf\xd6\xff\x0b`\xa1\xff\x00V\x
9a\xed\x00\\\x9eR\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x1c\x00Z\x9c\xbe\x00X\x99\xff-\x83\xc2\xffe\xb6\xf3\xfff\xb6\xf3\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9b\xcc\xf0\xff\xa1\xcf\xf1\xff\xbf\xde\xf6\xff\xdc\xee\xfa\xff\xe3\xf1\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xd2\xe8\xfb\xff\xd0\xe9\xfb\xff\xa2\xd2\xf7\xffm\xb9\xf1\xffe\xb5\xf0\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa9\xd4\xf2\xff\xb1\xd9\xf5\xff[\x9b\xc9\xff\x00X\x9b\xff\x00Y\x9c\xd3\x00\\\x9e-\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x06\x00[\x9d\x96\x00V\x99\xff"x\xb8\xffa\xb1\xee\xffk\xba\xf4\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x96\xca\xf1\xff\x97\xca\xf0\xff\xac\xd5\xf3\xff\xd0\xe7\xf9\xff\xe0\xef\xfb\xff\xdf\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xd0\xea\xfb\xff\xd8\xec\xfb\xff\xc8\xe5\xfa\xff\x8f\xc9\xf4\xffi\xb7\xf0\xffo\xb9\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa8\xd4\xf4\xff\xa6\xd2\xf1\xffE\x8c\xbf\xff\x00U\x99\xff\x00Z\x9d\xaf\x00\\\x9e\x12\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x
9en\x00V\x98\xfe\x14k\xac\xffY\xaa\xe5\xffp\xbd\xf6\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x8e\xc7\xf0\xff\x9a\xcc\xf1\xff\xbd\xde\xf7\xff\xd8\xec\xfb\xff\xdc\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd6\xec\xfb\xff\xda\xed\xfb\xff\xb6\xdc\xf8\xff\x80\xc1\xf2\xffo\xb9\xf0\xffy\xbd\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\xa7\xd4\xf5\xff\x95\xc7\xea\xff+y\xb2\xff\x00T\x98\xff\x00[\x9e\x88\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eQ\x00W\x9a\xee\x08a\xa2\xffL\x9d\xd8\xfft\xbf\xf6\xffq\xbb\xf2\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x88\xc4\xf1\xff\x8d\xc6\xf0\xff\xaa\xd5\xf4\xff\xcd\xe7\xfa\xff\xd8\xed\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xdd\xef\xfc\xff\xd7\xeb\xfb\xff\xa6\xd4\xf5\xff{\xbe\xf1\xffy\xbd\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\xa6\xd4\xf6\xff~\xb6\xdf\xff\x15h\xa7\xff\x00U\x99\xf9\x00\\\x9ek\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e-\x00Y\x9c\xd2\x01Y\x9c\xff<\x8e\xca\xffu\xbe\xf4\xffv\xbe\xf2\xffy\
xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x82\xc1\xf1\xff\x83\xc1\xf0\xff\x97\xcb\xf3\xff\xbe\xe0\xf8\xff\xd4\xeb\xfb\xff\xd5\xeb\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xdc\xee\xfb\xff\xe3\xf0\xfc\xff\xcd\xe7\xf9\xff\x98\xcc\xf3\xff|\xbf\xf0\xff\x82\xc2\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x96\xcb\xf2\xff\xa1\xd2\xf5\xffc\xa3\xd2\xff\x06]\x9f\xff\x00W\x9b\xe5\x00\\\x9eC\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x12\x00[\x9d\xaf\x00V\x98\xff.\x81\xbe\xffv\xbd\xf2\xff|\xc0\xf4\xff|\xbf\xf1\xff{\xbf\xf0\xff\x83\xc2\xf1\xff\xaa\xd5\xf6\xff\xcc\xe6\xfb\xff\xd1\xea\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xe2\xf1\xfb\xff\xe3\xf2\xfb\xff\xbe\xdf\xf7\xff\x8b\xc6\xf1\xff\x84\xc2\xf0\xff\x8c\xc5\xf1\xff\x94\xcb\xf3\xff\x9b\xcf\xf4\xffK\x92\xc6\xff\x00W\x9a\xff\x00Y\x9c\xc7\x00\\\x9e#\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x8c\x00V\x98\xfd 
u\xb3\xffn\xb4\xe8\xff~\xc0\xf3\xff\x94\xca\xf4\xff\xbe\xe0\xf9\xff\xcf\xe8\xfb\xff\xcd\xe6\xfb\xff\xce\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe1\xf0\xfb\xff\xe8\xf3\xfb\xff\xdb\xed\xfa\xff\xac\xd5\xf4\xff\x8f\xc7\xf2\xff\x89\xc3\xed\xff6\x83\xbb\xff\x00U\x99\xff\x00[\x9d\xa9\x00\\\x9e\n\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x07\x00\\\x9e\xf1\x00Q\x95\xff\x18p\xb0\xff\x98\xcd\xf5\xff\xd4\xeb\xfd\xff\xce\xe8\xfb\xff\xcb\xe6\xfb\xff\xcc\xe6\xfb\xff\xce\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe6\xf3\xfb\xff\xf2\xf8\xfd\xff\xc9\xe5\xf9\xff1\x81\xba\xff\x00O\x94\xff\x00\\\x9e\xff\x00\\\x9e\'\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e}\x00V\x99\xfc\x1ap\xae\xffc\xad\xe4\xffM\xa8\xef\xff\x83\xc2\xf3\xff\xc6\xe4\xfb\xff\xd1\xe9\xfc\xf
f\xcc\xe6\xfb\xff\xce\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe7\xf4\xfc\xff\xe7\xf3\xfb\xff\xb6\xd8\xf4\xff{\xbc\xee\xff\x7f\xbd\xe9\xff/}\xb7\xff\x00U\x99\xff\x00[\x9d\x9d\x00\\\x9e\x06\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00[\x9d\xa2\x00U\x98\xff\'{\xb9\xffs\xbb\xef\xff{\xc0\xf4\xff@\xa1\xed\xff3\x99\xeb\xffW\xac\xee\xff\xa7\xd4\xf7\xff\xd3\xe9\xfc\xff\xd1\xeb\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xe0\xf0\xfb\xff\xea\xf5\xfc\xff\xcc\xe5\xf8\xff~\xbe\xee\xffX\xaa\xe9\xffc\xb0\xe9\xff\x92\xca\xf3\xff\x9a\xcd\xf3\xffC\x8d\xc2\xff\x00U\x99\xff\x00Z\x9c\xbd\x00\\\x9e\x1c\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e%\x00Z\x9c\xc9\x00X\x9b\xff6\x8a\xc6\xffs\xbd\xf3\xffw\xbe\xf3\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xff>\x9f\xeb\xffE\xa2\xeb\xff}\xbf\xf1\xff\xc3\xe3\xfa\xff\xd8\xed\xfc\xff\xd4\xeb\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\x
ed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xdb\xed\xfb\xff\xe4\xf1\xfc\xff\xda\xed\xfb\xff\x97\xca\xf2\xffV\xa9\xea\xffS\xa7\xe9\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x97\xcc\xf2\xff\xa1\xd1\xf5\xff\\\x9e\xcf\xff\x03[\x9d\xff\x00X\x9b\xdf\x00\\\x9e<\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eH\x00X\x9a\xe8\x06_\xa0\xffH\x99\xd5\xffs\xbf\xf6\xffq\xbb\xf2\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffH\xa4\xec\xffG\xa4\xeb\xff\\\xad\xed\xff\x9e\xcf\xf5\xff\xd4\xea\xfb\xff\xda\xee\xfc\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xdd\xef\xfc\xff\xe1\xf0\xfc\xff\xac\xd5\xf5\xff\\\xad\xec\xffB\xa0\xe9\xffQ\xa7\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x9a\xcc\xf1\xff\xa6\xd4\xf6\xffy\xb2\xdd\xff\x11f\xa5\xff\x00V\x99\xf6\x00\\\x9ed\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9ef\x00V\x99\xfc\x11h\xa9\xffV\xa7\xe2\xffp\xbd\xf6\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffR\xa9\xec\xffS\xa9\xeb\xffx\xbb\xef\xff\xba\xdd\xf7\xff\xdd\xef\xfc\xff\xdc\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\x
ff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd5\xeb\xfb\xff\xdf\xf0\xfc\xff\xc1\xe1\xf9\xffk\xb5\xef\xff8\x9b\xe9\xff@\xa0\xe9\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\xa8\xd5\xf5\xff\x91\xc4\xe8\xff\'u\xb0\xff\x00T\x98\xff\x00\\\x9e\x83\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x04\x00[\x9d\x90\x00W\x98\xff\x1fv\xb6\xff_\xaf\xec\xffk\xba\xf5\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xff\\\xad\xeb\xffc\xb1\xeb\xff\x99\xcc\xf2\xff\xd4\xe9\xfa\xff\xe2\xf0\xfc\xff\xdf\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd9\xed\xfc\xff\xd4\xea\xfb\xff\x87\xc4\xf3\xff6\x9b\xea\xff/\x98\xe9\xff>\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa9\xd4\xf4\xff\xa5\xd1\xf0\xffB\x89\xbd\xff\x00U\x99\xff\x00[\x9d\xab\x00\\\x9e\x10\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x19\x00Z\x9d\xb9\x00W\x99\xff*\x81\xc0\xffc\xb5\xf2\xfff\xb6\xf3\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xffe\xb2\xec\xfff\xb2\xeb\xff}\xbd\xed\xff\xb8\xda\xf5\xff\xe1\xf0\xfb\xff\xe4\xf2\xfc\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe3\xf2\xfb\xff\xc8\xe6\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xd2\xe8\xfb\xff\xd8\xee\xfc\xff\xa5\xd2\xf7\xffG\xa2\xed\xff!\x90\xe9\xff0\x98\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\
xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa9\xd4\xf2\xff\xb0\xd9\xf5\xffX\x99\xc8\xff\x00W\x9b\xff\x00Y\x9c\xd1\x00\\\x9e,\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e9\x00Y\x9b\xdc\x03\\\x9d\xff9\x8f\xce\xffd\xb6\xf5\xffb\xb4\xf2\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffp\xb7\xeb\xfft\xb7\xeb\xff\x97\xc9\xf0\xff\xcf\xe7\xf8\xff\xe7\xf4\xfc\xff\xe4\xf2\xfb\xff\xe3\xf0\xfb\xff\xe9\xf4\xfb\xff\xd6\xec\xfc\xff\xc5\xe3\xfb\xff\xca\xe6\xfb\xff\xd7\xec\xfc\xff\xbb\xdd\xf9\xff]\xaf\xf0\xff\x1b\x8d\xe9\xff\x1f\x8e\xe9\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xaa\xd4\xf2\xff\xb7\xdb\xf6\xffv\xae\xd6\xff\n`\xa0\xff\x00W\x9a\xed\x00\\\x9eQ\x00\\\x9e\x00\x00\\\x9eP\x00Y\x9b\xf5\x0cd\xa6\xffF\x9c\xdc\xffb\xb6\xf6\xff_\xb2\xf1\xffa\xb3\xf1\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffs\xb8\xec\xffy\xba\xec\xffy\xba\xeb\xff\x83\xbf\xec\xff\xb0\xd6\xf2\xff\xe1\xf0\xfa\xff\xf5\xfa\xfe\xff\xfe\xff\xff\xff\xff\xff\xff\xff\xf1\xf8\xfe\xff\xd2\xe9\xfb\xffz\xbd\xf4\xff \x90\xeb\xff\x10\x87\xe9\xff 
\x8e\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xba\xdd\xf6\xff\x93\xc1\xe3\xff\x1eo\xaa\xff\x00X\x9b\xff\x00\\\x9ep\x00Z\x9c\xd6\x0bc\xa4\xffQ\xa7\xe7\xffb\xb8\xf9\xff\\\xb1\xf1\xff_\xb2\xf1\xffa\xb3\xf1\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffs\xb8\xec\xffy\xba\xec\xff~\xbd\xec\xff\x82\xbe\xec\xff\x84\xbf\xeb\xff\xa1\xce\xef\xff\xdf\xee\xf9\xff\xff\xff\xff\xff\xff\xff\xff\xff\xce\xe7\xfb\xffJ\xa5\xee\xff\x08\x83\xe9\xff\x12\x89\xea\xff\x1b\x8d\xea\xff!\x8f\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xae\xd5\xf1\xff\xbf\xe0\xf7\xff\xab\xd2\xed\xff\x1en\xaa\xff\x00X\x9b\xeb\x00[\x9d\x90\x02\\\x9e\xff\x1bs\xb4\xffM\xa2\xe2\xffb\xb7\xf6\xff`\xb3\xf2\xffa\xb3\xf1\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffs\xb8\xec\xffy\xba\xec\xff\x7f\xbd\xec\xff\x89\xc1\xed\xff\x92\xc6\xed\xff\x93\xc7\xed\xff\x95\xc7\xec\xff\xc0\xde\xf3\xff\x7f\xbf\xf4\xff\x0f\x87\xe9\xff\r\x87\xe9\xff\x1c\x8e\xea\xff\x1d\x8e\xea\xff\x1d\x8e\xea\xff 
\x8f\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xad\xd5\xf2\xff\xba\xdd\xf6\xff\x9b\xc7\xe7\xff=\x84\xb9\xff\x05]\x9f\xff\x00[\x9d\xb1\x00\\\x9e\x03\x00\\\x9ey\x00X\x9a\xf3\x05^\x9f\xff6\x8c\xcc\xff`\xb3\xf2\xffe\xb7\xf5\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffr\xb8\xec\xff|\xbb\xec\xff\x87\xc1\xed\xff\x8e\xc4\xed\xff\x92\xc6\xed\xff\x94\xc7\xed\xff\x9d\xcb\xed\xff\xb7\xd9\xf3\xffP\xaa\xf0\xff\x18\x8c\xea\xff\x13\x89\xea\xff\x1b\x8e\xea\xff \x90\xea\xff$\x92\xea\xff$\x91\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xaf\xd7\xf4\xff\xb1\xd8\xf4\xffn\xa8\xd2\xff\x0ec\xa2\xff\x00U\x99\xfd\x00\\\x9e\x91\x00\\\x9e\r\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e)\x00Z\x9c\xb2\x00V\x99\xff\x18o\xb0\xffR\xa4\xe2\xffj\xb9\xf6\xffg\xb7\xf2\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xffe\xb2\xec\xffm\xb5\xec\xffz\xbb\xed\xff\x84\xc0\xed\xff\x89\xc2\xed\xff\x8d\xc4\xed\xff\x8f\xc4\xec\xff\x9c\xcc\xee\xff\xc5\xe1\xf5\xff\xca\xe4\xf7\xffi\xb7\xf2\xffS\xac\xf0\xff(\x94\xeb\xff\x16\x8b\xea\xff 
\x90\xea\xff$\x92\xea\xff*\x95\xea\xff,\x96\xea\xff-\x97\xea\xff2\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa5\xd1\xf2\xff\xb1\xd9\xf6\xff\x94\xc4\xe7\xff2}\xb5\xff\x00U\x99\xff\x00Y\x9c\xc6\x00\\\x9e;\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e[\x00X\x9a\xe1\x04]\x9e\xff3\x87\xc5\xffd\xb4\xef\xffo\xbd\xf5\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xffZ\xad\xec\xff`\xaf\xec\xffj\xb5\xec\xffx\xba\xed\xff\x80\xbe\xed\xff\x83\xc0\xed\xff\x89\xc2\xed\xff\x8a\xc2\xec\xff\x9b\xcb\xef\xff\xc4\xe0\xf6\xff\xd0\xe7\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xff[\xb0\xf1\xff^\xb1\xf1\xff2\x9a\xec\xff\x1b\x8d\xea\xff#\x92\xea\xff)\x95\xea\xff/\x98\xea\xff4\x9a\xea\xff6\x9b\xea\xff9\x9c\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\xa6\xd3\xf5\xff\xa4\xd1\xf2\xff^\x9e\xcc\xff\x0b`\xa1\xff\x00V\x99\xef\x00\\\x9er\x00\\\x9e\x04\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x17\x00[\x9d\xa0\x00V\x98\xff\x12i\xaa\xffM\x9e\xd9\xffr\xbe\xf5\xffs\xbc\xf3\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffT\xa9\xec\xff\\\xad\xec\xffi\xb4\xed\xffs\xba\xed\xffy\xbb\xed\xff\x7f\xbe\xed\xff\x83\xc0\xed\xff\x85\xc0\xec\xff\x9e\xcd\xf0\xff\xc3\xe1\xf7\xff\xcb\xe4\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xffa\xb3\xf1\xfff\xb5\xf1\xffA\xa2\xed\xff 
\x90\xea\xff(\x94\xea\xff.\x98\xea\xff4\x9a\xea\xff;\x9d\xea\xff?\x9f\xea\xffA\xa0\xea\xffE\xa2\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x9c\xce\xf3\xff\xa5\xd4\xf6\xff~\xb6\xdf\xff%t\xaf\xff\x00U\x99\xff\x00Z\x9c\xb6\x00\\\x9e&\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eI\x00X\x9b\xd3\x00X\x9b\xff)}\xbb\xffg\xb2\xea\xff|\xc2\xf6\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffH\xa4\xec\xffO\xa7\xec\xff[\xae\xec\xffh\xb3\xed\xffn\xb7\xed\xffs\xba\xed\xffy\xbb\xed\xff~\xbd\xed\xff\x80\xbf\xec\xff\xa1\xcf\xf1\xff\xc3\xe1\xf7\xff\xc6\xe3\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffe\xb5\xf1\xffm\xb9\xf1\xffQ\xaa\xee\xff)\x94\xea\xff,\x97\xea\xff4\x9a\xea\xff:\x9d\xea\xffA\xa0\xea\xffG\xa3\xea\xffJ\xa5\xea\xffM\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x9d\xd0\xf6\xff\x93\xc7\xed\xffF\x8d\xc2\xff\x02Z\x9d\xff\x00W\x9a\xe4\x00\\\x9e`\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\n\x00\\\x9e\x81\x00V\x99\xf6\x0cc\xa4\xffM\x9a\xd3\xffz\xbf\xf3\xffD\xa3\xee\xff;\x9e\xec\xffB\xa2\xec\xffM\xa6\xec\xff[\xae\xed\xffd\xb2\xed\xffh\xb4\xed\xffm\xb6\xed\xffs\xba\xed\xffw\xba\xed\xff~\xbd\xed\xff\xa5\xd2\xf3\xff\xc1\xe0\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffs\xbb\xf1\xffa\xb2\xef\xff3\x9a\xeb\xff0\x98\xea\xff:\x9d\xea\xffA\xa0\xea\xffG\xa3\xea\xffM\xa6\xea\xffR\xa8\xea\xffV\xa9\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xeb\xff\x94\xcc\xf5\xffn\xad\xda\xff\x18k\xa9\xff\x00T\x98\xff\x00[\x9d\x9b\x00\\\x9e\x18\x00\\\x
9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e/\x00Y\x9c\xbd\x00W\x99\xff%z\xb7\xffB\xa0\xea\xff;\x9e\xec\xffK\xa6\xed\xffY\xad\xed\xff_\xaf\xed\xffc\xb2\xed\xffh\xb4\xed\xffm\xb6\xed\xffp\xb8\xec\xff}\xbd\xee\xff\xa8\xd4\xf4\xff\xbe\xdf\xf8\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xffx\xbe\xf1\xffq\xb9\xf0\xff@\xa0\xeb\xff5\x9b\xea\xffA\xa0\xea\xffG\xa3\xea\xffM\xa6\xea\xffS\xa9\xea\xffZ\xab\xea\xff]\xae\xea\xff]\xac\xea\xffk\xb3\xeb\xff:\x87\xbf\xff\x00X\x9a\xff\x00X\x9b\xd1\x00\\\x9eD\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x03\x00[\x9d\xb6\x00[\x9e\xffi\xb2\xe8\xffq\xbc\xf4\xffT\xaa\xed\xffV\xaa\xed\xff^\xaf\xed\xffc\xb2\xed\xffh\xb4\xed\xffj\xb5\xec\xffz\xbd\xee\xff\xa9\xd5\xf5\xff\xb9\xdd\xf8\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff~\xbf\xf1\xff|\xbe\xf0\xffM\xa6\xeb\xff;\x9d\xea\xffG\xa3\xea\xffM\xa6\xea\xffS\xa9\xea\xffW\xaa\xea\xff]\xae\xea\xff\x84\xc1\xee\xff\x9c\xc9\xea\xff\ta\xa1\xff\x00Y\x9b\xcd\x00\\\x9e\x0e\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00
\x00\\\x9e\x00\x00\\\x9e\x00\x00[\x9d\x8c\x04_\xa1\xff~\xbc\xea\xff\x9c\xd2\xfa\xff\x8a\xc7\xf5\xffg\xb4\xef\xffY\xad\xec\xffa\xb1\xed\xffd\xb2\xec\xff|\xbe\xef\xff\xa9\xd4\xf6\xff\xb3\xda\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x83\xc2\xf1\xff\x88\xc4\xf1\xff^\xae\xed\xffB\xa0\xea\xffK\xa5\xea\xffN\xa6\xea\xffd\xb0\xeb\xff\x97\xca\xef\xff\xb7\xda\xf3\xff\xa9\xd0\xeb\xff\x0ef\xa4\xff\x00Z\x9c\xa7\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00[\x9d\x92\x04_\xa1\xff\x82\xbf\xee\xff\x98\xd0\xfb\xff\x99\xce\xf7\xff\x9a\xce\xf7\xff\x81\xc1\xf2\xffd\xb2\xed\xff~\xbf\xf0\xff\xa8\xd5\xf7\xff\xae\xd8\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x89\xc5\xf1\xff\x91\xc9\xf1\xffo\xb7\xed\xffN\xa6\xea\xffu\xb9\xed\xff\xa3\xd0\xf0\xff\xaf\xd6\xf1\xff\xb5\xda\xf4\xff\xaf\xd5\xef\xff\x0ef\xa4\xff\x00Z\x9c\xaa\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9em\x01[\x9d\xffT\x9c\xd1\xff\x98\xd0\xfb\xff\x9b\xd1\xfa\xff\x99\xce\xf7\xff\x9f\xd2\xf7\xff\x9a\xcf\xf6\xff\xa4\xd4\xf7\xff\xa9\xd
6\xf7\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x8e\xc7\xf1\xff\x96\xcb\xf1\xff\x93\xc8\xf0\xff\xa3\xd0\xf1\xff\xa7\xd3\xf1\xff\xb3\xd9\xf5\xff\xae\xd4\xf0\xffZ\x98\xc7\xff\x03\\\x9d\xff\x00[\x9d{\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x16\x00[\x9d\xcd\x00W\x99\xff*z\xb6\xff\x85\xc0\xec\xff\xa3\xd6\xfd\xff\x9d\xd1\xf8\xff\xa0\xd3\xf7\xff\xa4\xd4\xf7\xff\xa8\xd5\xf7\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x8e\xc7\xf1\xff\x94\xca\xf1\xff\x9c\xcd\xf1\xff\xa3\xd1\xf2\xff\xb1\xd9\xf6\xff\x87\xba\xdf\xff!p\xac\xff\x00T\x99\xff\x00[\x9e\xb9\x00\\\x9e\x13\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00\\\x9e\x81\x00V\x99\xf5\na\xa2\xff\\\x9f\xd2\xff\xa0\xd3\xf9\xff\xa6\xd7\xfb\xff\xa4\xd4\xf7\xff\xa8\xd5\xf7\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\x
ff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x8e\xc7\xf1\xff\x94\xca\xf1\xff\xa3\xd2\xf5\xff\x9e\xcd\xf0\xffQ\x95\xc6\xff\x03[\x9d\xff\x00W\x9a\xe9\x00\\\x9ei\x00\\\x9e\x01\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e.\x00Z\x9c\xb6\x00U\x99\xff(x\xb3\xff\x81\xbc\xe6\xff\xae\xdb\xfc\xff\xab\xd7\xf9\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x92\xca\xf3\xff\x9c\xd0\xf5\xffr\xae\xda\xff\x1dn\xab\xff\x00U\x99\xff\x00[\x9d\x9f\x00\\\x9e\x1d\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9ed\x00V\x99\xe9\x03[\x9e\xffL\x92\xc6\xff\xa1\xd0\xf3\xff\xb6\xde\xfc\xff\xb2\xda\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x82\xc2\xf2\xff\x90\xca\xf6\xff\x82\xbe\xe9\xff8\x84\xbc\x
ff\x00X\x9b\xff\x00W\x9b\xd9\x00\\\x9eO\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x1c\x00[\x9d\x9f\x00T\x98\xff\x18j\xa8\xfft\xaf\xd9\xff\xb8\xde\xfa\xff\xbb\xdf\xfa\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff\x80\xc2\xf4\xff\x85\xc4\xf3\xffR\x9a\xcf\xff\rc\xa3\xff\x00U\x99\xf9\x00[\x9e\x88\x00\\\x9e\x0f\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eG\x00X\x9b\xd0\x00V\x9a\xff=\x85\xbb\xff\xa3\xce\xee\xff\xc6\xe5\xfd\xff\xbf\xdf\xf8\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffo\xba\xf2\xff|\xc2\xf6\xffj\xb0\xe4\xff%w\xb4\xff\x00U\x99\xff\x00Z\x9c\xbc\x00\\\x9e3\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x0
0\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x08\x00\\\x9e{\x00T\x99\xf5\rb\xa2\xffk\xa5\xd0\xff\xbe\xdf\xf7\xff\xcb\xe6\xfb\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffn\xbc\xf5\xffm\xb8\xf0\xff=\x8d\xc8\xff\x06^\x9f\xff\x00W\x99\xe9\x00\\\x9ed\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e0\x00Y\x9c\xbc\x00T\x98\xff*v\xb0\xff\x95\xc2\xe2\xff\xd0\xea\xfc\xff\xcd\xe6\xf9\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xffa\xb4\xf3\xffj\xb9\xf5\xffN\x9f\xda\xff\x15k\xab\xff\x00V\x98\xff\x00Z\x9d\xa7\x00\\\x9e 
\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9ec\x00V\x9a\xe4\x01Y\x9c\xffS\x93\xc2\xff\xbf\xdd\xf2\xff\xd9\xee\xfc\xff\xc9\xe3\xf7\xffe\xb5\xf2\xff^\xb4\xf6\xffZ\xad\xeb\xff(}\xbc\xff\x00Y\x9b\xff\x00X\x9b\xd7\x00\\\x9eO\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x18\x00[\x9d\x9a\x00S\x98\xff\x1cl\xa8\xff\x8d\xb9\xda\xff\xd1\xe9\xfb\xffk\xba\xf5\xff;\x92\xd3\xff\x0cd\xa5\xff\x00V\x99\xf7\x00[\x9d\x85\x00\\\x9e\r\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x
00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eA\x00X\x9b\xd2\x00V\x9a\xff?\x87\xbb\xff&z\xb8\xff\x00W\x99\xff\x00Y\x9c\xc3\x00\\\x9e1\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00\\\x9e\x81\x00V\x9a\xf0\x00X\x9a\xf2\x00\\\x9eu\x00\\\x9e\x03\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\
\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0f\x00[\x9et\x00\\\x9ex\x00\\\x9e\x0b\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xf
f\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00', gtk.gdk.COLORSPACE_RGB, True, 8, 64, 64, 256)
window_icon = gtk.gdk.pixbuf_new_from_data('\x00\\\x9e\x00\x00\\\x9e\x00\x00^\xa0\x00\x00V\x99\x00\x00L\x91g\x00N\x93q\x00X\x9c\x00\x00^\x9f\x00\x00]\x9f\x00\x00Y\x9c\x00\x00P\x94o\x00M\x92i\x00V\x99\x00\x00^\xa0\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00^\xa0\x00\x00T\x97\x00\x00F\x8c_1\x81\xba\xff+|\xb6\xff\x00F\x8e?\x00\\\x9e\x00\x00\\\x9e\x00\x00J\x8e;$y\xb6\xff-\x7f\xbc\xff\x00E\x8cb\x00R\x97\x00\x00^\xa0\x00\x00\\\x9e\x00\x00[\x9d\x00\x00I\x8c*\x05X\x9b\xc0P\x9b\xd5\xff\xa7\xdd\xff\xff\xbb\xe5\xff\xff@\x86\xbb\xff\x00>\x89D\x00D\x89B\'{\xbb\xff\x89\xcf\xff\xff\xa2\xdb\xff\xffg\xa6\xd5\xff\x07Y\x9b\xc3\x00C\x8c,\x00[\x9d\x00\x05\\\x9e\x971\x84\xc5\xffk\xbb\xf6\xff\x8e\xd0\xff\xff\x86\xc3\xf0\xff\xa2\xd2\xf5\xff\xc4\xe7\xff\xffP\x90\xc0\xff\x15u\xbf\xfff\xbf\xff\xffx\xc1\xf6\xff\x80\xc0\xf0\xff\xb0\xdf\xff\xff\xa9\xd7\xf6\xff\\\x97\xc5\xff\x0b]\x9e\x98\x08[\x9e\xdcX\xb0\xf0\xff\x84\xd0\xff\xffx\xbc\xf0\xff\x83\xc2\xf0\xff\x88\xc3\xee\xff\xb1\xd7\xf3\xff\xf9\xff\xff\xff\xca\xec\xff\xffm\xba\xf3\xffX\xae\xee\xff{\xbe\xf0\xff\x91\xc7\xf0\xff\xd2\xf2\xff\xff\xa6\xd4\xf0\xff\x11]\x9e\xde\x00T\x96\x00\x00N\x91\x9eD\x98\xd5\xff\x84\xc9\xfc\xff\x85\xc3\xf1\xff\xb7\xdb\xf6\xff\xe9\xf4\xfc\xff\xe9\xf5\xfd\xff\xdb\xee\xfd\xff\xdf\xef\xfc\xff\xa8\xd5\xf6\xff|\xbf\xf1\xff\xa3\xd6\xfc\xffl\xaa\xd6\xff\x00J\x91\xa1\x00Q\x96\x00\x00^\xa0\x00\x00T\x97\x00\x008\x7f\x9eC\x94\xd1\xff\xde\xf6\xff\xff\xf5\xfc\xff\xff\xe0\xef\xfb\xff\xe0\xf0\xfb\xff\xc8\xe5\xfb\xff\xcf\xe7\xfb\xff\xff\xff\xff\xff\xfe\xff\xff\xffV\x9d\xd2\xff\x002\x80\xa2\x00Q\x96\x00\x00_\xa0\x00\x00W\x99\x00\x00I\x8cq9\x89\xc3\xf1Y\xb0\xf2\xffR\xaa\xef\xff\xbc\xde\xf7\xff\xf9\xfc\xfe\xff\xe3\xf2\xfb\xff\xd3\xea\xfc\xff\xf5\xfb\xff\xff\xb7\xdb\xf7\xffd\xb1\xed\xff\x86\xc3\xf2\xffR\x93\xc4\xf3\x00D\x8du\x00T\x99\x00\x06Z\x9d\xb3I\xa0\xe0\xff\x8a\xd2\xff\xffe\xb5\xf2\xff/\x97\xe8\xffK\xa4\xe9\xff\x9c\xcd\xf0\xff\xf6\xf9\xfc\xff\xd6\xec\xfc\xffX\xab\xf0\xff\x15\x8a\xe6\xff9\x9b\xe6\xff\x8c\xc6\xf1\xff\xd1\xf0\xff\
xff\x8b\xbe\xe1\xff\x0e\\\x9d\xb6\x07]\x9f\xc1D\x98\xd9\xff\x85\xcd\xff\xffm\xbc\xf9\xff;\x9d\xe9\xff^\xae\xec\xffl\xb3\xe8\xff\xb7\xd9\xf2\xffC\xa2\xef\xff\x00s\xe5\xff3\x99\xea\xffL\xa3\xe7\xff\x96\xce\xf9\xff\xc7\xeb\xff\xff\x81\xb3\xd9\xff\x10_\x9f\xc4\x00X\x9a\x00\x00H\x8bU\x1eq\xad\xeeR\xa8\xe8\xffA\xa4\xf1\xff`\xae\xea\xff\xa9\xd3\xf2\xff\xc8\xe4\xf8\xffh\xb7\xf2\xff@\xa2\xed\xff,\x95\xe8\xffQ\xaa\xef\xff|\xba\xe9\xff*u\xae\xf1\x00A\x8bX\x00V\x9a\x00\x00\\\x9e\x00\x00]\x9f\x00\x00>\x84\x0c"v\xb3\xff\x9b\xdb\xff\xff\x97\xcf\xf8\xff\xce\xe6\xf8\xff\xc5\xe1\xf7\xffe\xb5\xf1\xfft\xbc\xf0\xffu\xbe\xf5\xff\xa9\xde\xff\xff0{\xb0\xff\x00:\x85\x0f\x00]\x9f\x00\x00]\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00Y\x9c\x00\x02S\x97zH\x89\xbf\xff\xb8\xe3\xfd\xff\xe8\xfb\xff\xff\xc2\xdf\xf7\xff`\xb3\xf1\xff\x82\xcb\xff\xff\xa1\xd3\xf7\xffJ\x88\xb8\xff\x00S\x96r\x00Z\x9d\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00]\x9f\x00\x00[\x9d\x00\x00C\x8b*\x08W\x9b\xc5\x8c\xb9\xda\xff\xea\xfd\xff\xff\x80\xcb\xff\xffG\x97\xd4\xff\x03W\x99\xbc\x00E\x8d"\x00[\x9e\x00\x00]\x9f\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00_\xa0\x00\x00Q\x96\x00\x00C\x8di>\x88\xbd\xff,\x7f\xbb\xff\x00G\x8c`\x00T\x98\x00\x00^\xa0\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00_\xa0\x00\x00R\x98\x00\x00I\x92r\x00P\x92n\x00V\x99\x00\x00^\xa0\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00', gtk.gdk.COLORSPACE_RGB, True, 8, 16, 16, 64)
# Non-empty only when an X display is present; '' (falsy) when headless,
# in which case the GTK installer UI guarded by `if GUI_AVAILABLE:` is skipped.
GUI_AVAILABLE = os.environ.get("DISPLAY", '')
if GUI_AVAILABLE:
def download():
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import pango
import webbrowser
gtk.gdk.threads_init()
load_serialized_images()
global FatalVisibleError
def FatalVisibleError(s):
    """Show a modal error dialog with message `s`, tear down the GTK
    main loop, and terminate the process with a non-zero exit code.

    Never returns.
    """
    dialog = gtk.MessageDialog(parent=None,
                               flags=gtk.DIALOG_MODAL,
                               type=gtk.MESSAGE_ERROR,
                               buttons=gtk.BUTTONS_OK,
                               message_format=s)
    dialog.set_title("Error")
    dialog.run()
    gtk.main_quit()
    sys.exit(-1)
class GeneratorTask(object):
    """Drive a generator on a background daemon thread, marshalling every
    yielded value back to the GTK main loop via gobject.idle_add.

    Parameters
    ----------
    generator : callable returning an iterator/generator
    loop_callback : called (on the main loop) with each yielded tuple
    on_done : optional; scheduled on the main loop after normal exhaustion
    on_exception : optional; scheduled on the main loop with the exception
        instance if the generator raises
    """

    def __init__(self, generator, loop_callback, on_done=None, on_exception=None):
        self.generator = generator
        self.loop_callback = loop_callback
        self.on_done = on_done
        self.on_exception = on_exception
        # Also initialised here so stop() before start() does not create the
        # attribute for the first time on a different thread.
        self._stopped = False

    def _run(self, *args, **kwargs):
        # Worker-thread body; do not call directly from the main loop.
        self._stopped = False
        try:
            for ret in self.generator(*args, **kwargs):
                # Normalise the yielded value to an argument tuple.
                if ret is None:
                    ret = ()
                if not isinstance(ret, tuple):
                    ret = (ret,)
                gobject.idle_add(self.loop_callback, *ret)
                if self._stopped:
                    # Raises SystemExit in this thread only; deliberately
                    # not caught by the `except Exception` below.
                    thread.exit()
        # BUG FIX: was the Python-2-only `except Exception, ex` / `print ex`;
        # the `as` form and call form behave identically on 2.6+ and 3.x.
        except Exception as ex:
            print(ex)
            if self.on_exception is not None:
                gobject.idle_add(self.on_exception, ex)
        else:
            if self.on_done is not None:
                gobject.idle_add(self.on_done)

    def start(self, *args, **kwargs):
        """Spawn the daemon worker thread. Returns None (not self)."""
        t = threading.Thread(target=self._run, args=args, kwargs=kwargs)
        t.daemon = True
        t.start()

    def stop(self):
        """Ask the worker to exit at the next yield point."""
        self._stopped = True
class DownloadDialog(gtk.Dialog):
def handle_delete_event(self, wid, ev, data=None):
    """Route the window-manager close event through the Cancel path."""
    return self.handle_cancel(wid)
def handle_dont_show_toggle(self, button, data=None):
    """Persist the inverse of the checkbox state into the autostart setting."""
    show_again = not button.get_active()
    reroll_autostart(show_again)
def handle_cancel(self, button):
if self.task:
self.task.stop()
if self.download:
self.download.cancel()
gtk.main_quit()
self.user_cancelled = True
def handle_ok(self, button):
    """Begin the download: swap the label for the progress bar and run the
    copy on a background GeneratorTask, reporting progress to the UI."""
    # begin download
    self.ok.hide()
    self.download = DownloadState()
    self.label.hide()
    if self.dont_show_again_align is not None:
        self.dont_show_again_align.hide()
    self.progress.show()
    def download_progress(progress, status):
        # A falsy status means the transfer failed; stop the worker.
        if not status:
            self.task.stop()
        self.update_progress(DOWNLOADING, progress)
    def finished():
        self.update_progress(DOWNLOADING, 1.0)
        self.unpack_dropbox()
    def error(ex):
        FatalVisibleError(ERROR_CONNECTING)
    self.update_progress(DOWNLOADING, 0)
    # BUG FIX: GeneratorTask.start() returns None, so the original
    # `self.task = GeneratorTask(...).start()` left self.task as None and
    # handle_cancel()/download_progress() could never stop the worker.
    # Keep the task object first, then start it.
    self.task = GeneratorTask(self.download.copy_data,
                              download_progress,
                              finished, error)
    self.task.start()
def update_progress(self, text, fraction):
self.progress.set_text(text % int(fraction*100))
self.progress.set_fraction(fraction)
def unpack_dropbox(self):
    """Unpack the downloaded archive on a background GeneratorTask,
    reporting progress; quits the main loop on success and shows a fatal
    error on signature or connection failure."""
    def unpack_progress(name, i, total):
        self.update_progress(UNPACKING, float(i)/total)
    def finished():
        self.update_progress(UNPACKING, 1.0)
        gtk.main_quit()
    def error(ex):
        if isinstance(ex, SignatureVerifyError):
            FatalVisibleError(ERROR_SIGNATURE)
        else:
            FatalVisibleError(ERROR_CONNECTING)
    # BUG FIX: GeneratorTask.start() returns None, so the original
    # `self.task = GeneratorTask(...).start()` stored None and
    # handle_cancel() could never stop the unpack worker.
    self.task = GeneratorTask(self.download.unpack,
                              unpack_progress,
                              finished, error)
    self.task.start()
def mouse_down(self, widget, event):
if self.hovering:
self.clicked_link = True
def mouse_up(self, widget, event):
if self.clicked_link:
webbrowser.open(LINK)
self.clicked_link = False
def label_motion(self, widget, event):
offx, offy = self.label.get_layout_offsets()
layout = self.label.get_layout()
index = layout.xy_to_index(int((offx+event.x)*pango.SCALE),
int((offy+event.y)*pango.SCALE))[0]
link_index = layout.get_text().find(LINK)
if index >= link_index and index < link_index+len(LINK):
self.hovering = True
self.label_box.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND2))
else:
self.hovering = False
self.label_box.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
def __init__(self):
super(DownloadDialog, self).__init__(parent = None,
title = "Dropbox Installation")
self.download = None
self.hovering = False
self.clicked_link = False
self.user_cancelled = False
self.task = None
self.ok = ok = gtk.Button(stock=gtk.STOCK_OK)
ok.connect('clicked', self.handle_ok)
self.action_area.add(ok)
ok.show()
cancel = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel.connect('clicked', self.handle_cancel)
self.action_area.add(cancel)
cancel.show()
self.connect('delete_event', self.handle_delete_event)
self.box_logo = gtk.image_new_from_pixbuf(box_logo_pixbuf)
self.box_logo.show()
self.set_icon(window_icon)
self.progress = gtk.ProgressBar()
self.progress.set_property('width-request', 300)
self.label = gtk.Label()
GPG_WARNING_MSG = (u"\n\n" + GPG_WARNING) if not gpgme else u""
self.label.set_markup('%s <span foreground="#000099" underline="single" weight="bold">%s</span>\n\n%s%s' % (INFO, LINK, WARNING, GPG_WARNING_MSG))
self.label.set_line_wrap(True)
self.label.set_property('width-request', 300)
self.label.show()
self.label_box = gtk.EventBox()
self.label_box.add(self.label)
self.label_box.connect("button-release-event", self.mouse_up)
self.label_box.connect("button-press-event", self.mouse_down)
self.label_box.connect("motion-notify-event", self.label_motion)
self.label_box.show()
def on_realize(widget):
self.label_box.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.label_box.connect("realize", on_realize)
self.hbox = gtk.HBox(spacing=10)
self.hbox.set_property('border-width',10)
self.hbox.pack_start(self.box_logo, False, False)
self.hbox.pack_start(self.label_box, False, False)
self.hbox.pack_start(self.progress, False, False)
self.hbox.show()
self.vbox.add(self.hbox)
self.dont_show_again_align = None
try:
if can_reroll_autostart():
dont_show_again = gtk.CheckButton("_Don't show this again")
dont_show_again.connect('toggled', self.handle_dont_show_toggle)
dont_show_again.show()
self.dont_show_again_align = gtk.Alignment(xalign=1.0, yalign=0.0, xscale=0.0, yscale=0.0)
self.dont_show_again_align.add(dont_show_again)
self.dont_show_again_align.show()
hbox = gtk.HBox()
hbox.set_property('border-width', 10)
hbox.pack_start(self.dont_show_again_align, True, True)
hbox.show()
self.vbox.add(hbox)
self.set_resizable(False)
except:
traceback.print_exc()
self.ok.grab_focus()
        dialog = DownloadDialog()
        dialog.show()
        # Blocks until the dialog quits the main loop (install finished or
        # the user cancelled).
        gtk.main()
        if dialog.user_cancelled:
            raise Exception("user cancelled download!!!")
else:
    def download():
        # Console fallback installer: stream the daemon tarball, verify and
        # unpack it, drawing progress in place with ANSI cursor save/restore
        # when stdout is a tty.
        global FatalVisibleError
        def FatalVisibleError(s):
            console_print(u"\nError: %s" % s, f=sys.stderr)
            sys.exit(-1)
        # ANSI escape sequences used for in-place progress updates.
        ESC = "\x1b"
        save = ESC+"7"
        unsave = ESC+"8"
        clear = ESC+"[2J"
        erase_to_start = ESC+"[1K"
        write = sys.stdout.write
        flush = sys.stdout.flush
        last_progress = [None, None]
        def setprogress(text, frac):
            # Skip redundant redraws; on a tty rewrite the current line,
            # otherwise emit one line per update.
            if last_progress == [text, frac]:
                return
            if sys.stdout.isatty():
                write(erase_to_start)
                write(unsave)
            console_print(text % int(100*frac), linebreak=not sys.stdout.isatty())
            if sys.stdout.isatty():
                flush()
            last_progress[0], last_progress[1] = text, frac
        console_print()
        if sys.stdout.isatty():
            write(save)
            flush()
        console_print(u"%s %s\n" % (INFO, LINK))
        GPG_WARNING_MSG = (u"\n%s" % GPG_WARNING) if not gpgme else u""
        if not yes_no_question("%s%s" % (WARNING, GPG_WARNING_MSG)):
            return
        download = DownloadState()
        try:
            # copy_data yields (progress_fraction, still_ok) pairs.
            for progress, status in download.copy_data():
                if not status:
                    break
                setprogress(DOWNLOADING, progress)
        except Exception:
            FatalVisibleError(ERROR_CONNECTING)
        else:
            setprogress(DOWNLOADING, 1.0)
            console_print()
            write(save)
        try:
            # unpack yields (member_name, index, total) per archive member.
            for name, i, total in download.unpack():
                setprogress(UNPACKING, float(i)/total)
        except SignatureVerifyError:
            FatalVisibleError(ERROR_SIGNATURE)
        except Exception:
            FatalVisibleError(ERROR_CONNECTING)
        else:
            setprogress(UNPACKING, 1.0)
            console_print()
class CommandTicker(threading.Thread):
    # Background thread that animates a small "[.  ]" spinner on stderr
    # while a potentially slow daemon command is in flight.
    def __init__(self):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()
    def stop(self):
        # Request termination; run() notices within 0.25s.
        self.stop_event.set()
    def run(self):
        ticks = ['[. ]', '[.. ]', '[...]', '[ ..]', '[ .]', '[ ]']
        i = 0
        first = True
        while True:
            self.stop_event.wait(0.25)
            if self.stop_event.isSet(): break
            if i == len(ticks):
                first = False
                i = 0
            # Stay silent during the first cycle so fast commands never
            # show a spinner at all.
            if not first:
                sys.stderr.write("\r%s\r" % ticks[i])
                sys.stderr.flush()
            i += 1
        sys.stderr.flush()
class DropboxCommand(object):
    # Client for the dropbox daemon's line-oriented command socket at
    # ~/.dropbox/command_socket.  Wire format (utf8-encoded lines): the
    # command name, tab-separated "key\tvalue..." argument lines, then
    # "done"; the reply is "ok"/error followed by lines and "done".
    class CouldntConnectError(Exception): pass
    class BadConnectionError(Exception): pass
    class EOFError(Exception): pass
    class CommandError(Exception): pass
    def __init__(self, timeout=5):
        # timeout=None blocks indefinitely (used by slow commands such as
        # "exclude add/remove").
        self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.s.settimeout(timeout)
        try:
            self.s.connect(os.path.expanduser(u'~/.dropbox/command_socket'))
        except socket.error, e:
            raise DropboxCommand.CouldntConnectError()
        self.f = self.s.makefile("r+", 4096)
    def close(self):
        self.f.close()
        self.s.close()
    def __readline(self):
        # One decoded reply line; '' means the daemon closed the socket.
        try:
            toret = self.f.readline().decode('utf8').rstrip(u"\n")
        except socket.error, e:
            raise DropboxCommand.BadConnectionError()
        if toret == '':
            raise DropboxCommand.EOFError()
        else:
            return toret
    # atttribute doesn't exist, i know what you want
    def send_command(self, name, args):
        # Serialize one request; iterable values become tab-joined lists.
        self.f.write(name.encode('utf8'))
        self.f.write(u"\n".encode('utf8'))
        self.f.writelines((u"\t".join([k] + (list(v)
                                             if hasattr(v, '__iter__') else
                                             [v])) + u"\n").encode('utf8')
                          for k,v in args.iteritems())
        self.f.write(u"done\n".encode('utf8'))
        self.f.flush()
        # Start a ticker
        ticker_thread = CommandTicker()
        ticker_thread.start()
        # This is the potentially long-running call.
        try:
            ok = self.__readline() == u"ok"
        except KeyboardInterrupt:
            raise DropboxCommand.BadConnectionError("Keyboard interruption detected")
        finally:
            # Tell the ticker to stop.
            ticker_thread.stop()
            ticker_thread.join()
        if ok:
            # Success: parse up to 20 "key\tvalue..." lines into a dict;
            # more than that means the connection is wedged.
            toret = {}
            for i in range(21):
                if i == 20:
                    raise Exception(u"close this connection!")
                line = self.__readline()
                if line == u"done":
                    break
                argval = line.split(u"\t")
                toret[argval[0]] = argval[1:]
            return toret
        else:
            # Failure: collect the error lines and raise them verbatim.
            problems = []
            for i in range(21):
                if i == 20:
                    raise Exception(u"close this connection!")
                line = self.__readline()
                if line == u"done":
                    break
                problems.append(line)
            raise DropboxCommand.CommandError(u"\n".join(problems))
    # this is the hotness, auto marshalling
    def __getattr__(self, name):
        # Unknown attributes become daemon commands:
        # dc.foo(a=1) == dc.send_command(u"foo", {'a': 1}); the generated
        # callable is cached on the instance.
        try:
            return super(DropboxCommand, self).__getattr__(name)
        except:
            def __spec_command(**kw):
                return self.send_command(unicode(name), kw)
            self.__setattr__(name, __spec_command)
            return __spec_command
# Registries of CLI subcommands and their aliases, populated by the
# @command and @alias decorators below.
commands = {}
aliases = {}
def command(meth):
    # Register meth as a CLI command under its function name.  The
    # docstring doubles as the runtime help text, so it must exist.
    global commands, aliases
    assert meth.__doc__, "All commands need properly formatted docstrings (even %r!!)" % meth
    if hasattr(meth, 'im_func'): # bound method, if we ever have one
        meth = meth.im_func
    commands[meth.func_name] = meth
    # Fold any aliases already registered for this function into its help.
    meth_aliases = [unicode(alias) for alias in aliases.iterkeys() if aliases[alias].func_name == meth.func_name]
    if meth_aliases:
        meth.__doc__ += u"\nAliases: %s" % ",".join(meth_aliases)
    return meth
def alias(name):
    # Decorator factory: register the wrapped function under the extra
    # command name *name*.  An alias may not shadow a real command.
    def register(meth):
        global commands, aliases
        assert name not in commands, "This alias is the name of a command."
        aliases[name] = meth
        return meth
    return register
def requires_dropbox_running(meth):
    # Decorator: run meth only when the daemon is up; otherwise print a
    # notice and implicitly return None.
    def newmeth(*n, **kw):
        if is_dropbox_running():
            return meth(*n, **kw)
        else:
            console_print(u"Dropbox isn't running!")
    # Copy name/docstring by hand so @command (which reads both) still
    # sees the wrapped function's identity.
    newmeth.func_name = meth.func_name
    newmeth.__doc__ = meth.__doc__
    return newmeth
def start_dropbox():
    # Launch ~/.dropbox-dist/dropboxd detached (own session, stdout to
    # /dev/null) and poll until the command socket answers.  Returns True
    # once the daemon responds, False if it isn't installed or never
    # comes up within the timeout.
    db_path = os.path.expanduser(u"~/.dropbox-dist/dropboxd").encode(sys.getfilesystemencoding())
    if os.access(db_path, os.X_OK):
        f = open("/dev/null", "w")
        # we don't reap the child because we're gonna die anyway, let init do it
        # NOTE(review): f is never closed; the child holds the fd and this
        # process exits shortly after, so it is deliberate-looking — confirm.
        a = subprocess.Popen([db_path], preexec_fn=os.setsid, cwd=os.path.expanduser("~"),
                             stderr=sys.stderr, stdout=f, close_fds=True)
        # in seconds
        interval = 0.5
        wait_for = 60
        for i in xrange(int(wait_for / interval)):
            if is_dropbox_running():
                return True
            # back off from connect for a while
            time.sleep(interval)
        return False
    else:
        return False
# Extracted and modified from os.cmd.Cmd
def columnize(list, display_list=None, display_width=None):
    # Print `list` in as few rows as fit within display_width columns
    # (defaulting to the terminal width from `stty size`).  display_list,
    # when given, holds the decorated (e.g. colorized) counterparts of
    # `list`; widths are computed from the plain strings so ANSI escapes
    # don't skew the layout.  NOTE: the parameter shadows builtin list().
    if not list:
        console_print(u"<empty>")
        return
    non_unicode = [i for i in range(len(list)) if not (isinstance(list[i], unicode))]
    if non_unicode:
        raise TypeError, ("list[i] not a string for i in %s" %
                          ", ".join(map(unicode, non_unicode)))
    if not display_width:
        d = os.popen('stty size', 'r').read().split()
        if d:
            display_width = int(d[1])
        else:
            # Not a terminal: fall back to one item per line.
            for item in list:
                console_print(item)
            return
    if not display_list:
        display_list = list
    size = len(list)
    if size == 1:
        console_print(display_list[0])
        return
    # Try row counts from 1 upward until every column fits; items are laid
    # out column-major (i = row + nrows*col).
    for nrows in range(1, len(list)):
        ncols = (size+nrows-1) // nrows
        colwidths = []
        totwidth = -2
        for col in range(ncols):
            colwidth = 0
            for row in range(nrows):
                i = row + nrows*col
                if i >= size:
                    break
                x = list[i]
                colwidth = max(colwidth, len(x))
            colwidths.append(colwidth)
            totwidth += colwidth + 2
            if totwidth > display_width:
                break
        if totwidth <= display_width:
            break
    else:
        # Nothing fits: print a single column.
        nrows = len(list)
        ncols = 1
        colwidths = [0]
    lines = []
    for row in range(nrows):
        texts = []
        display_texts = []
        for col in range(ncols):
            i = row + nrows*col
            if i >= size:
                x = ""
                y = ""
            else:
                x = list[i]
                y = display_list[i]
            texts.append(x)
            display_texts.append(y)
        while texts and not texts[-1]:
            del texts[-1]
        original_texts = texts[:]
        for col in range(len(texts)):
            # Pad using the plain string, then swap in the decorated form.
            texts[col] = texts[col].ljust(colwidths[col])
            texts[col] = texts[col].replace(original_texts[col], display_texts[col])
        line = u" ".join(texts)
        lines.append(line)
    for line in lines:
        console_print(line)
@command
@requires_dropbox_running
@alias('stat')
def filestatus(args):
    u"""get current sync status of one or more files
    dropbox filestatus [-l] [-a] [FILE]...
    Prints the current status of each FILE.
    options:
    -l --list prints out information in a format similar to ls. works best when your console supports color :)
    -a --all do not ignore entries starting with .
    """
    global enc
    oparser = optparse.OptionParser()
    oparser.add_option("-l", "--list", action="store_true", dest="list")
    oparser.add_option("-a", "--all", action="store_true", dest="all")
    (options, args) = oparser.parse_args(args)
    try:
        with closing(DropboxCommand()) as dc:
            if options.list:
                # Listing.
                # Separate directories from files.
                if len(args) == 0:
                    dirs, nondirs = [u"."], []
                else:
                    dirs, nondirs = [], []
                    for a in args:
                        try:
                            (dirs if os.path.isdir(a) else nondirs).append(a.decode(enc))
                        except UnicodeDecodeError:
                            continue
                    if len(dirs) == 0 and len(nondirs) == 0:
                        #TODO: why?
                        # NOTE(review): uses the builtin exit(); every
                        # argument failed to decode, so nothing is listable.
                        exit(1)
                dirs.sort(key=methodcaller('lower'))
                nondirs.sort(key=methodcaller('lower'))
                # Gets a string representation for a path.
                def path_to_string(file_path):
                    # Returns (plain, decorated) where the decorated form
                    # wraps the basename in an ANSI color per sync status.
                    if not os.path.exists(file_path):
                        path = u"%s (File doesn't exist!)" % os.path.basename(file_path)
                        return (path, path)
                    try:
                        status = dc.icon_overlay_file_status(path=file_path).get(u'status', [None])[0]
                    except DropboxCommand.CommandError, e:
                        path = u"%s (%s)" % (os.path.basename(file_path), e)
                        return (path, path)
                    env_term = os.environ.get('TERM','')
                    supports_color = (sys.stderr.isatty() and (
                        env_term.startswith('vt') or
                        env_term.startswith('linux') or
                        'xterm' in env_term or
                        'color' in env_term
                    )
                    )
                    # TODO: Test when you don't support color.
                    if not supports_color:
                        path = os.path.basename(file_path)
                        return (path, path)
                    # Color per status: green/cyan/red-bg/white for
                    # up-to-date/syncing/unsyncable/selective sync.
                    if status == u"up to date":
                        init, cleanup = "\x1b[32;1m", "\x1b[0m"
                    elif status == u"syncing":
                        init, cleanup = "\x1b[36;1m", "\x1b[0m"
                    elif status == u"unsyncable":
                        init, cleanup = "\x1b[41;1m", "\x1b[0m"
                    elif status == u"selsync":
                        init, cleanup = "\x1b[37;1m", "\x1b[0m"
                    else:
                        init, cleanup = '', ''
                    path = os.path.basename(file_path)
                    return (path, u"%s%s%s" % (init, path, cleanup))
                # Prints a directory.
                def print_directory(name):
                    clean_paths = []
                    formatted_paths = []
                    for subname in sorted(os.listdir(name), key=methodcaller('lower')):
                        if type(subname) != unicode:
                            continue
                        if not options.all and subname[0] == u'.':
                            continue
                        try:
                            clean, formatted = path_to_string(unicode_abspath(os.path.join(name, subname)))
                            clean_paths.append(clean)
                            formatted_paths.append(formatted)
                        except (UnicodeEncodeError, UnicodeDecodeError), e:
                            continue
                    columnize(clean_paths, formatted_paths)
                try:
                    if len(dirs) == 1 and len(nondirs) == 0:
                        print_directory(dirs[0])
                    else:
                        # ls-style output: loose files first, then each
                        # directory with a "name:" header.
                        nondir_formatted_paths = []
                        nondir_clean_paths = []
                        for name in nondirs:
                            try:
                                clean, formatted = path_to_string(unicode_abspath(name))
                                nondir_clean_paths.append(clean)
                                nondir_formatted_paths.append(formatted)
                            except (UnicodeEncodeError, UnicodeDecodeError), e:
                                continue
                        if nondir_clean_paths:
                            columnize(nondir_clean_paths, nondir_formatted_paths)
                        if len(nondirs) == 0:
                            console_print(dirs[0] + u":")
                            print_directory(dirs[0])
                            dirs = dirs[1:]
                        for name in dirs:
                            console_print()
                            console_print(name + u":")
                            print_directory(name)
                except DropboxCommand.EOFError:
                    console_print(u"Dropbox daemon stopped.")
                except DropboxCommand.BadConnectionError, e:
                    console_print(u"Dropbox isn't responding!")
            else:
                # Plain mode: "name: status" per argument, padded to align.
                if len(args) == 0:
                    args = [name for name in sorted(os.listdir(u"."), key=methodcaller('lower')) if type(name) == unicode]
                if len(args) == 0:
                    # Bail early if there's nothing to list to avoid crashing on indent below
                    console_print(u"<empty>")
                    return
                indent = max(len(st)+1 for st in args)
                for file in args:
                    try:
                        if type(file) is not unicode:
                            file = file.decode(enc)
                        fp = unicode_abspath(file)
                    except (UnicodeEncodeError, UnicodeDecodeError), e:
                        continue
                    if not os.path.exists(fp):
                        console_print(u"%-*s %s" % \
                                      (indent, file+':', "File doesn't exist"))
                        continue
                    try:
                        status = dc.icon_overlay_file_status(path=fp).get(u'status', [u'unknown'])[0]
                        console_print(u"%-*s %s" % (indent, file+':', status))
                    except DropboxCommand.CommandError, e:
                        console_print(u"%-*s %s" % (indent, file+':', e))
    except DropboxCommand.CouldntConnectError, e:
        console_print(u"Dropbox isn't running!")
@command
@requires_dropbox_running
def ls(args):
    u"""list directory contents with current sync status
    dropbox ls [FILE]...
    This is an alias for filestatus -l
    """
    # Delegate to filestatus with the list flag prepended.
    return filestatus(["-l"] + args)
@command
@requires_dropbox_running
def puburl(args):
    u"""get public url of a file in your dropbox
    dropbox puburl FILE
    Prints out a public url for FILE.
    """
    if len(args) != 1:
        console_print(puburl.__doc__,linebreak=False)
        return
    try:
        with closing(DropboxCommand()) as dc:
            try:
                # Ask the daemon; the reply maps u'link' to a one-item list.
                console_print(dc.get_public_link(path=unicode_abspath(args[0].decode(sys.getfilesystemencoding()))).get(u'link', [u'No Link'])[0])
            except DropboxCommand.CommandError, e:
                console_print(u"Couldn't get public url: " + str(e))
        except DropboxCommand.BadConnectionError, e:
            console_print(u"Dropbox isn't responding!")
        except DropboxCommand.EOFError:
            console_print(u"Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError, e:
        console_print(u"Dropbox isn't running!")
@command
@requires_dropbox_running
def status(args):
    u"""get current status of the dropboxd
    dropbox status
    Prints out the current status of the Dropbox daemon.
    """
    if len(args) != 0:
        console_print(status.__doc__,linebreak=False)
        return
    try:
        with closing(DropboxCommand()) as dc:
            try:
                # u'status' maps to a list of human-readable status lines;
                # an empty list means the daemon is idle.
                lines = dc.get_dropbox_status()[u'status']
                if len(lines) == 0:
                    console_print(u'Idle')
                else:
                    for line in lines:
                        console_print(line)
            except KeyError:
                console_print(u"Couldn't get status: daemon isn't responding")
            except DropboxCommand.CommandError, e:
                console_print(u"Couldn't get status: " + str(e))
            except DropboxCommand.BadConnectionError, e:
                console_print(u"Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print(u"Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError, e:
        console_print(u"Dropbox isn't running!")
@command
def running(argv):
    u"""return whether dropbox is running
    dropbox running
    Returns 1 if running 0 if not running.
    """
    # The int return value becomes the process exit code (see __main__).
    return int(is_dropbox_running())
@command
@requires_dropbox_running
def stop(args):
    u"""stop dropboxd
    dropbox stop
    Stops the dropbox daemon.
    """
    try:
        with closing(DropboxCommand()) as dc:
            try:
                # Ask the daemon to terminate itself immediately.
                dc.tray_action_hard_exit()
            except DropboxCommand.BadConnectionError, e:
                console_print(u"Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print(u"Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError, e:
        console_print(u"Dropbox isn't running!")
#returns true if link is necessary
def grab_link_url_if_necessary():
    # Asks the daemon whether the machine still needs to be linked to an
    # account; prints the link URL when so.  On any connection problem it
    # falls through and implicitly returns None (falsy, like False).
    try:
        with closing(DropboxCommand()) as dc:
            try:
                link_url = dc.needs_link().get(u"link_url", None)
                if link_url is not None:
                    console_print(u"To link this computer to a dropbox account, visit the following url:\n%s" % link_url[0])
                    return True
                else:
                    return False
            except DropboxCommand.CommandError, e:
                pass
    except DropboxCommand.BadConnectionError, e:
        console_print(u"Dropbox isn't responding!")
    except DropboxCommand.EOFError:
        console_print(u"Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError, e:
        console_print(u"Dropbox isn't running!")
@command
@requires_dropbox_running
def lansync(argv):
    u"""enables or disables LAN sync
    dropbox lansync [y/n]
    options:
    y dropbox will use LAN sync (default)
    n dropbox will not use LAN sync
    """
    if len(argv) != 1:
        console_print(lansync.__doc__, linebreak=False)
        return
    # Accept y/yes/-y and n/no/-n style answers.
    s = argv[0].lower()
    if s.startswith('y') or s.startswith('-y'):
        should_lansync = True
    elif s.startswith('n') or s.startswith('-n'):
        should_lansync = False
    else:
        should_lansync = None
    if should_lansync is None:
        console_print(lansync.__doc__,linebreak=False)
    else:
        with closing(DropboxCommand()) as dc:
            dc.set_lan_sync(lansync='enabled' if should_lansync else 'disabled')
@command
@requires_dropbox_running
def exclude(args):
    u"""ignores/excludes a directory from syncing
    dropbox exclude [list]
    dropbox exclude add [DIRECTORY] [DIRECTORY] ...
    dropbox exclude remove [DIRECTORY] [DIRECTORY] ...
    "list" prints a list of directories currently excluded from syncing.
    "add" adds one or more directories to the exclusion list, then resynchronizes Dropbox.
    "remove" removes one or more directories from the exclusion list, then resynchronizes Dropbox.
    With no arguments, executes "list".
    Any specified path must be within Dropbox.
    """
    if len(args) == 0:
        # Default action: show the current ignore set.
        try:
            with closing(DropboxCommand()) as dc:
                try:
                    lines = [relpath(path) for path in dc.get_ignore_set()[u'ignore_set']]
                    lines.sort()
                    if len(lines) == 0:
                        console_print(u'No directories are being ignored.')
                    else:
                        console_print(u'Excluded: ')
                        for line in lines:
                            console_print(unicode(line))
                except KeyError:
                    console_print(u"Couldn't get ignore set: daemon isn't responding")
                except DropboxCommand.CommandError, e:
                    if e.args[0].startswith(u"No command exists by that name"):
                        console_print(u"This version of the client does not support this command.")
                    else:
                        console_print(u"Couldn't get ignore set: " + str(e))
                except DropboxCommand.BadConnectionError, e:
                    console_print(u"Dropbox isn't responding!")
                except DropboxCommand.EOFError:
                    console_print(u"Dropbox daemon stopped.")
        except DropboxCommand.CouldntConnectError, e:
            console_print(u"Dropbox isn't running!")
    elif len(args) == 1 and args[0] == u"list":
        exclude([])
    elif len(args) >= 2:
        sub_command = args[0]
        paths = args[1:]
        absolute_paths = [unicode_abspath(path.decode(sys.getfilesystemencoding())) for path in paths]
        if sub_command == u"add":
            # timeout=None: the daemon resyncs before replying, which can
            # take arbitrarily long.
            try:
                with closing(DropboxCommand(timeout=None)) as dc:
                    try:
                        result = dc.ignore_set_add(paths=absolute_paths)
                        if result[u"ignored"]:
                            console_print(u"Excluded: ")
                            lines = [relpath(path) for path in result[u"ignored"]]
                            for line in lines:
                                console_print(unicode(line))
                    except KeyError:
                        console_print(u"Couldn't add ignore path: daemon isn't responding")
                    except DropboxCommand.CommandError, e:
                        if e.args[0].startswith(u"No command exists by that name"):
                            console_print(u"This version of the client does not support this command.")
                        else:
                            console_print(u"Couldn't get ignore set: " + str(e))
                    except DropboxCommand.BadConnectionError, e:
                        console_print(u"Dropbox isn't responding! [%s]" % e)
                    except DropboxCommand.EOFError:
                        console_print(u"Dropbox daemon stopped.")
            except DropboxCommand.CouldntConnectError, e:
                console_print(u"Dropbox isn't running!")
        elif sub_command == u"remove":
            try:
                with closing(DropboxCommand(timeout=None)) as dc:
                    try:
                        result = dc.ignore_set_remove(paths=absolute_paths)
                        if result[u"removed"]:
                            console_print(u"No longer excluded: ")
                            lines = [relpath(path) for path in result[u"removed"]]
                            for line in lines:
                                console_print(unicode(line))
                    except KeyError:
                        console_print(u"Couldn't remove ignore path: daemon isn't responding")
                    except DropboxCommand.CommandError, e:
                        if e.args[0].startswith(u"No command exists by that name"):
                            console_print(u"This version of the client does not support this command.")
                        else:
                            console_print(u"Couldn't get ignore set: " + str(e))
                    except DropboxCommand.BadConnectionError, e:
                        console_print(u"Dropbox isn't responding! [%s]" % e)
                    except DropboxCommand.EOFError:
                        console_print(u"Dropbox daemon stopped.")
            except DropboxCommand.CouldntConnectError, e:
                console_print(u"Dropbox isn't running!")
        else:
            console_print(exclude.__doc__, linebreak=False)
            return
    else:
        console_print(exclude.__doc__, linebreak=False)
        return
@command
def start(argv):
    u"""start dropboxd
    dropbox start [-i]
    Starts the dropbox daemon, dropboxd. If dropboxd is already running, this will do nothing.
    options:
    -i --install auto install dropboxd if not available on the system
    """
    should_install = "-i" in argv or "--install" in argv
    # first check if dropbox is already running
    if is_dropbox_running():
        if not grab_link_url_if_necessary():
            console_print(u"Dropbox is already running!")
        return
    console_print(u"Starting Dropbox...", linebreak=False)
    console_flush()
    if not start_dropbox():
        if not should_install:
            console_print()
            console_print(u"The Dropbox daemon is not installed!")
            console_print(u"Run \"dropbox start -i\" to install the daemon")
            return
        # install dropbox!!!
        try:
            download()
        except:
            # Best-effort: show the failure and fall through without
            # attempting to start an uninstalled daemon.
            traceback.print_exc()
        else:
            if GUI_AVAILABLE:
                start_dropbox()
                console_print(u"Done!")
            else:
                if start_dropbox():
                    if not grab_link_url_if_necessary():
                        console_print(u"Done!")
    else:
        if not grab_link_url_if_necessary():
            console_print(u"Done!")
def can_reroll_autostart():
    # Autostart can only be toggled on desktops with an XDG-style
    # ~/.config directory (e.g. Ubuntu).
    home_entries = os.listdir(os.path.expanduser(u'~'))
    return u".config" in home_entries
def reroll_autostart(should_autostart):
    # Create or remove the XDG autostart entry
    # ~/.config/autostart/dropbox.desktop (copied from DESKTOP_FILE).
    home_dir = os.path.expanduser(u'~')
    contents = os.listdir(home_dir)
    # UBUNTU
    if u".config" in contents:
        autostart_dir = os.path.join(home_dir, u".config", u"autostart")
        autostart_link = os.path.join(autostart_dir, u"dropbox.desktop")
        if should_autostart:
            if os.path.exists(DESKTOP_FILE):
                if not os.path.exists(autostart_dir):
                    os.makedirs(autostart_dir)
                shutil.copyfile(DESKTOP_FILE, autostart_link)
        elif os.path.exists(autostart_link):
            os.remove(autostart_link)
@command
def autostart(argv):
    u"""automatically start dropbox at login
    dropbox autostart [y/n]
    options:
    n dropbox will not start automatically at login
    y dropbox will start automatically at login (default)
    Note: May only work on current Ubuntu distributions.
    """
    if len(argv) != 1:
        # Print the usage portion of the docstring (drop the summary line).
        console_print(''.join(autostart.__doc__.split('\n', 1)[1:]).decode('ascii'))
        return
    # Accept y/yes/-y and n/no/-n style answers.
    s = argv[0].lower()
    if s.startswith('y') or s.startswith('-y'):
        should_autostart = True
    elif s.startswith('n') or s.startswith('-n'):
        should_autostart = False
    else:
        should_autostart = None
    if should_autostart is None:
        console_print(autostart.__doc__,linebreak=False)
    else:
        reroll_autostart(should_autostart)
@command
def help(argv):
    u"""provide help
    dropbox help [COMMAND]
    With no arguments, print a list of commands and a short description of each. With a command, print descriptive help on how to use the command.
    """
    if not argv:
        return usage(argv)
    # Each command's docstring (minus its first summary line) is the help
    # text.  NOTE: loop variables shadow the command/alias decorators, but
    # registration has already happened by the time help() runs.
    for command in commands:
        if command == argv[0]:
            console_print(commands[command].__doc__.split('\n', 1)[1].decode('ascii'))
            return
    for alias in aliases:
        if alias == argv[0]:
            console_print(aliases[alias].__doc__.split('\n', 1)[1].decode('ascii'))
            return
    console_print(u"unknown command '%s'" % argv[0], f=sys.stderr)
def usage(argv):
    # Print the command summary table: each registered command with the
    # first line of its docstring, padded into aligned columns.
    console_print(u"Dropbox command-line interface\n")
    console_print(u"commands:\n")
    console_print(u"Note: use dropbox help <command> to view usage for a specific command.\n")
    out = []
    for command in commands:
        out.append((command, commands[command].__doc__.splitlines()[0]))
    spacing = max(len(o[0])+3 for o in out)
    for o in out:
        console_print(" %-*s%s" % (spacing, o[0], o[1]))
    console_print()
def main(argv):
    # Dispatch argv to the first recognized command or alias.  Tokens
    # before the command name are global options (none exist yet); tokens
    # after it are passed to the handler, whose return value we return.
    #
    # Fixes vs. the previous version: compare against None with `is`,
    # dispatch on `cut` instead of reusing the loop variable `i` (which
    # only worked by aliasing), and drop the unreachable `return` after
    # os._exit(0).
    cut = None
    for i in range(len(argv)):
        if argv[i] in commands or argv[i] in aliases:
            cut = i
            break
    if cut is None:
        usage(argv)
        os._exit(0)
    # lol no options for now
    globaloptionparser = optparse.OptionParser()
    globaloptionparser.parse_args(argv[0:cut])
    # now dispatch and run
    if argv[cut] in commands:
        result = commands[argv[cut]](argv[cut+1:])
    else:
        result = aliases[argv[cut]](argv[cut+1:])
    # flush, in case output is rerouted to a file.
    console_flush()
    # done
    return result
if __name__ == "__main__":
ret = main(sys.argv)
if ret is not None:
sys.exit(ret)
|
test_csv.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import abc
import bz2
from datetime import date, datetime
from decimal import Decimal
import gc
import gzip
import io
import itertools
import os
import pickle
import select
import shutil
import signal
import string
import tempfile
import threading
import time
import unittest
import weakref
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.csv import (
open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601,
write_csv, WriteOptions, CSVWriter, InvalidRow)
from pyarrow.tests import util
def generate_col_names():
    """Yield spreadsheet-style column names: 'a'..'z', then 'aa', 'ab', ..."""
    letters = string.ascii_lowercase
    for single in letters:
        yield single
    for pair in itertools.product(letters, repeat=2):
        yield pair[0] + pair[1]
def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n', write_names=True):
    """Build a random integer CSV (as bytes) and the pyarrow Table it encodes.

    Values are deterministic (seeded RandomState) so tests are repeatable.
    """
    rng = np.random.RandomState(42)
    data = rng.randint(0, 1000, size=(num_cols, num_rows))
    col_names = list(itertools.islice(generate_col_names(), num_cols))
    buf = io.StringIO()
    if write_names:
        buf.write(",".join(col_names))
        buf.write(linesep)
    # Data is generated column-major; transpose to emit CSV rows.
    for record in data.T:
        buf.write(",".join(map(str, record)))
        buf.write(linesep)
    csv_bytes = buf.getvalue().encode()
    expected = pa.Table.from_arrays(
        [pa.array(col, type=pa.int64()) for col in data],
        col_names)
    return csv_bytes, expected
def make_empty_csv(column_names):
    """Return CSV bytes containing only a header row (no data lines)."""
    header = ",".join(column_names) + "\n"
    return header.encode()
def check_options_class(cls, **attr_values):
    """
    Check setting and getting attributes of an *Options class.
    """
    opts = cls()
    # For each attribute: the first candidate must be the default, and
    # every candidate must round-trip through the setter.
    for name, values in attr_values.items():
        assert getattr(opts, name) == values[0], \
            "incorrect default value for " + name
        for candidate in values:
            setattr(opts, name, candidate)
            assert getattr(opts, name) == candidate, "failed setting value"
    # Unknown attributes must be rejected rather than silently stored.
    with pytest.raises(AttributeError):
        opts.zzz_non_existent = True
    # Check constructor named arguments
    non_defaults = {name: values[1] for name, values in attr_values.items()}
    opts = cls(**non_defaults)
    for name, expected in non_defaults.items():
        assert getattr(opts, name) == expected
# The various options classes need to be picklable for dataset
def check_options_class_pickling(cls, **attr_values):
    """Round-trip cls(**attr_values) through pickle and compare attributes."""
    original = cls(**attr_values)
    payload = pickle.dumps(original, protocol=pickle.HIGHEST_PROTOCOL)
    restored = pickle.loads(payload)
    for name, expected in attr_values.items():
        assert getattr(restored, name) == expected
class InvalidRowHandler:
    """Callable that records each invalid CSV row and returns a fixed verdict."""

    def __init__(self, result):
        self.result = result
        self.rows = []

    def __call__(self, row):
        # Remember the offending row, then tell the reader what to do.
        self.rows.append(row)
        return self.result

    def __eq__(self, other):
        # Handlers compare equal when they produce the same verdict;
        # recorded rows are deliberately ignored.
        if not isinstance(other, InvalidRowHandler):
            return False
        return other.result == self.result

    def __ne__(self, other):
        return not self.__eq__(other)
def test_read_options():
    # Exercise ReadOptions attribute get/set, constructor kwargs,
    # pickling, and validate() error messages.
    cls = ReadOptions
    opts = cls()
    # First value of each list is the expected default.
    check_options_class(cls, use_threads=[True, False],
                        skip_rows=[0, 3],
                        column_names=[[], ["ab", "cd"]],
                        autogenerate_column_names=[False, True],
                        encoding=['utf8', 'utf16'],
                        skip_rows_after_names=[0, 27])
    check_options_class_pickling(cls, use_threads=True,
                                 skip_rows=3,
                                 column_names=["ab", "cd"],
                                 autogenerate_column_names=False,
                                 encoding='utf16',
                                 skip_rows_after_names=27)
    # block_size has a platform default, so only sanity-check it here.
    assert opts.block_size > 0
    opts.block_size = 12345
    assert opts.block_size == 12345
    opts = cls(block_size=1234)
    assert opts.block_size == 1234
    opts.validate()
    match = "ReadOptions: block_size must be at least 1: 0"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.block_size = 0
        opts.validate()
    match = "ReadOptions: skip_rows cannot be negative: -1"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.skip_rows = -1
        opts.validate()
    match = "ReadOptions: skip_rows_after_names cannot be negative: -1"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.skip_rows_after_names = -1
        opts.validate()
    match = "ReadOptions: autogenerate_column_names cannot be true when" \
            " column_names are provided"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.autogenerate_column_names = True
        opts.column_names = ('a', 'b')
        opts.validate()
def test_parse_options():
    # Exercise ParseOptions attribute get/set, pickling, and validate()
    # rejection of CR/LF for delimiter, quote_char and escape_char.
    cls = ParseOptions
    skip_handler = InvalidRowHandler('skip')
    # First value of each list is the expected default.
    check_options_class(cls, delimiter=[',', 'x'],
                        escape_char=[False, 'y'],
                        quote_char=['"', 'z', False],
                        double_quote=[True, False],
                        newlines_in_values=[False, True],
                        ignore_empty_lines=[True, False],
                        invalid_row_handler=[None, skip_handler])
    check_options_class_pickling(cls, delimiter='x',
                                 escape_char='y',
                                 quote_char=False,
                                 double_quote=False,
                                 newlines_in_values=True,
                                 ignore_empty_lines=False,
                                 invalid_row_handler=skip_handler)
    cls().validate()
    opts = cls()
    opts.delimiter = "\t"
    opts.validate()
    # match strings are regexes, hence the double-escaped backslashes.
    match = "ParseOptions: delimiter cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.delimiter = "\n"
        opts.validate()
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.delimiter = "\r"
        opts.validate()
    match = "ParseOptions: quote_char cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.quote_char = "\n"
        opts.validate()
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.quote_char = "\r"
        opts.validate()
    match = "ParseOptions: escape_char cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.escape_char = "\n"
        opts.validate()
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.escape_char = "\r"
        opts.validate()
def test_convert_options():
    """Exercise ConvertOptions attributes, pickling and validation."""
    cls = ConvertOptions
    opts = cls()

    check_options_class(
        cls, check_utf8=[True, False],
        strings_can_be_null=[False, True],
        quoted_strings_can_be_null=[True, False],
        decimal_point=['.', ','],
        include_columns=[[], ['def', 'abc']],
        include_missing_columns=[False, True],
        auto_dict_encode=[False, True],
        timestamp_parsers=[[], [ISO8601, '%y-%m']])

    check_options_class_pickling(
        cls, check_utf8=False,
        strings_can_be_null=True,
        quoted_strings_can_be_null=False,
        decimal_point=',',
        include_columns=['def', 'abc'],
        include_missing_columns=False,
        auto_dict_encode=True,
        timestamp_parsers=[ISO8601, '%y-%m'])

    # decimal_point must be a single character.
    with pytest.raises(ValueError):
        opts.decimal_point = '..'

    assert opts.auto_dict_max_cardinality > 0
    opts.auto_dict_max_cardinality = 99999
    assert opts.auto_dict_max_cardinality == 99999

    assert opts.column_types == {}
    # Pass column_types as mapping
    opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
    assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
    # Type names given as strings are resolved to DataType instances.
    opts.column_types = {'v': 'int16', 'w': 'null'}
    assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
    # Pass column_types as schema
    schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
    opts.column_types = schema
    assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
    # Pass column_types as sequence
    opts.column_types = [('x', pa.binary())]
    assert opts.column_types == {'x': pa.binary()}

    # Invalid column_types values raise TypeError.
    with pytest.raises(TypeError, match='DataType expected'):
        opts.column_types = {'a': None}
    with pytest.raises(TypeError):
        opts.column_types = 0

    # null/true/false value lists are plain lists with sane defaults.
    assert isinstance(opts.null_values, list)
    assert '' in opts.null_values
    assert 'N/A' in opts.null_values
    opts.null_values = ['xxx', 'yyy']
    assert opts.null_values == ['xxx', 'yyy']

    assert isinstance(opts.true_values, list)
    opts.true_values = ['xxx', 'yyy']
    assert opts.true_values == ['xxx', 'yyy']

    assert isinstance(opts.false_values, list)
    opts.false_values = ['xxx', 'yyy']
    assert opts.false_values == ['xxx', 'yyy']

    assert opts.timestamp_parsers == []
    opts.timestamp_parsers = [ISO8601]
    assert opts.timestamp_parsers == [ISO8601]

    # The same options can also be given to the constructor directly.
    opts = cls(column_types={'a': pa.null()},
               null_values=['N', 'nn'], true_values=['T', 'tt'],
               false_values=['F', 'ff'], auto_dict_max_cardinality=999,
               timestamp_parsers=[ISO8601, '%Y-%m-%d'])
    assert opts.column_types == {'a': pa.null()}
    assert opts.null_values == ['N', 'nn']
    assert opts.false_values == ['F', 'ff']
    assert opts.true_values == ['T', 'tt']
    assert opts.auto_dict_max_cardinality == 999
    assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d']
def test_write_options():
    """Exercise WriteOptions attributes and validation."""
    cls = WriteOptions
    opts = cls()

    check_options_class(
        cls, include_header=[True, False], delimiter=[',', '\t', '|'])

    # batch_size has a positive default and is writable.
    assert opts.batch_size > 0
    opts.batch_size = 12345
    assert opts.batch_size == 12345

    # It can also be given to the constructor.
    opts = cls(batch_size=9876)
    assert opts.batch_size == 9876
    opts.validate()

    # A non-positive batch_size fails validation.
    match = "WriteOptions: batch_size must be at least 1: 0"
    with pytest.raises(pa.ArrowInvalid, match=match):
        bad_opts = cls()
        bad_opts.batch_size = 0
        bad_opts.validate()
class BaseTestCSV(abc.ABC):
    """Common tests which are shared by streaming and non streaming readers"""

    @abc.abstractmethod
    def read_bytes(self, b, **kwargs):
        """
        :param b: bytes to be parsed
        :param kwargs: arguments passed on to open the csv file
        :return: b parsed as a single RecordBatch
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def use_threads(self):
        """Whether this test is multi-threaded"""
        raise NotImplementedError

    @staticmethod
    def check_names(table, names):
        # Helper: the table has exactly the given column names, in order.
        assert table.num_columns == len(names)
        assert table.column_names == names

    def test_header_skip_rows(self):
        # skip_rows skips lines *before* the header line is read.
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        opts = ReadOptions()
        opts.skip_rows = 1
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ef", "gh"])
        assert table.to_pydict() == {
            "ef": ["ij", "mn"],
            "gh": ["kl", "op"],
        }

        # Skipping all but the last line leaves a header-only table.
        opts.skip_rows = 3
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["mn", "op"])
        assert table.to_pydict() == {
            "mn": [],
            "op": [],
        }

        opts.skip_rows = 4
        with pytest.raises(pa.ArrowInvalid):
            # Not enough rows
            table = self.read_bytes(rows, read_options=opts)

        # Can skip rows with a different number of columns
        rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
        opts.skip_rows = 2
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ij", "kl"])
        assert table.to_pydict() == {
            "ij": ["mn"],
            "kl": ["op"],
        }

        # Can skip all rows exactly when columns are given
        opts.skip_rows = 4
        opts.column_names = ['ij', 'kl']
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ij", "kl"])
        assert table.to_pydict() == {
            "ij": [],
            "kl": [],
        }

    def test_skip_rows_after_names(self):
        # skip_rows_after_names skips data lines *after* the header is read.
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        opts = ReadOptions()
        opts.skip_rows_after_names = 1
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["ij", "mn"],
            "cd": ["kl", "op"],
        }

        # Can skip exact number of rows
        opts.skip_rows_after_names = 3
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": [],
            "cd": [],
        }

        # Can skip beyond all rows
        opts.skip_rows_after_names = 4
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": [],
            "cd": [],
        }

        # Can skip rows with a different number of columns
        rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
        opts.skip_rows_after_names = 2
        opts.column_names = ["f0", "f1"]
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["f0", "f1"])
        assert table.to_pydict() == {
            "f0": ["ij", "mn"],
            "f1": ["kl", "op"],
        }
        opts = ReadOptions()

        # Can skip rows with new lines in the value
        rows = b'ab,cd\n"e\nf","g\n\nh"\n"ij","k\nl"\nmn,op'
        opts.skip_rows_after_names = 2
        parse_opts = ParseOptions()
        parse_opts.newlines_in_values = True
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["mn"],
            "cd": ["op"],
        }

        # Can skip rows when block ends in middle of quoted value
        opts.skip_rows_after_names = 2
        opts.block_size = 26
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["mn"],
            "cd": ["op"],
        }
        opts = ReadOptions()

        # Can skip rows that are beyond the first block without lexer
        rows, expected = make_random_csv(num_cols=5, num_rows=1000)
        opts.skip_rows_after_names = 900
        # NOTE(review): float block_size — presumably coerced to an integer
        # by the option setter; `// 11` would make the intent explicit.
        opts.block_size = len(rows) / 11
        table = self.read_bytes(rows, read_options=opts)
        assert table.schema == expected.schema
        assert table.num_rows == 100
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert values[900:] == table_dict[name]

        # Can skip rows that are beyond the first block with lexer
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        assert table.schema == expected.schema
        assert table.num_rows == 100
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert values[900:] == table_dict[name]

        # Skip rows and skip rows after names
        rows, expected = make_random_csv(num_cols=5, num_rows=200,
                                         write_names=False)
        opts = ReadOptions()
        opts.skip_rows = 37
        opts.skip_rows_after_names = 41
        opts.column_names = expected.schema.names
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        assert table.schema == expected.schema
        assert (table.num_rows ==
                expected.num_rows - opts.skip_rows -
                opts.skip_rows_after_names)
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert (values[opts.skip_rows + opts.skip_rows_after_names:] ==
                    table_dict[name])

    def test_row_number_offset_in_errors(self):
        # Row numbers are only correctly counted in serial reads
        def format_msg(msg_format, row, *args):
            # In threaded reads the reader omits the row prefix entirely.
            if self.use_threads:
                row_info = ""
            else:
                row_info = "Row #{}: ".format(row)
            return msg_format.format(row_info, *args)

        csv, _ = make_random_csv(4, 100, write_names=True)

        read_options = ReadOptions()
        read_options.block_size = len(csv) / 3
        convert_options = ConvertOptions()
        convert_options.column_types = {"a": pa.int32()}

        # Test without skip_rows and column names in the csv
        csv_bad_columns = csv + b"1,2\r\n"
        message_columns = format_msg("{}Expected 4 columns, got 2", 102)
        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)

        csv_bad_type = csv + b"a,b,c,d\r\n"
        message_value = format_msg(
            "In CSV column #0: {}"
            "CSV conversion error to int32: invalid value 'a'",
            102, csv)
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)

        # Long offending rows are truncated to 96 bytes in the message.
        long_row = (b"this is a long row" * 15) + b",3\r\n"
        csv_bad_columns_long = csv + long_row
        message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 102,
                                  long_row[0:96].decode("utf-8"))
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long,
                            read_options=read_options,
                            convert_options=convert_options)

        # Test skipping rows after the names
        read_options.skip_rows_after_names = 47
        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long,
                            read_options=read_options,
                            convert_options=convert_options)
        read_options.skip_rows_after_names = 0

        # Test without skip_rows and column names not in the csv
        csv, _ = make_random_csv(4, 100, write_names=False)
        read_options.column_names = ["a", "b", "c", "d"]
        csv_bad_columns = csv + b"1,2\r\n"
        message_columns = format_msg("{}Expected 4 columns, got 2", 101)
        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)

        csv_bad_columns_long = csv + long_row
        message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 101,
                                  long_row[0:96].decode("utf-8"))
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long,
                            read_options=read_options,
                            convert_options=convert_options)

        csv_bad_type = csv + b"a,b,c,d\r\n"
        message_value = format_msg(
            "In CSV column #0: {}"
            "CSV conversion error to int32: invalid value 'a'",
            101)
        message_value = message_value.format(len(csv))
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)

        # Test with skip_rows and column names not in the csv
        read_options.skip_rows = 23
        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)

    def test_invalid_row_handler(self):
        # An invalid_row_handler can skip or re-reject malformed rows.
        rows = b"a,b\nc\nd,e\nf,g,h\ni,j\n"
        parse_opts = ParseOptions()
        with pytest.raises(
                ValueError,
                match="Expected 2 columns, got 1: c"):
            self.read_bytes(rows, parse_options=parse_opts)

        # Skip requested
        parse_opts.invalid_row_handler = InvalidRowHandler('skip')
        table = self.read_bytes(rows, parse_options=parse_opts)
        assert table.to_pydict() == {
            'a': ["d", "i"],
            'b': ["e", "j"],
        }

        def row_num(x):
            # Row numbers are not reported in multi-threaded reads.
            return None if self.use_threads else x
        expected_rows = [
            InvalidRow(2, 1, row_num(2), "c"),
            InvalidRow(2, 3, row_num(4), "f,g,h"),
        ]
        assert parse_opts.invalid_row_handler.rows == expected_rows

        # Error requested
        parse_opts.invalid_row_handler = InvalidRowHandler('error')
        with pytest.raises(
                ValueError,
                match="Expected 2 columns, got 1: c"):
            self.read_bytes(rows, parse_options=parse_opts)
        expected_rows = [InvalidRow(2, 1, row_num(2), "c")]
        assert parse_opts.invalid_row_handler.rows == expected_rows
class BaseCSVTableRead(BaseTestCSV):
def read_csv(self, csv, *args, validate_full=True, **kwargs):
    """
    Read *csv* into an in-memory table via pyarrow's read_csv.

    csv            The CSV bytes
    args           Positional arguments forwarded to pyarrow's read_csv
    validate_full  Whether or not to fully validate the resulting table
    kwargs         Keyword arguments forwarded to pyarrow's read_csv
    """
    assert isinstance(self.use_threads, bool)  # sanity check
    # Force the reader's threading mode to match this test class.
    opts = kwargs.setdefault('read_options', ReadOptions())
    opts.use_threads = self.use_threads
    result = read_csv(csv, *args, **kwargs)
    result.validate(full=validate_full)
    return result
def read_bytes(self, b, **kwargs):
    """Parse the given bytes through read_csv, wrapped in a pyarrow buffer."""
    buf = pa.py_buffer(b)
    return self.read_csv(buf, **kwargs)
def test_file_object(self):
    """Binary file objects are accepted; text file objects are rejected."""
    payload = b"a,b\n1,2\n"
    expected = {'a': [1], 'b': [2]}
    result = self.read_csv(io.BytesIO(payload))
    assert result.to_pydict() == expected
    # Text files not allowed
    with pytest.raises(TypeError):
        self.read_csv(io.StringIO(payload.decode()))
def test_header(self):
    """A lone header row yields named columns and zero data rows."""
    result = self.read_bytes(b"abc,def,gh\n")
    assert isinstance(result, pa.Table)
    self.check_names(result, ["abc", "def", "gh"])
    assert result.num_rows == 0
def test_bom(self):
    """A leading UTF-8 byte-order mark is stripped from the header."""
    result = self.read_bytes(b"\xef\xbb\xbfa,b\n1,2\n")
    assert result.to_pydict() == {'a': [1], 'b': [2]}
def test_one_chunk(self):
    # ARROW-7661: lack of newline at end of file should not produce
    # an additional chunk.
    lines = [b"a,b", b"1,2", b"3,4", b"56,78"]
    for sep in (b'\n', b'\r', b'\r\n'):
        for trailer in (b'', sep):
            payload = sep.join(lines) + trailer
            result = self.read_bytes(payload)
            # All data must land in a single batch.
            assert len(result.to_batches()) == 1
            assert result.to_pydict() == {
                "a": [1, 3, 56],
                "b": [2, 4, 78],
            }
def test_header_column_names(self):
    """ReadOptions.column_names replaces header parsing: row 1 is data."""
    rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

    opts = ReadOptions()
    opts.column_names = ["x", "y"]
    table = self.read_bytes(rows, read_options=opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {
        "x": ["ab", "ef", "ij", "mn"],
        "y": ["cd", "gh", "kl", "op"],
    }

    # skip_rows still applies before the (nameless) data starts.
    opts.skip_rows = 3
    table = self.read_bytes(rows, read_options=opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {
        "x": ["mn"],
        "y": ["op"],
    }

    # Skipping exactly all rows still yields the named, empty columns.
    opts.skip_rows = 4
    table = self.read_bytes(rows, read_options=opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {
        "x": [],
        "y": [],
    }

    opts.skip_rows = 5
    with pytest.raises(pa.ArrowInvalid):
        # Not enough rows
        table = self.read_bytes(rows, read_options=opts)

    # Unexpected number of columns
    opts.skip_rows = 0
    opts.column_names = ["x", "y", "z"]
    with pytest.raises(pa.ArrowInvalid,
                       match="Expected 3 columns, got 2"):
        table = self.read_bytes(rows, read_options=opts)

    # Can skip rows with a different number of columns
    rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
    opts.skip_rows = 2
    opts.column_names = ["x", "y"]
    table = self.read_bytes(rows, read_options=opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {
        "x": ["ij", "mn"],
        "y": ["kl", "op"],
    }
def test_header_autogenerate_column_names(self):
    """autogenerate_column_names yields f0, f1, ... and keeps row 1 as data."""
    rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

    opts = ReadOptions()
    opts.autogenerate_column_names = True
    table = self.read_bytes(rows, read_options=opts)
    self.check_names(table, ["f0", "f1"])
    assert table.to_pydict() == {
        "f0": ["ab", "ef", "ij", "mn"],
        "f1": ["cd", "gh", "kl", "op"],
    }

    opts.skip_rows = 3
    table = self.read_bytes(rows, read_options=opts)
    self.check_names(table, ["f0", "f1"])
    assert table.to_pydict() == {
        "f0": ["mn"],
        "f1": ["op"],
    }

    # Not enough rows, impossible to infer number of columns
    opts.skip_rows = 4
    with pytest.raises(pa.ArrowInvalid):
        table = self.read_bytes(rows, read_options=opts)
def test_include_columns(self):
    """ConvertOptions.include_columns selects and orders output columns."""
    rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

    convert_options = ConvertOptions()
    convert_options.include_columns = ['ab']
    table = self.read_bytes(rows, convert_options=convert_options)
    self.check_names(table, ["ab"])
    assert table.to_pydict() == {
        "ab": ["ef", "ij", "mn"],
    }

    # Order of include_columns is respected, regardless of CSV order
    convert_options.include_columns = ['cd', 'ab']
    table = self.read_bytes(rows, convert_options=convert_options)
    schema = pa.schema([('cd', pa.string()),
                        ('ab', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        "cd": ["gh", "kl", "op"],
        "ab": ["ef", "ij", "mn"],
    }

    # Include a column not in the CSV file => raises by default
    convert_options.include_columns = ['xx', 'ab', 'yy']
    with pytest.raises(KeyError,
                       match="Column 'xx' in include_columns "
                             "does not exist in CSV file"):
        self.read_bytes(rows, convert_options=convert_options)
def test_include_missing_columns(self):
    """include_missing_columns=True adds absent requested columns as null."""
    rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

    read_options = ReadOptions()
    convert_options = ConvertOptions()
    convert_options.include_columns = ['xx', 'ab', 'yy']
    convert_options.include_missing_columns = True
    table = self.read_bytes(rows, read_options=read_options,
                            convert_options=convert_options)
    # Missing columns materialize as all-null columns of type null.
    schema = pa.schema([('xx', pa.null()),
                        ('ab', pa.string()),
                        ('yy', pa.null())])
    assert table.schema == schema
    assert table.to_pydict() == {
        "xx": [None, None, None],
        "ab": ["ef", "ij", "mn"],
        "yy": [None, None, None],
    }

    # Combining with `column_names`
    read_options.column_names = ["xx", "yy"]
    convert_options.include_columns = ["yy", "cd"]
    table = self.read_bytes(rows, read_options=read_options,
                            convert_options=convert_options)
    schema = pa.schema([('yy', pa.string()),
                        ('cd', pa.null())])
    assert table.schema == schema
    assert table.to_pydict() == {
        "yy": ["cd", "gh", "kl", "op"],
        "cd": [None, None, None, None],
    }

    # And with `column_types` as well
    convert_options.column_types = {"yy": pa.binary(),
                                    "cd": pa.int32()}
    table = self.read_bytes(rows, read_options=read_options,
                            convert_options=convert_options)
    # Explicit types win over the default null type for missing columns.
    schema = pa.schema([('yy', pa.binary()),
                        ('cd', pa.int32())])
    assert table.schema == schema
    assert table.to_pydict() == {
        "yy": [b"cd", b"gh", b"kl", b"op"],
        "cd": [None, None, None, None],
    }
def test_simple_ints(self):
    """All-integer columns are inferred as int64."""
    result = self.read_bytes(b"a,b,c\n1,2,3\n4,5,6\n")
    expected_schema = pa.schema([(name, pa.int64()) for name in 'abc'])
    assert result.schema == expected_schema
    assert result.to_pydict() == {
        'a': [1, 4],
        'b': [2, 5],
        'c': [3, 6],
    }
def test_simple_varied(self):
    """Float, int, string and bool columns are each inferred from the data."""
    result = self.read_bytes(b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n")
    expected_schema = pa.schema([('a', pa.float64()),
                                 ('b', pa.int64()),
                                 ('c', pa.string()),
                                 ('d', pa.bool_())])
    assert result.schema == expected_schema
    # Note column 'c' is string because of "foo", so "3" stays textual,
    # and column 'd' parses 0/True as booleans.
    assert result.to_pydict() == {
        'a': [1.0, 4.0],
        'b': [2, -5],
        'c': ["3", "foo"],
        'd': [False, True],
    }
def test_simple_nulls(self):
    # Infer various kinds of data, with nulls
    rows = (b"a,b,c,d,e,f\n"
            b"1,2,,,3,N/A\n"
            b"nan,-5,foo,,nan,TRUE\n"
            b"4.5,#N/A,nan,,\xff,false\n")
    table = self.read_bytes(rows)
    # Column 'e' contains invalid UTF-8 (\xff) so it is inferred as binary;
    # column 'd' is entirely null.
    schema = pa.schema([('a', pa.float64()),
                        ('b', pa.int64()),
                        ('c', pa.string()),
                        ('d', pa.null()),
                        ('e', pa.binary()),
                        ('f', pa.bool_())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [1.0, None, 4.5],
        'b': [2, -5, None],
        'c': ["", "foo", "nan"],
        'd': [None, None, None],
        'e': [b"3", b"nan", b"\xff"],
        'f': [None, True, False],
    }
def test_decimal_point(self):
    # Infer floats with a custom decimal point
    parse_options = ParseOptions(delimiter=';')
    rows = b"a;b\n1.25;2,5\nNA;-3\n-4;NA"
    table = self.read_bytes(rows, parse_options=parse_options)
    # With the default '.' decimal point, 'a' is float and 'b' is string.
    schema = pa.schema([('a', pa.float64()),
                        ('b', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [1.25, None, -4.0],
        'b': ["2,5", "-3", "NA"],
    }

    # With decimal_point=',' the inference flips: 'b' becomes float.
    convert_options = ConvertOptions(decimal_point=',')
    table = self.read_bytes(rows, parse_options=parse_options,
                            convert_options=convert_options)
    schema = pa.schema([('a', pa.string()),
                        ('b', pa.float64())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': ["1.25", "NA", "-4"],
        'b': [2.5, -3.0, None],
    }
def test_simple_timestamps(self):
    # Infer a timestamp column
    rows = (b"a,b,c\n"
            b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n"
            b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n")
    table = self.read_bytes(rows)
    # Bare years stay int64; subsecond values get nanosecond resolution.
    schema = pa.schema([('a', pa.int64()),
                        ('b', pa.timestamp('s')),
                        ('c', pa.timestamp('ns'))])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [1970, 1989],
        'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)],
        'c': [datetime(1970, 1, 1, 0, 0, 0, 123000),
              datetime(1989, 7, 14, 1, 0, 0, 123456)],
    }
def test_timestamp_parsers(self):
    # Infer timestamps with custom parsers
    rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n"
    opts = ConvertOptions()

    # Default: only ISO-like 'b' values parse as timestamps.
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.string()),
                        ('b', pa.timestamp('s'))])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': ['1970/01/01', '1970/01/02'],
        'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
    }

    # A custom parser list *replaces* the default, so 'b' reverts to string.
    opts.timestamp_parsers = ['%Y/%m/%d']
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.timestamp('s')),
                        ('b', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
        'b': ['1980-01-01 00', '1980-01-02 00'],
    }

    # Combining a custom parser with ISO8601 covers both columns.
    opts.timestamp_parsers = ['%Y/%m/%d', ISO8601]
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.timestamp('s')),
                        ('b', pa.timestamp('s'))])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
        'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
    }
def test_dates(self):
    # Dates are inferred as date32 by default
    rows = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n"
    table = self.read_bytes(rows)
    schema = pa.schema([('a', pa.date32()),
                        ('b', pa.date32())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [date(1970, 1, 1), date(1971, 1, 1)],
        'b': [date(1970, 1, 2), date(1971, 1, 2)],
    }

    # Can ask for date types explicitly
    opts = ConvertOptions()
    opts.column_types = {'a': pa.date32(), 'b': pa.date64()}
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.date32()),
                        ('b', pa.date64())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [date(1970, 1, 1), date(1971, 1, 1)],
        'b': [date(1970, 1, 2), date(1971, 1, 2)],
    }

    # Can ask for timestamp types explicitly
    opts = ConvertOptions()
    opts.column_types = {'a': pa.timestamp('s'), 'b': pa.timestamp('ms')}
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.timestamp('s')),
                        ('b', pa.timestamp('ms'))])
    assert table.schema == schema
    # Values then come back as midnight datetimes rather than dates.
    assert table.to_pydict() == {
        'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)],
        'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)],
    }
def test_times(self):
    # Times are inferred as time32[s] by default
    from datetime import time

    rows = b"a,b\n12:34:56,12:34:56.789\n23:59:59,23:59:59.999\n"
    table = self.read_bytes(rows)
    # Column 'b' has subseconds, so cannot be inferred as time32[s]
    schema = pa.schema([('a', pa.time32('s')),
                        ('b', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [time(12, 34, 56), time(23, 59, 59)],
        'b': ["12:34:56.789", "23:59:59.999"],
    }

    # Can ask for time types explicitly
    opts = ConvertOptions()
    opts.column_types = {'a': pa.time64('us'), 'b': pa.time32('ms')}
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.time64('us')),
                        ('b', pa.time32('ms'))])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [time(12, 34, 56), time(23, 59, 59)],
        'b': [time(12, 34, 56, 789000), time(23, 59, 59, 999000)],
    }
def test_auto_dict_encode(self):
    """auto_dict_encode infers dictionary-encoded string/binary columns."""
    opts = ConvertOptions(auto_dict_encode=True)
    rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode()
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())),
                        ('b', pa.int64())])
    expected = {
        'a': ["ab", "cdé", "cdé", "ab"],
        'b': [1, 2, 3, 4],
    }
    assert table.schema == schema
    assert table.to_pydict() == expected

    # Cardinality 2 is exactly at the limit => still dict-encoded.
    opts.auto_dict_max_cardinality = 2
    table = self.read_bytes(rows, convert_options=opts)
    assert table.schema == schema
    assert table.to_pydict() == expected

    # Cardinality above max => plain-encoded
    opts.auto_dict_max_cardinality = 1
    table = self.read_bytes(rows, convert_options=opts)
    assert table.schema == pa.schema([('a', pa.string()),
                                      ('b', pa.int64())])
    assert table.to_pydict() == expected

    # With invalid UTF8, not checked
    opts.auto_dict_max_cardinality = 50
    opts.check_utf8 = False
    rows = b"a,b\nab,1\ncd\xff,2\nab,3"
    table = self.read_bytes(rows, convert_options=opts,
                            validate_full=False)
    assert table.schema == schema
    dict_values = table['a'].chunk(0).dictionary
    assert len(dict_values) == 2
    assert dict_values[0].as_py() == "ab"
    assert dict_values[1].as_buffer() == b"cd\xff"

    # With invalid UTF8, checked
    opts.check_utf8 = True
    table = self.read_bytes(rows, convert_options=opts)
    # UTF-8 checking demotes the dictionary value type to binary.
    schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())),
                        ('b', pa.int64())])
    expected = {
        'a': [b"ab", b"cd\xff", b"ab"],
        'b': [1, 2, 3],
    }
    assert table.schema == schema
    assert table.to_pydict() == expected
def test_custom_nulls(self):
    # Infer nulls with custom values
    opts = ConvertOptions(null_values=['Xxx', 'Zzz'])
    rows = b"""a,b,c,d\nZzz,"Xxx",1,2\nXxx,#N/A,,Zzz\n"""
    table = self.read_bytes(rows, convert_options=opts)
    # By default only non-string columns honour the null sentinels.
    schema = pa.schema([('a', pa.null()),
                        ('b', pa.string()),
                        ('c', pa.string()),
                        ('d', pa.int64())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [None, None],
        'b': ["Xxx", "#N/A"],
        'c': ["1", ""],
        'd': [2, None],
    }

    # strings_can_be_null makes the sentinels apply to string columns too.
    opts = ConvertOptions(null_values=['Xxx', 'Zzz'],
                          strings_can_be_null=True)
    table = self.read_bytes(rows, convert_options=opts)
    assert table.to_pydict() == {
        'a': [None, None],
        'b': [None, "#N/A"],
        'c': ["1", ""],
        'd': [2, None],
    }

    # ...unless the value was quoted and quoted strings are exempted.
    opts.quoted_strings_can_be_null = False
    table = self.read_bytes(rows, convert_options=opts)
    assert table.to_pydict() == {
        'a': [None, None],
        'b': ["Xxx", "#N/A"],
        'c': ["1", ""],
        'd': [2, None],
    }

    # An empty null_values list disables null detection entirely.
    opts = ConvertOptions(null_values=[])
    rows = b"a,b\n#N/A,\n"
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.string()),
                        ('b', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': ["#N/A"],
        'b': [""],
    }
def test_custom_bools(self):
    # Infer booleans with custom values
    opts = ConvertOptions(true_values=['T', 'yes'],
                          false_values=['F', 'no'])
    rows = (b"a,b,c\n"
            b"True,T,t\n"
            b"False,F,f\n"
            b"True,yes,yes\n"
            b"False,no,no\n"
            b"N/A,N/A,N/A\n")
    table = self.read_bytes(rows, convert_options=opts)
    # Custom value lists replace the defaults: 'a' (True/False) and
    # 'c' (mixed casings) fall back to string; only 'b' is boolean.
    schema = pa.schema([('a', pa.string()),
                        ('b', pa.bool_()),
                        ('c', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': ["True", "False", "True", "False", "N/A"],
        'b': [True, False, True, False, None],
        'c': ["t", "f", "yes", "no", "N/A"],
    }
def test_column_types(self):
    # Ask for specific column types in ConvertOptions
    opts = ConvertOptions(column_types={'b': 'float32',
                                        'c': 'string',
                                        'd': 'boolean',
                                        'e': pa.decimal128(11, 2),
                                        'zz': 'null'})
    rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n"
    table = self.read_bytes(rows, convert_options=opts)
    # 'a' keeps its inferred type; 'zz' is absent from the CSV and is
    # silently ignored.
    schema = pa.schema([('a', pa.int64()),
                        ('b', pa.float32()),
                        ('c', pa.string()),
                        ('d', pa.bool_()),
                        ('e', pa.decimal128(11, 2))])
    expected = {
        'a': [1, 4],
        'b': [2.0, -5.0],
        'c': ["3", "6"],
        'd': [True, False],
        'e': [Decimal("1.00"), Decimal("0.00")]
    }
    assert table.schema == schema
    assert table.to_pydict() == expected

    # Pass column_types as schema
    opts = ConvertOptions(
        column_types=pa.schema([('b', pa.float32()),
                                ('c', pa.string()),
                                ('d', pa.bool_()),
                                ('e', pa.decimal128(11, 2)),
                                ('zz', pa.bool_())]))
    table = self.read_bytes(rows, convert_options=opts)
    assert table.schema == schema
    assert table.to_pydict() == expected

    # One of the columns in column_types fails converting
    rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n"
    with pytest.raises(pa.ArrowInvalid) as exc:
        self.read_bytes(rows, convert_options=opts)
    err = str(exc.value)
    assert "In CSV column #1: " in err
    assert "CSV conversion error to float: invalid value 'XXX'" in err
def test_column_types_dict(self):
    # Ask for dict-encoded column types in ConvertOptions
    column_types = [
        ('a', pa.dictionary(pa.int32(), pa.utf8())),
        ('b', pa.dictionary(pa.int32(), pa.int64())),
        ('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))),
        ('d', pa.dictionary(pa.int32(), pa.large_utf8()))]

    opts = ConvertOptions(column_types=dict(column_types))
    rows = (b"a,b,c,d\n"
            b"abc,123456,1.0,zz\n"
            b"defg,123456,0.5,xx\n"
            b"abc,N/A,1.0,xx\n")
    table = self.read_bytes(rows, convert_options=opts)

    schema = pa.schema(column_types)
    expected = {
        'a': ["abc", "defg", "abc"],
        'b': [123456, 123456, None],
        'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")],
        'd': ["zz", "xx", "xx"],
    }
    assert table.schema == schema
    assert table.to_pydict() == expected

    # Unsupported index type
    column_types[0] = ('a', pa.dictionary(pa.int8(), pa.utf8()))
    opts = ConvertOptions(column_types=dict(column_types))
    with pytest.raises(NotImplementedError):
        table = self.read_bytes(rows, convert_options=opts)
def test_column_types_with_column_names(self):
    # When both `column_names` and `column_types` are given, names
    # in `column_types` should refer to names in `column_names`
    data = b"a,b\nc,d\ne,f\n"
    result = self.read_bytes(
        data,
        read_options=ReadOptions(column_names=['x', 'y']),
        convert_options=ConvertOptions(column_types={'x': pa.binary()}))
    assert result.schema == pa.schema([('x', pa.binary()),
                                       ('y', pa.string())])
    assert result.to_pydict() == {
        'x': [b'a', b'c', b'e'],
        'y': ['b', 'd', 'f'],
    }
def test_no_ending_newline(self):
    # No \n after last line
    result = self.read_bytes(b"a,b,c\n1,2,3\n4,5,6")
    assert result.to_pydict() == {
        'a': [1, 4],
        'b': [2, 5],
        'c': [3, 6],
    }
def test_trivial(self):
    # A bit pointless, but at least it shouldn't crash
    result = self.read_bytes(b",\n\n")
    assert result.to_pydict() == {'': []}
def test_empty_lines(self):
    """Empty lines are ignored by default, kept as nulls otherwise."""
    rows = b"a,b\n\r1,2\r\n\r\n3,4\r\n"
    table = self.read_bytes(rows)
    assert table.to_pydict() == {
        'a': [1, 3],
        'b': [2, 4],
    }

    # ignore_empty_lines=False turns each empty line into an all-null row.
    parse_options = ParseOptions(ignore_empty_lines=False)
    table = self.read_bytes(rows, parse_options=parse_options)
    assert table.to_pydict() == {
        'a': [None, 1, None, 3],
        'b': [None, 2, None, 4],
    }

    # Skipping the header and first empty line makes "1,2" the header row.
    read_options = ReadOptions(skip_rows=2)
    table = self.read_bytes(rows, parse_options=parse_options,
                            read_options=read_options)
    assert table.to_pydict() == {
        '1': [None, 3],
        '2': [None, 4],
    }
def test_invalid_csv(self):
    """Structurally invalid CSV raises ArrowInvalid with a clear message."""
    # A row that is too short...
    with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
        self.read_bytes(b"a,b,c\n1,2\n4,5,6\n")
    # ...or truncated at the end of the data.
    with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
        self.read_bytes(b"a,b,c\n1,2,3\n4")
    # Inputs with no content at all are rejected outright.
    for payload in (b"", b"\n", b"\r\n", b"\r", b"\n\n"):
        with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
            self.read_bytes(payload)
def test_options_delimiter(self):
    """ParseOptions.delimiter switches which character separates columns."""
    data = b"a;b,c\nde,fg;eh\n"
    # Default comma delimiter: ';' is ordinary data.
    assert self.read_bytes(data).to_pydict() == {
        'a;b': ['de'],
        'c': ['fg;eh'],
    }
    # Semicolon delimiter: ',' becomes ordinary data.
    result = self.read_bytes(data, parse_options=ParseOptions(delimiter=';'))
    assert result.to_pydict() == {
        'a': ['de,fg'],
        'b,c': ['eh'],
    }
def test_small_random_csv(self):
    """Round-trip a small randomly generated CSV."""
    data, expected = make_random_csv(num_cols=2, num_rows=10)
    result = self.read_bytes(data)
    assert result.schema == expected.schema
    assert result.equals(expected)
    assert result.to_pydict() == expected.to_pydict()
def test_stress_block_sizes(self):
    # Test a number of small block sizes to stress block stitching
    csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
    block_sizes = [11, 12, 13, 17, 37, 111]
    # Exercise both with and without the trailing newline.
    csvs = [csv_base, csv_base.rstrip(b'\r\n')]
    for csv in csvs:
        for block_size in block_sizes:
            read_options = ReadOptions(block_size=block_size)
            table = self.read_bytes(csv, read_options=read_options)
            assert table.schema == expected.schema
            if not table.equals(expected):
                # Better error output
                assert table.to_pydict() == expected.to_pydict()
def test_stress_convert_options_blowup(self):
# ARROW-6481: A convert_options with a very large number of columns
# should not blow memory and CPU time.
try:
clock = time.thread_time
except AttributeError:
clock = time.time
num_columns = 10000
col_names = ["K{}".format(i) for i in range(num_columns)]
csv = make_empty_csv(col_names)
t1 = clock()
convert_options = ConvertOptions(
column_types={k: pa.string() for k in col_names[::2]})
table = self.read_bytes(csv, convert_options=convert_options)
dt = clock() - t1
# Check that processing time didn't blow up.
# This is a conservative check (it takes less than 300 ms
# in debug mode on my local machine).
assert dt <= 10.0
# Check result
assert table.num_columns == num_columns
assert table.num_rows == 0
assert table.column_names == col_names
    def test_cancellation(self):
        # Verify that a SIGINT raised while a CSV read is in progress cancels
        # the read inside Arrow C++ (surfacing as ArrowCancelled chained to
        # the KeyboardInterrupt), not merely in the Python scaffolding.
        if (threading.current_thread().ident !=
                threading.main_thread().ident):
            pytest.skip("test only works from main Python thread")
        # Skips test if not available
        raise_signal = util.get_raise_signal()
        signum = signal.SIGINT
        def signal_from_thread():
            # Give our workload a chance to start up
            time.sleep(0.2)
            raise_signal(signum)
        # We start with a small CSV reading workload and increase its size
        # until it's large enough to get an interruption during it, even in
        # release mode on fast machines.
        last_duration = 0.0
        workload_size = 100_000
        attempts = 0
        # NOTE(review): `attempts` is initialised and tested but never
        # incremented in the loop body, so the bound relies solely on the
        # duration check — confirm whether an increment was intended.
        while last_duration < 5.0 and attempts < 10:
            print("workload size:", workload_size)
            large_csv = b"a,b,c\n" + b"1,2,3\n" * workload_size
            exc_info = None
            try:
                # We use a signal fd to reliably ensure that the signal
                # has been delivered to Python, regardless of how exactly
                # it was caught.
                with util.signal_wakeup_fd() as sigfd:
                    try:
                        t = threading.Thread(target=signal_from_thread)
                        t.start()
                        t1 = time.time()
                        try:
                            self.read_bytes(large_csv)
                        except KeyboardInterrupt as e:
                            exc_info = e
                            last_duration = time.time() - t1
                    finally:
                        # Wait for signal to arrive if it didn't already,
                        # to avoid getting a KeyboardInterrupt after the
                        # `except` block below.
                        select.select([sigfd], [], [sigfd], 10.0)
            except KeyboardInterrupt:
                # KeyboardInterrupt didn't interrupt `read_bytes` above.
                pass
            if exc_info is not None:
                # We managed to get `self.read_bytes` interrupted, see if it
                # was actually interrupted inside Arrow C++ or in the Python
                # scaffolding.
                if exc_info.__context__ is not None:
                    # Interrupted inside Arrow C++, we're satisfied now
                    break
            # Increase workload size to get a better chance
            workload_size = workload_size * 3
        if exc_info is None:
            pytest.fail("Failed to get an interruption during CSV reading")
        # Interruption should have arrived timely
        assert last_duration <= 1.0
        # The chained context must be the Arrow-level cancellation.
        e = exc_info.__context__
        assert isinstance(e, pa.ArrowCancelled)
        assert e.signum == signum
def test_cancellation_disabled(self):
# ARROW-12622: reader would segfault when the cancelling signal
# handler was not enabled (e.g. if disabled, or if not on the
# main thread)
t = threading.Thread(
target=lambda: self.read_bytes(b"f64\n0.1"))
t.start()
t.join()
class TestSerialCSVTableRead(BaseCSVTableRead):
    """Run the table-read suite with multithreaded reading disabled."""

    @property
    def use_threads(self):
        return False
class TestThreadedCSVTableRead(BaseCSVTableRead):
    """Run the table-read suite with multithreaded reading enabled."""

    @property
    def use_threads(self):
        return True
class BaseStreamingCSVRead(BaseTestCSV):
    # Shared test suite for the streaming (batch-at-a-time) CSV reader.
    # Concrete subclasses select threading via the `use_threads` property.
    def open_csv(self, csv, *args, **kwargs):
        """
        Open the CSV input with pyarrow's open_csv and return the reader.

        csv     The CSV bytes (or buffer / file object).
        args    Positional arguments forwarded to pyarrow's open_csv.
        kwargs  Keyword arguments forwarded to pyarrow's open_csv; a
                ReadOptions is injected (or amended) to force the suite's
                use_threads setting.
        """
        read_options = kwargs.setdefault('read_options', ReadOptions())
        read_options.use_threads = self.use_threads
        return open_csv(csv, *args, **kwargs)
    def open_bytes(self, b, **kwargs):
        # Convenience wrapper: wrap raw bytes in an Arrow buffer first.
        return self.open_csv(pa.py_buffer(b), **kwargs)
    def check_reader(self, reader, expected_schema, expected_data):
        # Validate the schema and the exact sequence of batches produced.
        assert reader.schema == expected_schema
        batches = list(reader)
        assert len(batches) == len(expected_data)
        for batch, expected_batch in zip(batches, expected_data):
            batch.validate(full=True)
            assert batch.schema == expected_schema
            assert batch.to_pydict() == expected_batch
    def read_bytes(self, b, **kwargs):
        # Read every batch and concatenate them into a single Table.
        return self.open_bytes(b, **kwargs).read_all()
    def test_file_object(self):
        # The streaming reader accepts arbitrary Python file objects.
        data = b"a,b\n1,2\n3,4\n"
        expected_data = {'a': [1, 3], 'b': [2, 4]}
        bio = io.BytesIO(data)
        reader = self.open_csv(bio)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        self.check_reader(reader, expected_schema, [expected_data])
    def test_header(self):
        # A header-only file yields null-typed columns and zero batches.
        rows = b"abc,def,gh\n"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('abc', pa.null()),
                                     ('def', pa.null()),
                                     ('gh', pa.null())])
        self.check_reader(reader, expected_schema, [])
    def test_inference(self):
        # Inference is done on first block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])
        read_options = ReadOptions()
        read_options.block_size = len(rows)
        reader = self.open_bytes(rows, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc', 'gh'],
                            'b': [b'456', b'de\xff', b'ij']}])
        # With a smaller block, the last row lands in a second batch but the
        # schema inferred from the first block still applies.
        read_options.block_size = len(rows) - 1
        reader = self.open_bytes(rows, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc'],
                            'b': [b'456', b'de\xff']},
                           {'a': ['gh'],
                            'b': [b'ij']}])
    def test_inference_failure(self):
        # Inference on first block, then conversion failure on second block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        read_options = ReadOptions()
        read_options.block_size = len(rows) - 7
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        assert reader.schema == expected_schema
        assert reader.read_next_batch().to_pydict() == {
            'a': [123], 'b': [456]
        }
        # Second block
        with pytest.raises(ValueError,
                           match="CSV conversion error to int64"):
            reader.read_next_batch()
        # EOF
        with pytest.raises(StopIteration):
            reader.read_next_batch()
    def test_invalid_csv(self):
        # CSV errors on first block
        rows = b"a,b\n1,2,3\n4,5\n6,7\n"
        read_options = ReadOptions()
        read_options.block_size = 10
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader = self.open_bytes(
                rows, read_options=read_options)
        # CSV errors on second block
        rows = b"a,b\n1,2\n3,4,5\n6,7\n"
        read_options.block_size = 8
        reader = self.open_bytes(rows, read_options=read_options)
        assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader.read_next_batch()
        # Cannot continue after a parse error
        with pytest.raises(StopIteration):
            reader.read_next_batch()
    def test_options_delimiter(self):
        # ParseOptions.delimiter changes both column splitting and naming.
        rows = b"a;b,c\nde,fg;eh\n"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('a;b', pa.string()),
                                     ('c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a;b': ['de'],
                            'c': ['fg;eh']}])
        opts = ParseOptions(delimiter=';')
        reader = self.open_bytes(rows, parse_options=opts)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b,c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ['de,fg'],
                            'b,c': ['eh']}])
    def test_no_ending_newline(self):
        # No \n after last line
        rows = b"a,b,c\n1,2,3\n4,5,6"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64()),
                                     ('c', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'a': [1, 4],
                            'b': [2, 5],
                            'c': [3, 6]}])
    def test_empty_file(self):
        # Opening an empty input fails immediately.
        with pytest.raises(ValueError, match="Empty CSV file"):
            self.open_bytes(b"")
    def test_column_options(self):
        # With column_names
        rows = b"1,2,3\n4,5,6"
        read_options = ReadOptions()
        read_options.column_names = ['d', 'e', 'f']
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('d', pa.int64()),
                                     ('e', pa.int64()),
                                     ('f', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'d': [1, 4],
                            'e': [2, 5],
                            'f': [3, 6]}])
        # With include_columns
        convert_options = ConvertOptions()
        convert_options.include_columns = ['f', 'e']
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'e': [2, 5],
                            'f': [3, 6]}])
        # With column_types
        convert_options.column_types = {'e': pa.string()}
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'e': ["2", "5"],
                            'f': [3, 6]}])
        # Missing columns in include_columns
        convert_options.include_columns = ['g', 'f', 'e']
        with pytest.raises(
                KeyError,
                match="Column 'g' in include_columns does not exist"):
            reader = self.open_bytes(rows, read_options=read_options,
                                     convert_options=convert_options)
        # include_missing_columns materialises 'g' as a null column instead.
        convert_options.include_missing_columns = True
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.null()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])
        # column_types may also type a missing (all-null) column.
        convert_options.column_types = {'e': pa.string(), 'g': pa.float64()}
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.float64()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])
    def test_encoding(self):
        # latin-1 (invalid utf-8)
        rows = b"a,b\nun,\xe9l\xe9phant"
        read_options = ReadOptions()
        reader = self.open_bytes(rows, read_options=read_options)
        # Without an explicit encoding, the invalid-utf8 column is binary.
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': [b"\xe9l\xe9phant"]}])
        read_options.encoding = 'latin1'
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])
        # utf-16
        rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
                b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00')
        read_options.encoding = 'utf16'
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])
    def test_small_random_csv(self):
        csv, expected = make_random_csv(num_cols=2, num_rows=10)
        reader = self.open_bytes(csv)
        table = reader.read_all()
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()
    def test_stress_block_sizes(self):
        # Test a number of small block sizes to stress block stitching
        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
        block_sizes = [19, 21, 23, 26, 37, 111]
        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
        for csv in csvs:
            for block_size in block_sizes:
                # Need at least two lines for type inference
                assert csv[:block_size].count(b'\n') >= 2
                read_options = ReadOptions(block_size=block_size)
                reader = self.open_bytes(
                    csv, read_options=read_options)
                table = reader.read_all()
                assert table.schema == expected.schema
                if not table.equals(expected):
                    # Better error output
                    assert table.to_pydict() == expected.to_pydict()
    def test_batch_lifetime(self):
        gc.collect()
        old_allocated = pa.total_allocated_bytes()
        # Memory occupation should not grow with CSV file size
        def check_one_batch(reader, expected):
            batch = reader.read_next_batch()
            assert batch.to_pydict() == expected
        rows = b"10,11\n12,13\n14,15\n16,17\n"
        read_options = ReadOptions()
        read_options.column_names = ['a', 'b']
        read_options.block_size = 6
        reader = self.open_bytes(rows, read_options=read_options)
        check_one_batch(reader, {'a': [10], 'b': [11]})
        allocated_after_first_batch = pa.total_allocated_bytes()
        # Consuming each further batch must not grow the allocation
        # beyond the level after the first batch.
        check_one_batch(reader, {'a': [12], 'b': [13]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [14], 'b': [15]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [16], 'b': [17]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        with pytest.raises(StopIteration):
            reader.read_next_batch()
        assert pa.total_allocated_bytes() == old_allocated
        reader = None
        assert pa.total_allocated_bytes() == old_allocated
    def test_header_skip_rows(self):
        super().test_header_skip_rows()
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
        # Skipping all rows immediately results in end of iteration
        opts = ReadOptions()
        opts.skip_rows = 4
        opts.column_names = ['ab', 'cd']
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()
    def test_skip_rows_after_names(self):
        super().test_skip_rows_after_names()
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
        # Skipping all rows immediately results in end of iteration
        opts = ReadOptions()
        opts.skip_rows_after_names = 3
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()
        # Skipping beyond all rows immediately results in end of iteration
        opts.skip_rows_after_names = 99999
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()
class TestSerialStreamingCSVRead(BaseStreamingCSVRead, unittest.TestCase):
    """Run the streaming suite with multithreaded reading disabled."""

    @property
    def use_threads(self):
        return False
class TestThreadedStreamingCSVRead(BaseStreamingCSVRead, unittest.TestCase):
    """Run the streaming suite with multithreaded reading enabled."""

    @property
    def use_threads(self):
        return True
class BaseTestCompressedCSVRead:
    """Shared machinery for reading CSV files through a compression codec.

    Subclasses provide `csv_filename` and `write_file`.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def read_csv(self, csv_path):
        # Codec support is build-dependent: skip instead of failing.
        try:
            return read_csv(csv_path)
        except pa.ArrowNotImplementedError as e:
            pytest.skip(str(e))

    def test_random_csv(self):
        data, expected = make_random_csv(num_cols=2, num_rows=100)
        path = os.path.join(self.tmpdir, self.csv_filename)
        self.write_file(path, data)
        result = self.read_csv(path)
        result.validate(full=True)
        assert result.schema == expected.schema
        assert result.equals(expected)
        assert result.to_pydict() == expected.to_pydict()
class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    """Compressed-CSV suite over gzip files."""
    csv_filename = "compressed.csv.gz"

    def write_file(self, path, contents):
        with gzip.open(path, 'wb', 3) as f:
            f.write(contents)

    def test_concatenated(self):
        # ARROW-5974: multiple gzip members appended to one file must be
        # decompressed as a single continuous stream.
        path = os.path.join(self.tmpdir, self.csv_filename)
        with gzip.open(path, 'wb', 3) as f:
            f.write(b"ab,cd\nef,gh\n")
        with gzip.open(path, 'ab', 3) as f:
            f.write(b"ij,kl\nmn,op\n")
        result = self.read_csv(path)
        assert result.to_pydict() == {
            'ab': ['ef', 'ij', 'mn'],
            'cd': ['gh', 'kl', 'op'],
        }
class TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    """Compressed-CSV suite over bzip2 files."""
    csv_filename = "compressed.csv.bz2"

    def write_file(self, path, contents):
        with bz2.BZ2File(path, 'w') as f:
            f.write(contents)
def test_read_csv_does_not_close_passed_file_handles():
    # ARROW-4823: reading must leave caller-owned file objects open.
    handle = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
    read_csv(handle)
    assert not handle.closed
def test_write_read_round_trip():
    # Writing a table (or record batch) to CSV and reading it back must
    # reproduce the original table, with and without a header line, and
    # across delimiter choices.
    t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"])
    record_batch = t.to_batches(max_chunksize=4)[0]
    for data in [t, record_batch]:
        # Test with header
        buf = io.BytesIO()
        write_csv(data, buf, WriteOptions(include_header=True))
        buf.seek(0)
        assert t == read_csv(buf)
        # Test without header
        buf = io.BytesIO()
        write_csv(data, buf, WriteOptions(include_header=False))
        buf.seek(0)
        # Without a header the column names must be supplied on read.
        read_options = ReadOptions(column_names=t.column_names)
        assert t == read_csv(buf, read_options=read_options)
    # Test with writer
    for read_options, parse_options, write_options in [
        (None, None, WriteOptions(include_header=True)),
        (ReadOptions(column_names=t.column_names), None,
         WriteOptions(include_header=False)),
        (None, ParseOptions(delimiter='|'),
         WriteOptions(include_header=True, delimiter='|')),
        (ReadOptions(column_names=t.column_names),
         ParseOptions(delimiter='\t'),
         WriteOptions(include_header=False, delimiter='\t')),
    ]:
        # Whole table written in one call ...
        buf = io.BytesIO()
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            writer.write_table(t)
        buf.seek(0)
        assert t == read_csv(buf, read_options=read_options,
                             parse_options=parse_options)
        # ... and batch by batch; both must round-trip identically.
        buf = io.BytesIO()
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            for batch in t.to_batches(max_chunksize=1):
                writer.write_batch(batch)
        buf.seek(0)
        assert t == read_csv(buf, read_options=read_options,
                             parse_options=parse_options)
def test_read_csv_reference_cycle():
    # ARROW-13187: the resulting table must not be trapped in a reference
    # cycle, i.e. it should be freed without the cyclic GC running.
    def make_table_ref():
        data = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
        return weakref.ref(read_csv(data))
    with util.disabled_gc():
        ref = make_table_ref()
        # Refcounting alone must have freed the table already.
        assert ref() is None
|
scdlbot.py | # -*- coding: utf-8 -*-
"""Main module."""
import configparser
import gc
import pathlib
import random
import shelve
import shutil
from datetime import datetime
from multiprocessing import Process, Queue
from queue import Empty
from subprocess import PIPE, TimeoutExpired
from urllib.parse import urljoin
from uuid import uuid4
import ffmpeg
from boltons.urlutils import find_all_links, URL
from mutagen.id3 import ID3
from mutagen.mp3 import EasyMP3 as MP3
# from pydub import AudioSegment
from pyshorteners import Shortener
from telegram import Message, Chat, ChatMember, MessageEntity, ChatAction, InlineKeyboardMarkup, InlineKeyboardButton, \
InlineQueryResultAudio
from telegram.error import (TelegramError, Unauthorized, BadRequest,
TimedOut, ChatMigrated, NetworkError)
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, InlineQueryHandler, CallbackQueryHandler
from telegram.ext.dispatcher import run_async
from scdlbot.utils import *
logger = logging.getLogger(__name__)
class ScdlBot:
def __init__(self, tg_bot_token, botan_token=None, google_shortener_api_key=None, proxy=None,
sc_auth_token=None, store_chat_id=None, no_flood_chat_ids=None,
alert_chat_ids=None, dl_dir="/tmp/scdlbot", dl_timeout=300,
max_convert_file_size=80_000_000, chat_storage_file="/tmp/scdlbotdata", app_url=None,
serve_audio=False, cookies_file=None, source_ips=None):
self.SERVE_AUDIO = serve_audio
if self.SERVE_AUDIO:
self.MAX_TG_FILE_SIZE = 19_000_000
else:
self.MAX_TG_FILE_SIZE = 45_000_000
self.SITES = {
"sc": "soundcloud",
"scapi": "api.soundcloud",
"bc": "bandcamp",
"yt": "youtu",
}
self.APP_URL = app_url
self.DL_TIMEOUT = dl_timeout
self.MAX_CONVERT_FILE_SIZE = max_convert_file_size
self.HELP_TEXT = get_response_text('help.tg.md')
self.SETTINGS_TEXT = get_response_text('settings.tg.md')
self.DL_TIMEOUT_TEXT = get_response_text('dl_timeout.txt').format(self.DL_TIMEOUT // 60)
self.WAIT_BIT_TEXT = [get_response_text('wait_bit.txt'), get_response_text('wait_beat.txt'),
get_response_text('wait_beet.txt')]
self.NO_AUDIO_TEXT = get_response_text('no_audio.txt')
self.NO_URLS_TEXT = get_response_text('no_urls.txt')
self.OLG_MSG_TEXT = get_response_text('old_msg.txt')
self.REGION_RESTRICTION_TEXT = get_response_text('region_restriction.txt')
self.DIRECT_RESTRICTION_TEXT = get_response_text('direct_restriction.txt')
self.LIVE_RESTRICTION_TEXT = get_response_text('live_restriction.txt')
# self.chat_storage = {}
self.chat_storage = shelve.open(chat_storage_file, writeback=True)
for chat_id in no_flood_chat_ids:
self.init_chat(chat_id=chat_id, chat_type=Chat.PRIVATE if chat_id > 0 else Chat.SUPERGROUP, flood="no")
self.ALERT_CHAT_IDS = set(alert_chat_ids) if alert_chat_ids else set()
self.STORE_CHAT_ID = store_chat_id
self.DL_DIR = dl_dir
self.COOKIES_DOWNLOAD_FILE = "/tmp/scdlbot_cookies.txt"
self.botan_token = botan_token if botan_token else None
self.shortener = Shortener('Google', api_key=google_shortener_api_key) if google_shortener_api_key else None
self.proxy = proxy
# https://yandex.com/support/music-app-ios/search-and-listen/listening-abroad.html
self.cookies_file = cookies_file
self.source_ips = source_ips
config = configparser.ConfigParser()
config['scdl'] = {}
config['scdl']['path'] = self.DL_DIR
if sc_auth_token:
config['scdl']['auth_token'] = sc_auth_token
config_dir = os.path.join(os.path.expanduser('~'), '.config', 'scdl')
config_path = os.path.join(config_dir, 'scdl.cfg')
os.makedirs(config_dir, exist_ok=True)
with open(config_path, 'w') as config_file:
config.write(config_file)
self.updater = Updater(token=tg_bot_token)
dispatcher = self.updater.dispatcher
start_command_handler = CommandHandler('start', self.help_command_callback)
dispatcher.add_handler(start_command_handler)
help_command_handler = CommandHandler('help', self.help_command_callback)
dispatcher.add_handler(help_command_handler)
settings_command_handler = CommandHandler('settings', self.settings_command_callback)
dispatcher.add_handler(settings_command_handler)
dl_command_handler = CommandHandler('dl', self.common_command_callback, filters=~ Filters.forwarded,
pass_args=True)
dispatcher.add_handler(dl_command_handler)
link_command_handler = CommandHandler('link', self.common_command_callback, filters=~ Filters.forwarded,
pass_args=True)
dispatcher.add_handler(link_command_handler)
message_with_links_handler = MessageHandler(Filters.text & (Filters.entity(MessageEntity.URL) |
Filters.entity(MessageEntity.TEXT_LINK)),
self.common_command_callback)
dispatcher.add_handler(message_with_links_handler)
button_query_handler = CallbackQueryHandler(self.button_query_callback)
dispatcher.add_handler(button_query_handler)
inline_query_handler = InlineQueryHandler(self.inline_query_callback)
dispatcher.add_handler(inline_query_handler)
unknown_handler = MessageHandler(Filters.command, self.unknown_command_callback)
dispatcher.add_handler(unknown_handler)
dispatcher.add_error_handler(self.error_callback)
self.bot_username = self.updater.bot.get_me().username
self.RANT_TEXT_PRIVATE = "Read /help to learn how to use me"
self.RANT_TEXT_PUBLIC = "[Start me in PM to read help and learn how to use me](t.me/{}?start=1)".format(
self.bot_username)
    def start(self, use_webhook=False, webhook_port=None, cert_file=None, cert_key_file=None,
              url_path="scdlbot", webhook_host="0.0.0.0"):
        """Start receiving updates via webhook or long polling, then idle.

        use_webhook    When True, listen on webhook_host:webhook_port and
                       register APP_URL/url_path with Telegram; otherwise poll.
        cert_file      Optional public certificate uploaded to Telegram.
        cert_key_file  Currently unused (see commented-out kwargs below).
        """
        if use_webhook:
            self.updater.start_webhook(listen=webhook_host,
                                       port=webhook_port,
                                       url_path=url_path)
            # cert=cert_file if cert_file else None,
            # key=cert_key_file if cert_key_file else None,
            # webhook_url=urljoin(app_url, url_path))
            # NOTE(review): the certificate file handle opened here is never
            # explicitly closed — confirm whether that is acceptable.
            self.updater.bot.set_webhook(url=urljoin(self.APP_URL, url_path),
                                         certificate=open(cert_file, 'rb') if cert_file else None)
        else:
            self.updater.start_polling()
        logger.warning("Bot started")
        # Block until the process receives a stop signal.
        self.updater.idle()
def unknown_command_callback(self, bot, update):
pass
# bot.send_message(chat_id=update.message.chat_id, text="Unknown command")
    def error_callback(self, bot, update, error):
        """Dispatcher error handler: classify the error by re-raising it.

        Each python-telegram-bot error type is logged at debug level; any
        exception that is not a TelegramError propagates out of this handler.
        The except-clause order matters (subclasses before TelegramError).
        """
        try:
            raise error
        except Unauthorized:
            # remove update.message.chat_id from conversation list
            logger.debug('Update {} caused Unauthorized error: {}'.format(update, error))
        except BadRequest:
            # handle malformed requests - read more below!
            logger.debug('Update {} caused BadRequest error: {}'.format(update, error))
        except TimedOut:
            # handle slow connection problems
            logger.debug('Update {} caused TimedOut error: {}'.format(update, error))
        except NetworkError:
            # handle other connection problems
            logger.debug('Update {} caused NetworkError: {}'.format(update, error))
        except ChatMigrated as e:
            # the chat_id of a group has changed, use e.new_chat_id instead
            logger.debug('Update {} caused ChatMigrated error: {}'.format(update, error))
        except TelegramError:
            # handle all other telegram related errors
            logger.debug('Update {} caused TelegramError: {}'.format(update, error))
def init_chat(self, message=None, chat_id=None, chat_type=None, flood="yes"):
if message:
chat_id = str(message.chat_id)
chat_type = message.chat.type
else:
chat_id = str(chat_id)
if chat_id not in self.chat_storage:
self.chat_storage[chat_id] = {}
if "settings" not in self.chat_storage[chat_id]:
self.chat_storage[chat_id]["settings"] = {}
if "mode" not in self.chat_storage[chat_id]["settings"]:
if chat_type == Chat.PRIVATE:
self.chat_storage[chat_id]["settings"]["mode"] = "dl"
else:
self.chat_storage[chat_id]["settings"]["mode"] = "ask"
if "flood" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["flood"] = flood
if "rant_msg_ids" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["rant_msg_ids"] = []
self.chat_storage.sync()
# logger.debug("Current chat_storage: %r", self.chat_storage)
def cleanup_chat(self, chat_id):
for msg_id in self.chat_storage[str(chat_id)]:
if msg_id != "settings":
timedelta = datetime.now() - self.chat_storage[str(chat_id)][msg_id]["message"].date
if timedelta.days > 0:
self.chat_storage[str(chat_id)].pop(msg_id)
self.chat_storage.sync()
@run_async
def log_and_track(self, event_name, message=None):
logger.info("Event: %s", event_name)
if message:
# TODO: add to local db
if self.botan_token:
return botan_track(self.botan_token, message, event_name)
else:
return False
def rant_and_cleanup(self, bot, chat_id, rant_text, reply_to_message_id=None):
rant_msg = bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=rant_text, parse_mode='Markdown', disable_web_page_preview=True)
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if flood == "no":
for rant_msg_id in self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"]:
try:
bot.delete_message(chat_id=chat_id, message_id=rant_msg_id)
except:
pass
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].remove(rant_msg_id)
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].append(rant_msg.message_id)
self.chat_storage.sync()
def help_command_callback(self, bot, update):
self.init_chat(update.message)
event_name = "help"
entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
for entity_value in entities.values():
event_name = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
break
self.log_and_track(event_name, update.message)
chat_id = update.message.chat_id
chat_type = update.message.chat.type
reply_to_message_id = update.message.message_id
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if chat_type != Chat.PRIVATE and flood == "no":
self.rant_and_cleanup(bot, chat_id, self.RANT_TEXT_PUBLIC, reply_to_message_id=reply_to_message_id)
else:
bot.send_message(chat_id=chat_id, text=self.HELP_TEXT,
parse_mode='Markdown', disable_web_page_preview=True)
def get_wait_text(self):
return random.choice(self.WAIT_BIT_TEXT)
def get_settings_inline_keyboard(self, chat_id):
mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
emoji_yes = "✅"
emoji_no = "❌"
button_dl = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "dl" else emoji_no, "Download"]),
callback_data=" ".join(["settings", "dl"]))
button_link = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "link" else emoji_no, "Links"]),
callback_data=" ".join(["settings", "link"]))
button_ask = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "ask" else emoji_no, "Ask"]),
callback_data=" ".join(["settings", "ask"]))
button_flood = InlineKeyboardButton(text=" ".join([emoji_yes if flood == "yes" else emoji_no, "Captions"]),
callback_data=" ".join(["settings", "flood"]))
button_close = InlineKeyboardButton(text=" ".join([emoji_no, "Close settings"]),
callback_data=" ".join(["settings", "close"]))
inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_ask], [button_flood, button_close]])
return inline_keyboard
def settings_command_callback(self, bot, update):
self.init_chat(update.message)
self.log_and_track("settings")
chat_id = update.message.chat_id
bot.send_message(chat_id=chat_id, parse_mode='Markdown',
reply_markup=self.get_settings_inline_keyboard(chat_id),
text=self.SETTINGS_TEXT)
    def common_command_callback(self, bot, update, args=None):
        """Handle /dl, /link commands and plain messages containing links.

        Determines the mode ("dl" download, "link" reply with direct links,
        "ask" show an inline keyboard) either from the command name or from
        the chat's stored default, extracts URLs and acts accordingly.
        """
        self.init_chat(update.message)
        chat_id = update.message.chat_id
        chat_type = update.message.chat.type
        reply_to_message_id = update.message.message_id
        entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
        if not entities:
            command_passed = False
            # if no command then it is just a message and use default mode
            mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
        else:
            command_passed = True
            # try to determine mode from command
            mode = None
            for entity_value in entities.values():
                mode = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
                break
            if not mode:
                mode = "dl"
        if command_passed and not args:
            # Command without links: explain usage instead of doing nothing.
            rant_text = self.RANT_TEXT_PRIVATE if chat_type == Chat.PRIVATE else self.RANT_TEXT_PUBLIC
            rant_text += "\nYou can simply send message with links (to download) OR command as `/{} <links>`.".format(
                mode)
            self.rant_and_cleanup(bot, chat_id, rant_text, reply_to_message_id=reply_to_message_id)
            return
        # apologize and send TYPING: always in PM and only when it's command in non-PM
        apologize = chat_type == Chat.PRIVATE or command_passed
        if apologize:
            bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
        source_ip = None
        if self.source_ips:
            source_ip = random.choice(self.source_ips)
            # TODO find working IP?
        urls = self.prepare_urls(msg_or_text=update.message,
                                 direct_urls=(mode == "link"),
                                 source_ip=source_ip)
        logger.debug(urls)
        if not urls:
            if apologize:
                bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                 text=self.NO_URLS_TEXT, parse_mode='Markdown')
        else:
            event_name = ("{}_cmd".format(mode)) if command_passed else ("{}_msg".format(mode))
            self.log_and_track(event_name, update.message)
            if mode == "dl":
                # Download every URL and send the audio files to the chat.
                wait_message = bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                                parse_mode='Markdown', text=get_italic(self.get_wait_text()))
                for url in urls:
                    self.download_url_and_send(bot, url, urls[url], chat_id=chat_id,
                                               reply_to_message_id=reply_to_message_id,
                                               wait_message_id=wait_message.message_id,
                                               source_ip=source_ip)
            elif mode == "link":
                # Reply with the direct links and delete the wait notice.
                wait_message = bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                                parse_mode='Markdown', text=get_italic(self.get_wait_text()))
                link_text = self.get_link_text(urls)
                bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                 parse_mode='Markdown', disable_web_page_preview=True,
                                 text=link_text if link_text else self.NO_URLS_TEXT)
                bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
            elif mode == "ask":
                # ask: always in PM and only if good urls exist in non-PM
                if chat_type == Chat.PRIVATE or "http" in " ".join(urls.values()):
                    # Stash the message so button_query_callback can act later.
                    orig_msg_id = str(reply_to_message_id)
                    self.chat_storage[str(chat_id)][orig_msg_id] = {"message": update.message, "urls": urls,
                                                                    "source_ip": source_ip}
                    question = "🎶 links found, what to do?"
                    button_dl = InlineKeyboardButton(text="✅ Download", callback_data=" ".join([orig_msg_id, "dl"]))
                    button_link = InlineKeyboardButton(text="❇️ Links",
                                                       callback_data=" ".join([orig_msg_id, "link"]))
                    button_cancel = InlineKeyboardButton(text="❎", callback_data=" ".join([orig_msg_id, "nodl"]))
                    inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_cancel]])
                    bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                     reply_markup=inline_keyboard, text=question)
        self.cleanup_chat(chat_id)
def button_query_callback(self, bot, update):
    """Handle inline-keyboard button presses.

    Two families of callbacks, distinguished by the first token of
    ``callback_query.data`` ("<orig_msg_id_or_'settings'> <action>"):
    per-chat settings changes, and the dl/link/nodl choice attached to a
    previously stored "ask" question.
    """
    btn_msg = update.callback_query.message
    self.init_chat(btn_msg)
    user_id = update.callback_query.from_user.id
    btn_msg_id = btn_msg.message_id
    chat = btn_msg.chat
    chat_id = chat.id
    chat_type = chat.type
    orig_msg_id, action = update.callback_query.data.split()
    if orig_msg_id == "settings":
        if chat_type != Chat.PRIVATE:
            # in groups, only admins/creator (or whitelisted alert chats) may change settings
            chat_member_status = chat.get_member(user_id).status
            if chat_member_status not in [ChatMember.ADMINISTRATOR,
                                          ChatMember.CREATOR] and user_id not in self.ALERT_CHAT_IDS:
                self.log_and_track("settings_fail")
                update.callback_query.answer(text="You're not chat admin")
                return
        self.log_and_track("settings_{}".format(action), btn_msg)
        if action == "close":
            bot.delete_message(chat_id, btn_msg_id)
        else:
            setting_changed = False
            if action in ["dl", "link", "ask"]:
                # mode is a radio-style choice; only counts as changed when it differs
                current_setting = self.chat_storage[str(chat_id)]["settings"]["mode"]
                if action != current_setting:
                    setting_changed = True
                    self.chat_storage[str(chat_id)]["settings"]["mode"] = action
            elif action in ["flood"]:
                # flood is a yes/no toggle; flipping always counts as a change
                current_setting = self.chat_storage[str(chat_id)]["settings"]["flood"]
                setting_changed = True
                self.chat_storage[str(chat_id)]["settings"][action] = "no" if current_setting == "yes" else "yes"
            if setting_changed:
                self.chat_storage.sync()
                update.callback_query.answer(text="Settings changed")
                update.callback_query.edit_message_reply_markup(parse_mode='Markdown',
                                                                reply_markup=self.get_settings_inline_keyboard(
                                                                    chat_id))
            else:
                update.callback_query.answer(text="Settings not changed")
    elif orig_msg_id in self.chat_storage[str(chat_id)]:
        # pop: each stored "ask" question may be acted on only once
        msg_from_storage = self.chat_storage[str(chat_id)].pop(orig_msg_id)
        orig_msg = msg_from_storage["message"]
        urls = msg_from_storage["urls"]
        source_ip = msg_from_storage["source_ip"]
        self.log_and_track("{}_msg".format(action), orig_msg)
        if action == "dl":
            update.callback_query.answer(text=self.get_wait_text())
            wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
                                                                   text=get_italic(self.get_wait_text()))
            for url in urls:
                self.download_url_and_send(bot, url, urls[url], chat_id=chat_id,
                                           reply_to_message_id=orig_msg_id,
                                           wait_message_id=wait_message.message_id,
                                           source_ip=source_ip)
        elif action == "link":
            update.callback_query.answer(text=self.get_wait_text())
            wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
                                                                   text=get_italic(self.get_wait_text()))
            # re-resolve to direct links now that the user explicitly asked for them
            urls = self.prepare_urls(urls.keys(), direct_urls=True, source_ip=source_ip)
            link_text = self.get_link_text(urls)
            bot.send_message(chat_id=chat_id, reply_to_message_id=orig_msg_id,
                             parse_mode='Markdown', disable_web_page_preview=True,
                             text=link_text if link_text else self.NO_URLS_TEXT)
            bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
        elif action == "nodl":
            bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
    else:
        # original message no longer in storage (expired or cleaned up)
        update.callback_query.answer(text=self.OLG_MSG_TEXT)
        bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
def inline_query_callback(self, bot, update):
    """Answer an inline query with directly-downloadable audio results.

    Resolves direct urls from the typed query text and offers each resolved
    line as an InlineQueryResultAudio. Failures to answer (e.g. the query
    expired, or a result url is invalid) are ignored on purpose.
    """
    self.log_and_track("link_inline")
    inline_query_id = update.inline_query.id
    text = update.inline_query.query
    results = []
    urls = self.prepare_urls(msg_or_text=text, direct_urls=True)
    for url in urls:
        for direct_url in urls[url].splitlines():  # TODO: fix non-mp3 and allow only sc/bc
            logger.debug(direct_url)
            results.append(
                InlineQueryResultAudio(id=str(uuid4()), audio_url=direct_url, title="FAST_INLINE_DOWNLOAD"))
    try:
        bot.answer_inline_query(inline_query_id, results)
    except Exception:
        # best effort only — but a bare `except:` would also swallow
        # SystemExit/KeyboardInterrupt, so keep it at Exception
        pass
def prepare_urls(self, msg_or_text, direct_urls=False, source_ip=None):
    """Extract supported links from a telegram Message (or plain text / iterable
    of url strings) and classify them.

    Returns a dict mapping url text to either the resolved direct-url text,
    the placeholder "http" (supported site, resolution deferred), or an error
    status taken from URLError.
    """
    if isinstance(msg_or_text, Message):
        urls = []
        url_entities = msg_or_text.parse_entities(types=[MessageEntity.URL])
        for entity in url_entities:
            url_str = url_entities[entity]
            logger.debug("Entity URL Parsed: %s", url_str)
            if "://" not in url_str:
                # bare domains come without a scheme; default to http
                url_str = "http://{}".format(url_str)
            urls.append(URL(url_str))
        text_link_entities = msg_or_text.parse_entities(types=[MessageEntity.TEXT_LINK])
        for entity in text_link_entities:
            url_str = entity.url
            logger.debug("Entity Text Link Parsed: %s", url_str)
            urls.append(URL(url_str))
    else:
        urls = find_all_links(msg_or_text, default_scheme="http")
    urls_dict = {}
    for url in urls:
        url_text = url.to_text(True)
        # number of non-empty path segments, used to tell tracks/sets apart
        url_parts_num = len([part for part in url.path_parts if part])
        try:
            if (
                # SoundCloud: tracks, sets and widget pages, no /you/ pages
                (self.SITES["sc"] in url.host and (2 <= url_parts_num <= 3 or self.SITES["scapi"] in url_text) and (
                    not "you" in url.path_parts)) or
                # Bandcamp: tracks and albums
                (self.SITES["bc"] in url.host and (2 <= url_parts_num <= 2)) or
                # YouTube: videos and playlists
                (self.SITES["yt"] in url.host and (
                    "youtu.be" in url.host or "watch" in url.path or "playlist" in url.path))
            ):
                if direct_urls or self.SITES["yt"] in url.host:
                    # YouTube is always resolved immediately; other known sites
                    # only when direct urls were explicitly requested
                    urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
                                                         source_ip)
                else:
                    urls_dict[url_text] = "http"
            elif not any((site in url.host for site in self.SITES.values())):
                # unknown site: let youtube-dl try to resolve it
                urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
                                                     source_ip)
        except ProcessExecutionError:
            logger.debug("youtube-dl get-url failed: %s", url_text)
        except URLError as exc:
            # keep the failure status so callers can report the restriction
            urls_dict[url_text] = exc.status
    return urls_dict
def get_link_text(self, urls):
    """Build a Markdown message listing each source url and its direct link(s).

    `urls` maps source url -> newline-separated direct urls (or a status
    string); direct lines without "http" in them are skipped. Returns ""
    when nothing is linkable.
    """
    link_text = ""
    for i, url in enumerate(urls):
        link_text += "[Source Link #{}]({}) | `{}`\n".format(str(i + 1), url, URL(url).host)
        direct_urls = urls[url].splitlines()
        for j, direct_url in enumerate(direct_urls):
            if "http" in direct_url:
                content_type = ""
                if "googlevideo" in direct_url:
                    # YouTube serves separate audio/video streams; label which this is
                    if "audio" in direct_url:
                        content_type = "Audio"
                    else:
                        content_type = "Video"
                if self.shortener:
                    # best effort: keep the long url when shortening fails.
                    # (was a bare `except:`, which would also swallow KeyboardInterrupt)
                    try:
                        direct_url = self.shortener.short(direct_url)
                    except Exception:
                        pass
                link_text += "• {} [Direct Link]({})\n".format(content_type, direct_url)
    return link_text
@run_async
def download_url_and_send(self, bot, url, direct_urls, chat_id, reply_to_message_id=None,
                          wait_message_id=None, source_ip=None):
    """Download audio from `url`, convert/split it and send it to the chat.

    `direct_urls` is either resolved direct-link text for `url` or one of the
    sentinel strings "direct"/"country"/"live" marking a restriction.
    Internal `status` codes: 1 success, 0 pending, -1 timeout, -2 download
    failed, -3/-4/-5 direct/region/live restrictions.
    """
    bot.send_chat_action(chat_id=chat_id, action=ChatAction.RECORD_AUDIO)
    # isolated scratch dir per download so concurrent jobs don't collide
    download_dir = os.path.join(self.DL_DIR, str(uuid4()))
    shutil.rmtree(download_dir, ignore_errors=True)
    os.makedirs(download_dir)
    status = 0
    if direct_urls == "direct":
        status = -3
    elif direct_urls == "country":
        status = -4
    elif direct_urls == "live":
        status = -5
    else:
        # try the dedicated downloaders (scdl / bandcamp-dl) first
        if (self.SITES["sc"] in url and self.SITES["scapi"] not in url) or (self.SITES["bc"] in url):
            cmd_name = "scdl"
            cmd_args = []
            cmd = None
            cmd_input = None
            if self.SITES["sc"] in url and self.SITES["scapi"] not in url:
                cmd_name = "scdl"
                cmd_args = (
                    "-l", url,  # URL of track/playlist/user
                    "-c",  # Continue if a music already exist
                    "--path", download_dir,  # Download the music to a custom path
                    "--onlymp3",  # Download only the mp3 file even if the track is Downloadable
                    "--addtofile",  # Add the artist name to the filename if it isn't in the filename already
                    "--addtimestamp",
                    # Adds the timestamp of the creation of the track to the title (useful to sort chronologically)
                    "--no-playlist-folder",
                    # Download playlist tracks into directory, instead of making a playlist subfolder
                    "--extract-artist",  # Set artist tag from title instead of username
                )
                cmd = scdl_bin
                cmd_input = None
            elif self.SITES["bc"] in url:
                cmd_name = "bandcamp-dl"
                cmd_args = (
                    "--base-dir", download_dir,  # Base location of which all files are downloaded
                    "--template", "%{track} - %{artist} - %{title} [%{album}]",  # Output filename template
                    "--overwrite",  # Overwrite tracks that already exist
                    "--group",  # Use album/track Label as iTunes grouping
                    "--embed-art",  # Embed album art (if available)
                    "--no-slugify",  # Disable slugification of track, album, and artist names
                    url,  # URL of album/track
                )
                cmd = bandcamp_dl_bin
                cmd_input = "yes"
            logger.info("%s starts: %s", cmd_name, url)
            cmd_proc = cmd[cmd_args].popen(stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
            try:
                cmd_stdout, cmd_stderr = cmd_proc.communicate(input=cmd_input, timeout=self.DL_TIMEOUT)
                cmd_retcode = cmd_proc.returncode
                # TODO listed are common scdl problems for one track with 0 retcode, all its output is always in stderr:
                if cmd_retcode or (any(err in cmd_stderr for err in ["Error resolving url", "is not streamable",
                                                                     "Failed to get item"]) and ".mp3" not in cmd_stderr):
                    raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
                logger.info("%s succeeded: %s", cmd_name, url)
                status = 1
            except TimeoutExpired:
                cmd_proc.kill()
                logger.info("%s took too much time and dropped: %s", cmd_name, url)
                status = -1
            except ProcessExecutionError:
                # status stays 0 so youtube-dl gets a chance below
                logger.exception("%s failed: %s", cmd_name, url)
        if status == 0:
            # fallback (or primary for other sites): youtube-dl in a subprocess
            cmd_name = "youtube-dl"
            cmd = youtube_dl_func
            # TODO: set different ydl_opts for different sites
            ydl_opts = {
                'format': 'bestaudio/best',
                'outtmpl': os.path.join(download_dir, '%(title)s.%(ext)s'),
                # default: %(autonumber)s - %(title)s-%(id)s.%(ext)s
                'postprocessors': [
                    {
                        'key': 'FFmpegExtractAudio',
                        'preferredcodec': 'mp3',
                        'preferredquality': '128',
                    },
                    # {'key': 'EmbedThumbnail',}, {'key': 'FFmpegMetadata',},
                ],
            }
            if self.proxy:
                ydl_opts['proxy'] = self.proxy
            if source_ip:
                ydl_opts['source_address'] = source_ip
            # https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L210
            if self.cookies_file:
                if "http" in self.cookies_file:
                    ydl_opts['cookiefile'] = self.COOKIES_DOWNLOAD_FILE
                else:
                    ydl_opts['cookiefile'] = self.cookies_file
            queue = Queue()
            cmd_args = (url, ydl_opts, queue,)
            logger.info("%s starts: %s", cmd_name, url)
            cmd_proc = Process(target=cmd, args=cmd_args)
            cmd_proc.start()
            try:
                cmd_retcode, cmd_stderr = queue.get(block=True, timeout=self.DL_TIMEOUT)
                cmd_stdout = ""
                cmd_proc.join()
                if cmd_retcode:
                    raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
                # raise cmd_status #TODO: pass and re-raise original Exception?
                logger.info("%s succeeded: %s", cmd_name, url)
                status = 1
            except Empty:
                cmd_proc.join(1)
                if cmd_proc.is_alive():
                    cmd_proc.terminate()
                logger.info("%s took too much time and dropped: %s", cmd_name, url)
                status = -1
            except ProcessExecutionError:
                # fix: lazy logger args instead of eager %-formatting (matches scdl branch)
                logger.exception("%s failed: %s", cmd_name, url)
                status = -2
    gc.collect()
    if status == -1:
        bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                         text=self.DL_TIMEOUT_TEXT, parse_mode='Markdown')
    elif status == -2:
        bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                         text=self.NO_AUDIO_TEXT, parse_mode='Markdown')
    elif status == -3:
        bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                         text=self.DIRECT_RESTRICTION_TEXT, parse_mode='Markdown')
    elif status == -4:
        bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                         text=self.REGION_RESTRICTION_TEXT, parse_mode='Markdown')
    elif status == -5:
        bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                         text=self.LIVE_RESTRICTION_TEXT, parse_mode='Markdown')
    elif status == 1:
        file_list = []
        for d, dirs, files in os.walk(download_dir):
            for file in files:
                file_list.append(os.path.join(d, file))
        if not file_list:
            logger.info("No files in dir: %s", download_dir)
            bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                             text="*Sorry*, I couldn't download any files from provided links",
                             parse_mode='Markdown')
        else:
            for file in sorted(file_list):
                file_name = os.path.split(file)[-1]
                file_parts = []
                try:
                    file_parts = self.convert_and_split_audio_file(file)
                except FileNotSupportedError as exc:
                    # silently skip known sidecar files (artwork, playlists, temp files)
                    if not (exc.file_format in ["m3u", "jpg", "jpeg", "png", "finished", "tmp"]):
                        logger.warning("Unsupported file format: %s", file_name)
                        bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                         text="*Sorry*, downloaded file `{}` is in format I could not yet convert or send".format(
                                             file_name),
                                         parse_mode='Markdown')
                except FileTooLargeError as exc:
                    logger.info("Large file for convert: %s", file_name)
                    bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                     text="*Sorry*, downloaded file `{}` is `{}` MB and it is larger than I could convert (`{} MB`)".format(
                                         file_name, exc.file_size // 1000000,
                                         self.MAX_CONVERT_FILE_SIZE // 1000000),
                                     parse_mode='Markdown')
                except FileSplittedPartiallyError as exc:
                    # keep whatever parts did get produced and still try to send them
                    file_parts = exc.file_parts
                    logger.exception("Splitting failed: %s", file_name)
                    bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                     text="*Sorry*, not enough memory to convert file `{}`..".format(
                                         file_name),
                                     parse_mode='Markdown')
                except FileNotConvertedError as exc:
                    # fix: this branch is a conversion failure, not a splitting failure
                    # (log message was copy-pasted from the handler above)
                    logger.exception("Converting failed: %s", file_name)
                    bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                     text="*Sorry*, not enough memory to convert file `{}`..".format(
                                         file_name),
                                     parse_mode='Markdown')
                try:
                    caption = None
                    flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
                    if flood == "yes":
                        addition = ""
                        short_url = ""
                        url_obj = URL(url)
                        if self.SITES["yt"] in url_obj.host:
                            source = "YouTube"
                            if "qP303vxTLS8" in url:
                                # easter egg for one specific video id
                                addition = random.choice([
                                    "Скачал музла, машина эмпэтри дала!",
                                    "У тебя талант, братан! Ка-какой? Качать онлайн!",
                                    "Слушаю и не плачУ, то, что скачал вчера",
                                    "Всё по чесноку, если скачал, отгружу музла!",
                                    "Дёрнул за канат, и телега поймала трэкан!",
                                    "Сегодня я качаю, и трэки не влазят мне в RAM!",
                                ])
                            else:
                                file_root, file_ext = os.path.splitext(file_name)
                                file_title = file_root.replace(file_ext, "")
                                addition = ": " + file_title
                            if "youtu.be" in url_obj.host:
                                short_url = url.replace("http://", "").replace("https://", "")
                        elif self.SITES["sc"] in url_obj.host:
                            source = "SoundCloud"
                        elif self.SITES["bc"] in url_obj.host:
                            source = "Bandcamp"
                        else:
                            source = url_obj.host.replace(".com", "").replace("www.", "").replace("m.", "")
                        if self.shortener and not short_url:
                            # best effort: keep short_url empty when shortening fails
                            try:
                                short_url = self.shortener.short(url)
                                short_url = short_url.replace("http://", "").replace("https://", "")
                            except Exception:
                                pass
                        caption = "@{} _got it from_ [{}]({}) {}".format(self.bot_username.replace("_", "\_"),
                                                                         source,
                                                                         short_url, addition.replace("_", "\_"))
                    # logger.info(caption)
                    sent_audio_ids = self.send_audio_file_parts(bot, chat_id, file_parts,
                                                                reply_to_message_id if flood == "yes" else None,
                                                                caption)
                except FileSentPartiallyError as exc:
                    sent_audio_ids = exc.sent_audio_ids
                    bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
                                     text="*Sorry*, could not send file `{}` or some of it's parts..".format(
                                         file_name),
                                     parse_mode='Markdown')
                    logger.warning("Sending some parts failed: %s", file_name)
    if not self.SERVE_AUDIO:
        shutil.rmtree(download_dir, ignore_errors=True)
    if wait_message_id:  # TODO: delete only once
        # best effort: the wait message may already have been deleted
        try:
            bot.delete_message(chat_id=chat_id, message_id=wait_message_id)
        except Exception:
            pass
    # downloader = URLopener()
    # file_name, headers = downloader.retrieve(url.to_text(full_quote=True))
    # patoolib.extract_archive(file_name, outdir=DL_DIR)
    # os.remove(file_name)
def convert_and_split_audio_file(self, file=""):
    """Ensure `file` is an mp3 no larger than Telegram's upload limit.

    Converts m4a/mp4 to mp3 with ffmpeg, then splits oversized files into
    ``.partN`` chunks. Returns the list of resulting file paths.
    Raises FileNotSupportedError / FileTooLargeError / FileNotConvertedError /
    FileSplittedPartiallyError accordingly.
    """
    file_root, file_ext = os.path.splitext(file)
    file_format = file_ext.replace(".", "").lower()
    file_size = os.path.getsize(file)
    if file_format not in ["mp3", "m4a", "mp4"]:
        raise FileNotSupportedError(file_format)
    if file_size > self.MAX_CONVERT_FILE_SIZE:
        raise FileTooLargeError(file_size)
    if file_format != "mp3":
        logger.info("Converting: %s", file)
        try:
            file_converted = file.replace(file_ext, ".mp3")
            ffinput = ffmpeg.input(file)
            # audio only (vn=None drops the video stream), 128k mp3
            ffmpeg.output(ffinput, file_converted, audio_bitrate="128k", vn=None).run()
            file = file_converted
            file_root, file_ext = os.path.splitext(file)
            file_format = file_ext.replace(".", "").lower()
            file_size = os.path.getsize(file)
        except Exception:
            # TODO exceptions
            raise FileNotConvertedError
    file_parts = []
    if file_size <= self.MAX_TG_FILE_SIZE:
        file_parts.append(file)
    else:
        logger.info("Splitting: %s", file)
        id3 = None
        try:
            # carry the original ID3 tags over to each part if possible
            id3 = ID3(file, translate=False)
        except:
            pass
        parts_number = file_size // self.MAX_TG_FILE_SIZE + 1
        # https://github.com/c0decracker/video-splitter
        # https://superuser.com/a/1354956/464797
        try:
            # file_duration = float(ffmpeg.probe(file)['format']['duration'])
            part_size = file_size // parts_number
            cur_position = 0
            for i in range(parts_number):
                file_part = file.replace(file_ext, ".part{}{}".format(str(i + 1), file_ext))
                ffinput = ffmpeg.input(file)
                if i == (parts_number - 1):
                    # last part: take everything from cur_position to the end
                    ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position).run()
                else:
                    # cut a ~part_size chunk, then advance by its actual duration
                    ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position, fs=part_size).run()
                    part_duration = float(ffmpeg.probe(file_part)['format']['duration'])
                    cur_position += part_duration
                if id3:
                    try:
                        id3.save(file_part, v1=2, v2_version=4)
                    except:
                        pass
                file_parts.append(file_part)
        except Exception:
            # TODO exceptions
            raise FileSplittedPartiallyError(file_parts)
    return file_parts
def send_audio_file_parts(self, bot, chat_id, file_parts, reply_to_message_id=None, caption=None):
    """Send each mp3 part to the chat, retrying up to 3 times per part.

    Returns the telegram ``file_id`` of every sent audio; raises
    FileSentPartiallyError (carrying the ids that did go through) when some
    parts could not be sent.
    """
    sent_audio_ids = []
    for index, file in enumerate(file_parts):
        path = pathlib.Path(file)
        file_name = os.path.split(file)[-1]
        # file_name = translit(file_name, 'ru', reversed=True)
        logger.info("Sending: %s", file_name)
        bot.send_chat_action(chat_id=chat_id, action=ChatAction.UPLOAD_AUDIO)
        caption_part = None
        if len(file_parts) > 1:
            caption_part = "Part {} of {}".format(str(index + 1), str(len(file_parts)))
        if caption:
            if caption_part:
                caption_full = caption_part + " | " + caption
            else:
                caption_full = caption
        else:
            if caption_part:
                caption_full = caption_part
            else:
                caption_full = ""
        # caption_full = textwrap.shorten(caption_full, width=190, placeholder="..")
        for i in range(3):
            try:
                mp3 = MP3(file)
                duration = round(mp3.info.length)
                performer = None
                title = None
                try:
                    # tags may be absent; fall back to None
                    performer = ", ".join(mp3['artist'])
                    title = ", ".join(mp3['title'])
                except:
                    pass
                # serve via public URL when SERVE_AUDIO, otherwise upload the bytes
                audio = str(urljoin(self.APP_URL, str(path.relative_to(self.DL_DIR))))
                logger.debug(audio)
                if not self.SERVE_AUDIO:
                    audio = open(file, 'rb')
                if i > 0:
                    # maybe: Reply message not found
                    reply_to_message_id = None
                audio_msg = bot.send_audio(chat_id=chat_id,
                                           reply_to_message_id=reply_to_message_id,
                                           audio=audio,
                                           duration=duration,
                                           performer=performer,
                                           title=title,
                                           caption=caption_full,
                                           parse_mode='Markdown')
                sent_audio_ids.append(audio_msg.audio.file_id)
                logger.info("Sending succeeded: %s", file_name)
                break
            except TelegramError:
                if i == 2:
                    logger.exception("Sending failed because of TelegramError: %s", file_name)
    if len(sent_audio_ids) != len(file_parts):
        raise FileSentPartiallyError(sent_audio_ids)
    return sent_audio_ids
|
trezor.py | import traceback
import sys
from typing import NamedTuple, Any
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a Trezor hardware device."""
    hw_type = 'trezor'
    device = TREZOR_PRODUCT_KEY

    def get_derivation(self):
        # BIP32 derivation path prefix of this keystore's xpub
        return self.derivation

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # the Trezor protocol offers no message decryption; fail loudly
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        client = self.get_client()
        # sequence is (change_branch, index); append it to the keystore derivation
        address_path = self.get_derivation() + "/%d/%d"%sequence
        msg_sig = client.sign_message(address_path, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Sign `tx` on the device: collect previous transactions and the
        derivation paths of our xpubs, then delegate to the plugin."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # legacy inputs need the full previous tx for the device to verify amounts
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
    """User-chosen options for initializing a Trezor device."""
    # number of seed words (12/18/24 for TIM_NEW — see strength map in _initialize_device)
    word_count: int
    # device label shown on the Trezor screen
    label: str
    pin_enabled: bool
    passphrase_enabled: bool
    # RECOVERY_TYPE_SCRAMBLED_WORDS / RECOVERY_TYPE_MATRIX; only used for TIM_RECOVER
    recovery_type: Any = None
    no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
#     libraries_available, libraries_URL, minimum_firmware,
#     wallet_class, types

firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
# accepted python-trezor library version range: [minimum_library, maximum_library)
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
# fix: ('standard') without a comma is just the string 'standard', so the
# membership test `xtype not in self.SUPPORTED_XTYPES` (see get_xpub) became a
# substring check and would accept e.g. 'standar' or 'and'. Must be a tuple.
SUPPORTED_XTYPES = ('standard',)  # 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
# device ids handled by this plugin (matched against Device.product_key)
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
# maximum length accepted for the device label
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
    super().__init__(parent, config, name)
    # bail out early when trezorlib is missing/unusable; the plugin stays inert
    self.libraries_available = self.check_libraries_available()
    if not self.libraries_available:
        return
    self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
    """Return the installed trezorlib version string ('unknown' if it has none).

    Raises LibraryFoundButUnusable when the library was found on disk but
    could not actually be imported (module-level TREZORLIB flag is False).
    """
    import trezorlib
    try:
        detected = trezorlib.__version__
    except Exception:
        detected = 'unknown'
    if not TREZORLIB:
        raise LibraryFoundButUnusable(library_version=detected)
    return detected
def enumerate(self):
    """List connected Trezor devices as electrum Device records."""
    # If there is a bridge, prefer that.
    # On Windows, the bridge runs as Admin (and Electrum usually does not),
    # so the bridge has better chances of finding devices. see #5420
    # This also avoids duplicate entries.
    try:
        call_bridge("enumerate")
    except Exception:
        # no bridge reachable: fall back to enumerating all transports
        devices = trezorlib.transport.enumerate_devices()
    else:
        devices = BridgeTransport.enumerate()
    return [Device(path=d.get_path(),
                   interface_number=-1,
                   id_=d.get_path(),
                   product_key=TREZOR_PRODUCT_KEY,
                   usage_page=0,
                   transport_ui_string=d.get_path())
            for d in devices]
def create_client(self, device, handler):
    """Open a transport to `device` and wrap it in a TrezorClientBase.
    Returns None when the transport cannot be established."""
    try:
        self.logger.info(f"connecting to device at {device.path}")
        transport = trezorlib.transport.get_transport(device.path)
    except BaseException as e:
        self.logger.info(f"cannot connect at {device.path} {e}")
        return None
    if not transport:
        self.logger.info(f"cannot connect at {device.path}")
        return
    self.logger.info(f"connected to device at {device.path}")
    # note that this call can still raise!
    return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
    # returns the client for a given keystore. can use xpub
    devmgr = self.device_manager()
    handler = keystore.handler
    # serialize HID access across threads
    with devmgr.hid_lock:
        client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
    if client:
        # mark as recently used (keeps the pairing/session fresh)
        client.used()
    return client
def get_coin_name(self):
    """Coin name passed to trezorlib, depending on the testnet flag."""
    if constants.net.TESTNET:
        return "Testnet"
    return "Verge"
def initialize_device(self, device_id, wizard, handler):
    """Ask the user how to initialize the device and run the chosen method
    in a background thread, blocking the wizard's event loop until done.

    Raises UserCancelled when initialization did not finish successfully.
    """
    # Initialization method
    msg = _("Choose how you want to initialize your {}.\n\n"
            "The first two methods are secure as no secret information "
            "is entered into your computer.\n\n"
            "For the last two methods you input secrets on your keyboard "
            "and upload them to your {}, and so you should "
            "only do those on a computer you know to be trustworthy "
            "and free of malware."
            ).format(self.device, self.device)
    choices = [
        # Must be short as QT doesn't word-wrap radio button text
        (TIM_NEW, _("Let the device generate a completely new seed randomly")),
        (TIM_RECOVER, _("Recover from a seed you have previously written down")),
    ]

    def f(method):
        import threading
        settings = self.request_trezor_init_settings(wizard, method, device_id)
        t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
        # fix: Thread.setDaemon() is deprecated; set the attribute directly
        t.daemon = True
        t.start()
        exit_code = wizard.loop.exec_()
        if exit_code != 0:
            # this method (initialize_device) was called with the expectation
            # of leaving the device in an initialized state when finishing.
            # signal that this is not the case:
            raise UserCancelled()
    wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
    """Thread target: run _initialize_device and report the outcome to the
    wizard's event loop (exit code 0 = success, 1 = cancelled/failed)."""
    exit_code = 0
    try:
        self._initialize_device(settings, method, device_id, wizard, handler)
    except UserCancelled:
        exit_code = 1
    except BaseException as e:
        self.logger.exception('')
        handler.show_error(repr(e))
        exit_code = 1
    finally:
        # always unblock the wizard, whatever happened above
        wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
    """Run the actual device initialization (TIM_NEW or TIM_RECOVER) on the
    client identified by `device_id`."""
    if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
        # warn up front: scrambled-word recovery cannot be corrected mid-way
        handler.show_error(_(
            "You will be asked to enter 24 words regardless of your "
            "seed's actual length. If you enter a word incorrectly or "
            "misspell it, you cannot change it or go back - you will need "
            "to start again from the beginning.\n\nSo please enter "
            "the words carefully!"),
            blocking=True)
    devmgr = self.device_manager()
    client = devmgr.client_by_id(device_id)
    if not client:
        raise Exception(_("The device was disconnected."))
    if method == TIM_NEW:
        # BIP39 entropy strength (bits) by seed word count
        strength_from_word_count = {12: 128, 18: 192, 24: 256}
        client.reset_device(
            strength=strength_from_word_count[settings.word_count],
            passphrase_protection=settings.passphrase_enabled,
            pin_protection=settings.pin_enabled,
            label=settings.label,
            no_backup=settings.no_backup)
    elif method == TIM_RECOVER:
        client.recover_device(
            recovery_type=settings.recovery_type,
            word_count=settings.word_count,
            passphrase_protection=settings.passphrase_enabled,
            pin_protection=settings.pin_enabled,
            label=settings.label)
        if settings.recovery_type == RECOVERY_TYPE_MATRIX:
            handler.close_matrix_dialog()
    else:
        raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
    """Convert an xpub plus a derivation suffix `address_n` into the
    trezorlib HDNodePathType wire structure."""
    bip32node = BIP32Node.from_xkey(xpub)
    node = HDNodeType(
        depth=bip32node.depth,
        fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
        child_num=int.from_bytes(bip32node.child_number, 'big'),
        chain_code=bip32node.chaincode,
        public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
    )
    return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
    """Pair with the device during the wallet wizard: check firmware,
    initialize the device if needed, and fetch the root xpub."""
    devmgr = self.device_manager()
    device_id = device_info.device.id_
    client = devmgr.client_by_id(device_id)
    if client is None:
        raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                  _('Make sure it is in the correct state.'))
    if not client.is_uptodate():
        msg = (_('Outdated {} firmware for device labelled {}. Please '
                 'download the updated firmware from {}')
               .format(self.device, client.label(), self.firmware_URL))
        raise OutdatedHwFirmwareException(msg)
    # fixme: we should use: client.handler = wizard
    client.handler = self.create_handler(wizard)
    if not device_info.initialized:
        self.initialize_device(device_id, wizard, client.handler)
    is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
    # fetching the root xpub also exercises the connection/PIN flow
    client.get_xpub('m', 'standard', creating=is_creating_wallet)
    client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
    """Fetch the xpub at `derivation` from the device.
    Raises ScriptTypeNotSupported for script types outside SUPPORTED_XTYPES."""
    if xtype not in self.SUPPORTED_XTYPES:
        raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
    devmgr = self.device_manager()
    client = devmgr.client_by_id(device_id)
    client.handler = wizard
    xpub = client.get_xpub(derivation, xtype)
    client.used()
    return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
    """Map an electrum txin type to the corresponding trezorlib InputScriptType.

    Raises ValueError for unknown types.
    """
    mapping = {
        'p2wpkh': InputScriptType.SPENDWITNESS,
        'p2wsh': InputScriptType.SPENDWITNESS,
        'p2wpkh-p2sh': InputScriptType.SPENDP2SHWITNESS,
        'p2wsh-p2sh': InputScriptType.SPENDP2SHWITNESS,
        'p2pkh': InputScriptType.SPENDADDRESS,
        'p2sh': InputScriptType.SPENDMULTISIG,
    }
    script_type = mapping.get(electrum_txin_type)
    if script_type is None:
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    return script_type
def get_trezor_output_script_type(self, electrum_txin_type: str):
    """Map an electrum txin type to the corresponding trezorlib OutputScriptType.

    Raises ValueError for unknown types.
    """
    mapping = {
        'p2wpkh': OutputScriptType.PAYTOWITNESS,
        'p2wsh': OutputScriptType.PAYTOWITNESS,
        'p2wpkh-p2sh': OutputScriptType.PAYTOP2SHWITNESS,
        'p2wsh-p2sh': OutputScriptType.PAYTOP2SHWITNESS,
        'p2pkh': OutputScriptType.PAYTOADDRESS,
        'p2sh': OutputScriptType.PAYTOMULTISIG,
    }
    script_type = mapping.get(electrum_txin_type)
    if script_type is None:
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    return script_type
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
    """Sign `tx` on the device and merge the returned signatures into it."""
    # the device expects previous txs keyed by raw hash bytes
    prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
    client = self.get_client(keystore)
    inputs = self.tx_inputs(tx, xpub_path, True)
    outputs = self.tx_outputs(keystore.get_derivation(), tx)
    details = SignTx(lock_time=tx.locktime, version=tx.version, timestamp=tx.time)
    signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
    # append the SIGHASH_ALL byte (0x01) to each hex-encoded DER signature
    signatures = [(bh2u(x) + '01') for x in signatures]
    tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
    """Display `address` on the device screen so the user can verify it."""
    if keystore is None:
        keystore = wallet.get_keystore()
    if not self.show_address_helper(wallet, address, keystore):
        return
    deriv_suffix = wallet.get_address_index(address)
    derivation = keystore.derivation
    address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
    script_type = self.get_trezor_input_script_type(wallet.txin_type)
    # prepare multisig, if available:
    xpubs = wallet.get_master_public_keys()
    if len(xpubs) > 1:
        pubkeys = wallet.get_public_keys(address)
        # sort xpubs using the order of pubkeys
        sorted_pairs = sorted(zip(pubkeys, xpubs))
        multisig = self._make_multisig(
            wallet.m,
            [(xpub, deriv_suffix) for _, xpub in sorted_pairs])
    else:
        multisig = None
    client = self.get_client(keystore)
    client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
    """Convert electrum tx inputs into trezorlib TxInputType objects.

    With for_sig=True also fills script types, multisig info and our own
    derivation path (address_n); otherwise only outpoint data is set, which
    is what previous-transaction serialization needs.
    """
    inputs = []
    for txin in tx.inputs():
        txinputtype = TxInputType()
        if txin['type'] == 'coinbase':
            prev_hash = b"\x00"*32
            prev_index = 0xffffffff  # signed int -1
        else:
            if for_sig:
                x_pubkeys = txin['x_pubkeys']
                xpubs = [parse_xpubkey(x) for x in x_pubkeys]
                multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
                script_type = self.get_trezor_input_script_type(txin['type'])
                txinputtype = TxInputType(
                    script_type=script_type,
                    multisig=multisig)
                # find which key is mine
                for xpub, deriv in xpubs:
                    if xpub in xpub_path:
                        xpub_n = parse_path(xpub_path[xpub])
                        txinputtype.address_n = xpub_n + deriv
                        break
            prev_hash = bfh(txin['prevout_hash'])
            prev_index = txin['prevout_n']
        if 'value' in txin:
            txinputtype.amount = txin['value']
        txinputtype.prev_hash = prev_hash
        txinputtype.prev_index = prev_index
        if txin.get('scriptSig') is not None:
            script_sig = bfh(txin['scriptSig'])
            txinputtype.script_sig = script_sig
        # default nSequence: 0xfffffffe (opt-in for locktime, no RBF signalling)
        txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
        inputs.append(txinputtype)
    return inputs
def _make_multisig(self, m, xpubs, signatures=None):
    """Build a MultisigRedeemScriptType for an m-of-n script.
    Returns None for single-sig (only one xpub)."""
    if len(xpubs) == 1:
        return None
    pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
    if signatures is None:
        signatures = [b''] * len(pubkeys)
    elif len(signatures) != len(pubkeys):
        raise RuntimeError('Mismatched number of signatures')
    else:
        # strip the sighash byte from each sig; keep b'' placeholders for missing ones
        signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
    return MultisigRedeemScriptType(
        pubkeys=pubkeys,
        signatures=signatures,
        m=m)
    def tx_outputs(self, derivation, tx: Transaction):
        """Convert the outputs of an electrum transaction into trezorlib
        TxOutputType objects.

        derivation: base derivation path string of this keystore.
        tx: the transaction being signed; tx.output_info supplies wallet
            metadata for outputs that belong to this wallet.

        At most one output is sent to the device by derivation path (so
        the device treats it as change and hides it from the user); all
        other outputs are sent by address or raw script.
        """
        def create_output_by_derivation():
            # change output addressed by derivation path; reads info,
            # index, xpubs, m and amount from the enclosing loop iteration
            script_type = self.get_trezor_output_script_type(info.script_type)
            deriv = parse_path("/%d/%d" % index)
            multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
            txoutputtype = TxOutputType(
                multisig=multisig,
                amount=amount,
                address_n=parse_path(derivation + "/%d/%d" % index),
                script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # regular output addressed by address, or OP_RETURN data
            txoutputtype = TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if info.is_change == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.timestamp = d['time']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
vnet.py | #
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <thomas.goff@boeing.com>
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
vnet.py: PyCoreNet and LxBrNet classes that implement virtual networks using
Linux Ethernet bridging and ebtables rules.
'''
import os, sys, threading, time, subprocess
from core.api import coreapi
from core.misc.utils import *
from core.constants import *
from core.coreobj import PyCoreNet, PyCoreObj
from core.netns.vif import VEth, GreTap
checkexec([BRCTL_BIN, IP_BIN, EBTABLES_BIN, TC_BIN])
ebtables_lock = threading.Lock()
class EbtablesQueue(object):
''' Helper class for queuing up ebtables commands into rate-limited
atomic commits. This improves performance and reliability when there are
many WLAN link updates.
'''
# update rate is every 300ms
rate = 0.3
# ebtables
atomic_file = "/tmp/pycore.ebtables.atomic"
def __init__(self):
''' Initialize the helper class, but don't start the update thread
until a WLAN is instantiated.
'''
self.doupdateloop = False
self.updatethread = None
# this lock protects cmds and updates lists
self.updatelock = threading.Lock()
# list of pending ebtables commands
self.cmds = []
# list of WLANs requiring update
self.updates = []
# timestamps of last WLAN update; this keeps track of WLANs that are
# using this queue
self.last_update_time = {}
def startupdateloop(self, wlan):
''' Kick off the update loop; only needs to be invoked once.
'''
self.updatelock.acquire()
self.last_update_time[wlan] = time.time()
self.updatelock.release()
if self.doupdateloop:
return
self.doupdateloop = True
self.updatethread = threading.Thread(target = self.updateloop)
self.updatethread.daemon = True
self.updatethread.start()
def stopupdateloop(self, wlan):
''' Kill the update loop thread if there are no more WLANs using it.
'''
self.updatelock.acquire()
try:
del self.last_update_time[wlan]
except KeyError:
pass
self.updatelock.release()
if len(self.last_update_time) > 0:
return
self.doupdateloop = False
if self.updatethread:
self.updatethread.join()
self.updatethread = None
def ebatomiccmd(self, cmd):
''' Helper for building ebtables atomic file command list.
'''
r = [EBTABLES_BIN, "--atomic-file", self.atomic_file]
if cmd:
r.extend(cmd)
return r
def lastupdate(self, wlan):
''' Return the time elapsed since this WLAN was last updated.
'''
try:
elapsed = time.time() - self.last_update_time[wlan]
except KeyError:
self.last_update_time[wlan] = time.time()
elapsed = 0.0
return elapsed
def updated(self, wlan):
''' Keep track of when this WLAN was last updated.
'''
self.last_update_time[wlan] = time.time()
self.updates.remove(wlan)
def updateloop(self):
''' Thread target that looks for WLANs needing update, and
rate limits the amount of ebtables activity. Only one userspace program
should use ebtables at any given time, or results can be unpredictable.
'''
while self.doupdateloop:
self.updatelock.acquire()
for wlan in self.updates:
if self.lastupdate(wlan) > self.rate:
self.buildcmds(wlan)
#print "ebtables commit %d rules" % len(self.cmds)
self.ebcommit(wlan)
self.updated(wlan)
self.updatelock.release()
time.sleep(self.rate)
def ebcommit(self, wlan):
''' Perform ebtables atomic commit using commands built in the
self.cmds list.
'''
# save kernel ebtables snapshot to a file
cmd = self.ebatomiccmd(["--atomic-save",])
try:
check_call(cmd)
except Exception, e:
self.eberror(wlan, "atomic-save (%s)" % cmd, e)
# no atomic file, exit
return
# modify the table file using queued ebtables commands
for c in self.cmds:
cmd = self.ebatomiccmd(c)
try:
check_call(cmd)
except Exception, e:
self.eberror(wlan, "cmd=%s" % cmd, e)
pass
self.cmds = []
# commit the table file to the kernel
cmd = self.ebatomiccmd(["--atomic-commit",])
try:
check_call(cmd)
os.unlink(self.atomic_file)
except Exception, e:
self.eberror(wlan, "atomic-commit (%s)" % cmd, e)
def ebchange(self, wlan):
''' Flag a change to the given WLAN's _linked dict, so the ebtables
chain will be rebuilt at the next interval.
'''
self.updatelock.acquire()
if wlan not in self.updates:
self.updates.append(wlan)
self.updatelock.release()
def buildcmds(self, wlan):
''' Inspect a _linked dict from a wlan, and rebuild the ebtables chain
for that WLAN.
'''
wlan._linked_lock.acquire()
# flush the chain
self.cmds.extend([["-F", wlan.brname],])
# rebuild the chain
for (netif1, v) in wlan._linked.items():
for (netif2, linked) in v.items():
if wlan.policy == "DROP" and linked:
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
"-o", netif2.localname, "-j", "ACCEPT"],
["-A", wlan.brname, "-o", netif1.localname,
"-i", netif2.localname, "-j", "ACCEPT"]])
elif wlan.policy == "ACCEPT" and not linked:
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
"-o", netif2.localname, "-j", "DROP"],
["-A", wlan.brname, "-o", netif1.localname,
"-i", netif2.localname, "-j", "DROP"]])
wlan._linked_lock.release()
def eberror(self, wlan, source, error):
''' Log an ebtables command error and send an exception.
'''
if not wlan:
return
wlan.exception(coreapi.CORE_EXCP_LEVEL_ERROR, wlan.brname,
"ebtables command error: %s\n%s\n" % (source, error))
# a global object because all WLANs share the same queue
# cannot have multiple threads invoking the ebtables commnd
ebq = EbtablesQueue()
def ebtablescmds(call, cmds):
    ''' Run each command in cmds through the given call function while
    holding the global ebtables lock.
    '''
    with ebtables_lock:
        for cmd in cmds:
            call(cmd)
class LxBrNet(PyCoreNet):
    ''' Virtual network implemented with a Linux Ethernet bridge, using a
    per-bridge ebtables chain to control which attached interface pairs
    may exchange traffic, and tc qdiscs for link effects.
    '''
    # default ebtables policy for this network's chain
    policy = "DROP"
    def __init__(self, session, objid = None, name = None, verbose = False,
                 start = True, policy = None):
        PyCoreNet.__init__(self, session, objid, name, verbose, start)
        if name is None:
            name = str(self.objid)
        if policy is not None:
            self.policy = policy
        self.name = name
        # bridge name embeds the session id so concurrent sessions don't collide
        sessionid = self.session.shortsessionid()
        self.brname = "b.%s.%s" % (str(self.objid), sessionid)
        self.up = False
        if start:
            self.startup()
            ebq.startupdateloop(self)
    def startup(self):
        ''' Create the bridge, bring it up, and install its ebtables chain.
        '''
        try:
            check_call([BRCTL_BIN, "addbr", self.brname])
        except Exception, e:
            self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname,
                           "Error adding bridge: %s" % e)
        try:
            # turn off spanning tree protocol and forwarding delay
            check_call([BRCTL_BIN, "stp", self.brname, "off"])
            check_call([BRCTL_BIN, "setfd", self.brname, "0"])
            check_call([IP_BIN, "link", "set", self.brname, "up"])
            # create a new ebtables chain for this bridge
            ebtablescmds(check_call, [
                [EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
                [EBTABLES_BIN, "-A", "FORWARD",
                 "--logical-in", self.brname, "-j", self.brname]])
            # turn off multicast snooping so mcast forwarding occurs w/o IGMP joins
            snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % \
                self.brname
            if os.path.exists(snoop):
                open(snoop, "w").write('0')
        except Exception, e:
            self.exception(coreapi.CORE_EXCP_LEVEL_WARNING, self.brname,
                           "Error setting bridge parameters: %s" % e)
        self.up = True
    def shutdown(self):
        ''' Tear down the bridge, its ebtables chain, and attached interfaces.
        '''
        if not self.up:
            return
        ebq.stopupdateloop(self)
        mutecall([IP_BIN, "link", "set", self.brname, "down"])
        mutecall([BRCTL_BIN, "delbr", self.brname])
        ebtablescmds(mutecall, [
            [EBTABLES_BIN, "-D", "FORWARD",
             "--logical-in", self.brname, "-j", self.brname],
            [EBTABLES_BIN, "-X", self.brname]])
        for netif in self.netifs():
            # removes veth pairs used for bridge-to-bridge connections
            netif.shutdown()
        self._netif.clear()
        self._linked.clear()
        del self.session
        self.up = False
    def attach(self, netif):
        ''' Add an interface to this bridge and bring it up.
        '''
        if self.up:
            try:
                check_call([BRCTL_BIN, "addif", self.brname, netif.localname])
                check_call([IP_BIN, "link", "set", netif.localname, "up"])
            except Exception, e:
                self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
                               "Error joining interface %s to bridge %s: %s" % \
                               (netif.localname, self.brname, e))
                return
        PyCoreNet.attach(self, netif)
    def detach(self, netif):
        ''' Remove an interface from this bridge.
        '''
        if self.up:
            try:
                check_call([BRCTL_BIN, "delif", self.brname, netif.localname])
            except Exception, e:
                self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
                               "Error removing interface %s from bridge %s: %s" % \
                               (netif.localname, self.brname, e))
                return
        PyCoreNet.detach(self, netif)
    def linked(self, netif1, netif2):
        ''' Return whether traffic is currently allowed between the two
        attached interfaces, initializing the entry from the chain policy
        when no explicit state exists yet.
        '''
        # check if the network interfaces are attached to this network
        if self._netif[netif1.netifi] != netif1:
            raise ValueError, "inconsistency for netif %s" % netif1.name
        if self._netif[netif2.netifi] != netif2:
            raise ValueError, "inconsistency for netif %s" % netif2.name
        try:
            linked = self._linked[netif1][netif2]
        except KeyError:
            # default follows the chain policy: ACCEPT means linked
            if self.policy == "ACCEPT":
                linked = True
            elif self.policy == "DROP":
                linked = False
            else:
                raise Exception, "unknown policy: %s" % self.policy
            self._linked[netif1][netif2] = linked
        return linked
    def unlink(self, netif1, netif2):
        ''' Unlink two PyCoreNetIfs, resulting in adding or removing ebtables
        filtering rules.
        '''
        self._linked_lock.acquire()
        if not self.linked(netif1, netif2):
            self._linked_lock.release()
            return
        self._linked[netif1][netif2] = False
        self._linked_lock.release()
        # ebtables chain is rebuilt asynchronously by the queue thread
        ebq.ebchange(self)
    def link(self, netif1, netif2):
        ''' Link two PyCoreNetIfs together, resulting in adding or removing
        ebtables filtering rules.
        '''
        self._linked_lock.acquire()
        if self.linked(netif1, netif2):
            self._linked_lock.release()
            return
        self._linked[netif1][netif2] = True
        self._linked_lock.release()
        ebq.ebchange(self)
    def linkconfig(self, netif, bw = None, delay = None,
                   loss = None, duplicate = None, jitter = None, netif2 = None,
                   devname = None):
        ''' Configure link parameters by applying tc queuing disciplines on the
        interface.

        bw is in bits per second; delay and jitter are in microseconds;
        loss and duplicate are percentages. A tbf qdisc shapes bandwidth
        and a netem qdisc (child of tbf when present) applies the other
        effects. Parameters left at None are unchanged.
        '''
        if devname is None:
            devname = netif.localname
        tc = [TC_BIN, "qdisc", "replace", "dev", devname]
        parent = ["root"]
        changed = False
        # setparam() returns whether the stored value changed
        if netif.setparam('bw', bw):
            # from tc-tbf(8): minimum value for burst is rate / kernel_hz
            if bw is not None:
                burst = max(2 * netif.mtu, bw / 1000)
                limit = 0xffff # max IP payload
                tbf = ["tbf", "rate", str(bw),
                       "burst", str(burst), "limit", str(limit)]
            if bw > 0:
                if self.up:
                    if (self.verbose):
                        self.info("linkconfig: %s" % \
                                  ([tc + parent + ["handle", "1:"] + tbf],))
                    check_call(tc + parent + ["handle", "1:"] + tbf)
                netif.setparam('has_tbf', True)
                changed = True
            elif netif.getparam('has_tbf') and bw <= 0:
                # bandwidth cleared: delete the tbf qdisc
                tcd = [] + tc
                tcd[2] = "delete"
                if self.up:
                    check_call(tcd + parent)
                netif.setparam('has_tbf', False)
                # removing the parent removes the child
                netif.setparam('has_netem', False)
                changed = True
        if netif.getparam('has_tbf'):
            # attach netem under the tbf qdisc
            parent = ["parent", "1:1"]
        netem = ["netem"]
        changed = max(changed, netif.setparam('delay', delay))
        if loss is not None:
            loss = float(loss)
        changed = max(changed, netif.setparam('loss', loss))
        if duplicate is not None:
            duplicate = float(duplicate)
        changed = max(changed, netif.setparam('duplicate', duplicate))
        changed = max(changed, netif.setparam('jitter', jitter))
        if not changed:
            return
        # jitter and delay use the same delay statement
        if delay is not None:
            netem += ["delay", "%sus" % delay]
        if jitter is not None:
            if delay is None:
                netem += ["delay", "0us", "%sus" % jitter, "25%"]
            else:
                netem += ["%sus" % jitter, "25%"]
        if loss is not None:
            netem += ["loss", "%s%%" % min(loss, 100)]
        if duplicate is not None:
            netem += ["duplicate", "%s%%" % min(duplicate, 100)]
        # NOTE(review): these <= comparisons rely on Python 2 ordering of
        # None vs. int (None compares less than 0) — confirm before porting
        if delay <= 0 and jitter <= 0 and loss <= 0 and duplicate <= 0:
            # possibly remove netem if it exists and parent queue wasn't removed
            if not netif.getparam('has_netem'):
                return
            tc[2] = "delete"
            if self.up:
                if self.verbose:
                    self.info("linkconfig: %s" % \
                              ([tc + parent + ["handle", "10:"]],))
                check_call(tc + parent + ["handle", "10:"])
            netif.setparam('has_netem', False)
        elif len(netem) > 1:
            if self.up:
                if self.verbose:
                    self.info("linkconfig: %s" % \
                              ([tc + parent + ["handle", "10:"] + netem],))
                check_call(tc + parent + ["handle", "10:"] + netem)
            netif.setparam('has_netem', True)
    def linknet(self, net):
        ''' Link this bridge with another by creating a veth pair and installing
        each device into each bridge.
        '''
        sessionid = self.session.shortsessionid()
        # object ids are normally ints formatted as hex; fall back to str
        try:
            self_objid = '%x' % self.objid
        except TypeError:
            self_objid = '%s' % self.objid
        try:
            net_objid = '%x' % net.objid
        except TypeError:
            net_objid = '%s' % net.objid
        # Linux limits interface names to 15 characters (IFNAMSIZ)
        localname = 'veth%s.%s.%s' % (self_objid, net_objid, sessionid)
        if len(localname) >= 16:
            raise ValueError, "interface local name '%s' too long" % \
                localname
        name = 'veth%s.%s.%s' % (net_objid, self_objid, sessionid)
        if len(name) >= 16:
            raise ValueError, "interface name '%s' too long" % name
        netif = VEth(node = None, name = name, localname = localname,
                     mtu = 1500, net = self, start = self.up)
        self.attach(netif)
        if net.up:
            # this is similar to net.attach() but uses netif.name instead
            # of localname
            check_call([BRCTL_BIN, "addif", net.brname, netif.name])
            check_call([IP_BIN, "link", "set", netif.name, "up"])
        i = net.newifindex()
        net._netif[i] = netif
        with net._linked_lock:
            net._linked[netif] = {}
        netif.net = self
        netif.othernet = net
        return netif
    def getlinknetif(self, net):
        ''' Return the interface of that links this net with another net
        (that were linked using linknet()).
        '''
        for netif in self.netifs():
            if hasattr(netif, 'othernet') and netif.othernet == net:
                return netif
        return None
    def addrconfig(self, addrlist):
        ''' Set addresses on the bridge.
        '''
        if not self.up:
            return
        for addr in addrlist:
            try:
                check_call([IP_BIN, "addr", "add", str(addr), "dev", self.brname])
            except Exception, e:
                self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
                               "Error adding IP address: %s" % e)
class GreTapBridge(LxBrNet):
    ''' A network consisting of a bridge with a gretap device for tunneling to
    another system.
    '''
    def __init__(self, session, remoteip = None, objid = None, name = None,
                 policy = "ACCEPT", localip = None, ttl = 255, key = None,
                 verbose = False, start = True):
        # defer bridge startup (start=False) so the gretap device can be
        # created before the bridge comes up
        LxBrNet.__init__(self, session = session, objid = objid,
                         name = name, verbose = verbose, policy = policy,
                         start = False)
        # GRE key; default is derived from session and object ids so that
        # tunnels from different sessions do not collide
        self.grekey = key
        if self.grekey is None:
            self.grekey = self.session.sessionid ^ self.objid
        self.localnum = None
        self.remotenum = None
        self.remoteip = remoteip
        self.localip = localip
        self.ttl = ttl
        # without a remoteip, the GreTap is created later via addrconfig()
        if remoteip is None:
            self.gretap = None
        else:
            self.gretap = GreTap(node = self, name = None, session = session,
                                 remoteip = remoteip, objid = None, localip = localip, ttl = ttl,
                                 key = self.grekey)
        if start:
            self.startup()
    def startup(self):
        ''' Creates a bridge and adds the gretap device to it.
        '''
        LxBrNet.startup(self)
        if self.gretap:
            self.attach(self.gretap)
    def shutdown(self):
        ''' Detach the gretap device and remove the bridge.
        '''
        if self.gretap:
            self.detach(self.gretap)
            self.gretap.shutdown()
            self.gretap = None
        LxBrNet.shutdown(self)
    def addrconfig(self, addrlist):
        ''' Set the remote tunnel endpoint. This is a one-time method for
        creating the GreTap device, which requires the remoteip at startup.
        The 1st address in the provided list is remoteip, 2nd optionally
        specifies localip.
        '''
        if self.gretap:
            raise ValueError, "gretap already exists for %s" % self.name
        # addresses arrive in CIDR form; strip the prefix length
        remoteip = addrlist[0].split('/')[0]
        localip = None
        if len(addrlist) > 1:
            localip = addrlist[1].split('/')[0]
        self.gretap = GreTap(session = self.session, remoteip = remoteip,
                             objid = None, name = None,
                             localip = localip, ttl = self.ttl, key = self.grekey)
        self.attach(self.gretap)
    def setkey(self, key):
        ''' Set the GRE key used for the GreTap device. This needs to be set
        prior to instantiating the GreTap device (before addrconfig).
        '''
        self.grekey = key
|
revocation_notifier.py | '''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2017 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import configparser
import threading
import functools
import time
import os
import sys
import signal
import zmq
try:
    import simplejson as json
except ImportError:
    # raise a real ImportError: the original `raise("...")` raised a plain
    # string, which is itself a TypeError in Python 3
    raise ImportError("Simplejson is mandatory, please install")
from multiprocessing import Process
from keylime import common
from keylime import keylime_logging
from keylime import crypto
logger = keylime_logging.init_logging('revocation_notifier')
config = configparser.ConfigParser()
config.read(common.CONFIG_FILE)
broker_proc = None
def start_broker():
    """Launch the revocation forwarder broker in a child process.

    The broker relays messages from the verifier's IPC PUB socket to a
    TCP PUB socket on the configured revocation_notifier_port.
    """
    global broker_proc

    def _forward():
        context = zmq.Context(1)
        # socket facing the verifier (message source)
        frontend = context.socket(zmq.SUB)
        frontend.bind("ipc:///tmp/keylime.verifier.ipc")
        frontend.setsockopt(zmq.SUBSCRIBE, b'')
        # Socket facing services
        backend = context.socket(zmq.PUB)
        backend.bind("tcp://*:%s"%config.getint('general','revocation_notifier_port'))
        # blocks forever, shuttling messages between the two sockets
        zmq.device(zmq.FORWARDER, frontend, backend)

    broker_proc = Process(target=_forward)
    broker_proc.start()
def stop_broker():
    """Forcibly terminate the broker process started by start_broker(),
    if any. SIGKILL is used because the broker blocks in zmq.device().
    """
    global broker_proc
    if broker_proc is None:
        return
    os.kill(broker_proc.pid, signal.SIGKILL)
def notify(tosend):
    """Publish *tosend* (a JSON-serializable object) to the verifier's
    IPC socket from a background thread, retrying on failure up to the
    configured max_retries.
    """
    def _publish(payload):
        context = zmq.Context()
        mysock = context.socket(zmq.PUB)
        mysock.connect("ipc:///tmp/keylime.verifier.ipc")
        # wait 100ms for connect to happen
        time.sleep(0.1)
        # now send it out vi 0mq
        for i in range(config.getint('cloud_verifier','max_retries')):
            try:
                mysock.send_string(json.dumps(payload))
                break
            except Exception as e:
                logger.debug("Unable to publish revocation message %d times, trying again in %f seconds: %s"%(i,config.getfloat('cloud_verifier','retry_interval'),e))
                time.sleep(config.getfloat('cloud_verifier','retry_interval'))
        mysock.close()

    threading.Thread(target=_publish, args=(tosend,)).start()
cert_key=None
def await_notifications(callback, revocation_cert_path):
    """Subscribe to the revocation broker and invoke *callback* for every
    revocation message whose signature verifies. Runs forever.

    callback: called with the decoded revocation message (a dict).
    revocation_cert_path: path to the PEM certificate used to verify
        message signatures; lazily loaded when the first message arrives.

    Raises Exception when revocation_cert_path is None.
    """
    global cert_key
    if revocation_cert_path is None:
        raise Exception("must specify revocation_cert_path")
    context = zmq.Context()
    mysock = context.socket(zmq.SUB)
    mysock.setsockopt(zmq.SUBSCRIBE, b'')
    mysock.connect("tcp://%s:%s"%(config.get('general','revocation_notifier_ip'),config.getint('general','revocation_notifier_port')))
    logger.info('Waiting for revocation messages on 0mq %s:%s'%
                (config.get('general','revocation_notifier_ip'),config.getint('general','revocation_notifier_port')))
    while True:
        rawbody = mysock.recv()
        body = json.loads(rawbody)
        if cert_key is None:
            # load up the CV signing public key
            if revocation_cert_path is not None and os.path.exists(revocation_cert_path):
                logger.info("Lazy loading the revocation certificate from %s"%revocation_cert_path)
                with open(revocation_cert_path,'r') as f:
                    certpem = f.read()
                cert_key = crypto.rsa_import_pubkey(certpem)
        if cert_key is None:
            logger.warning("Unable to check signature of revocation message: %s not available"%revocation_cert_path)
        elif 'signature' not in body or body['signature']=='none':
            logger.warning("No signature on revocation message from server")
        elif not crypto.rsa_verify(cert_key,body['msg'].encode('utf-8'),body['signature'].encode('utf-8')):
            # fixed typo in log message: "siganture" -> "signature"
            logger.error("Invalid revocation message signature %s"%body)
        else:
            message = json.loads(body['msg'])
            logger.debug("Revocation signature validated for revocation: %s"%message)
            callback(message)
def main():
    # Self-test entry point: start a broker, run a listener thread that
    # prints received revocations, publish one demo revocation, then
    # shut the broker down and exit.
    start_broker()
    from keylime import secure_mount
    def worker():
        def print_notification(revocation):
            logger.warning("Received revocation: %s"%revocation)
        # certificate extracted to the secure mount by the agent payload
        keypath = '%s/unzipped/RevocationNotifier-cert.crt'%(secure_mount.mount())
        await_notifications(print_notification,revocation_cert_path=keypath)
    t = threading.Thread(target=worker)
    t.start()
    #time.sleep(0.5)
    # demo revocation payload with placeholder values
    json_body2 = {
        'v': 'vbaby',
        'agent_id': '2094aqrea3',
        'cloudagent_ip': 'ipaddy',
        'cloudagent_port': '39843',
        'tpm_policy': '{"ab":"1"}',
        'vtpm_policy': '{"ab":"1"}',
        'metadata': '{"cert_serial":"1"}',
        'ima_whitelist': '{}',
        'revocation_key': '',
        'revocation': '{"cert_serial":"1"}',
    }
    print("sending notification")
    notify(json_body2)
    time.sleep(2)
    print("shutting down")
    stop_broker()
    print("exiting...")
    sys.exit(0)
print("done")
if __name__=="__main__":
main()
|
tunnel.py | """Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
"""
# Copyright (C) 2010-2011 IPython Development Team
# Copyright (C) 2011- PyZMQ Developers
#
# Redistributed from IPython under the terms of the BSD License.
from __future__ import print_function
import atexit
import os
import re
import signal
import socket
import sys
import warnings
from getpass import getpass, getuser
from multiprocessing import Process
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko
SSHException = paramiko.ssh_exception.SSHException
except ImportError:
paramiko = None
class SSHException(Exception):
pass
else:
from .forward import forward_tunnel
try:
import pexpect
except ImportError:
pexpect = None
from ..utils.strtypes import b
def select_random_ports(n):
    """Select and return n random TCP ports that are available.

    All n sockets are held open until every port has been chosen (so the
    returned ports are distinct), then closed before returning.
    """
    sockets = []
    try:
        for _ in range(n):
            sock = socket.socket()
            sock.bind(('', 0))
            sockets.append(sock)
        return [sock.getsockname()[1] for sock in sockets]
    finally:
        for sock in sockets:
            sock.close()
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
_password_pat = re.compile(b(r'pass(word|phrase):'), re.IGNORECASE)
def try_passwordless_ssh(server, keyfile, paramiko=None):
    """Attempt to make an ssh connection without a password.

    Mainly used so a password is requested only once when several tunnels
    connect to the same server. When *paramiko* is None, the platform
    default is chosen: paramiko on Windows, OpenSSH elsewhere.
    """
    use_paramiko = paramiko if paramiko is not None else sys.platform == 'win32'
    checker = _try_passwordless_paramiko if use_paramiko else _try_passwordless_openssh
    return checker(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
    """Try passwordless login with shell ssh command.

    Spawns ``ssh -f <server> exit`` and watches its output: EOF without a
    password prompt means passwordless login works.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko")
    cmd = 'ssh -f '+ server
    if keyfile:
        cmd += ' -i ' + keyfile
    cmd += ' exit'
    # pop SSH_ASKPASS from env
    env = os.environ.copy()
    env.pop('SSH_ASKPASS', None)
    ssh_newkey = 'Are you sure you want to continue connecting'
    p = pexpect.spawn(cmd, env=env)
    while True:
        try:
            i = p.expect([ssh_newkey, _password_pat], timeout=.1)
            if i==0:
                raise SSHException('The authenticity of the host can\'t be established.')
        except pexpect.TIMEOUT:
            # nothing matched yet; keep polling until ssh exits or prompts
            continue
        except pexpect.EOF:
            # ssh exited without asking for a password: passwordless works
            return True
        else:
            # a password/passphrase prompt appeared: not passwordless
            return False
def _try_passwordless_paramiko(server, keyfile):
    """Try passwordless login with paramiko.

    Returns True when a key-based (password-free) connection succeeds,
    False when authentication is required. Raises ImportError when
    paramiko is not installed.
    """
    if paramiko is None:
        # fixed typo in the error message: "unavaliable" -> "unavailable"
        msg = "Paramiko unavailable, "
        if sys.platform == 'win32':
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(server, port, username=username, key_filename=keyfile,
                       look_for_keys=True)
    except paramiko.AuthenticationException:
        return False
    else:
        client.close()
        return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Connect *socket* to *addr* through an ssh tunnel.

    Wrapper for socket.connect(addr) when addr is not reachable from the
    local machine: opens a tunnel to *server* with open_tunnel(), connects
    the 0MQ socket to its local endpoint, and returns the tunnel object.
    """
    local_url, tunnel_obj = open_tunnel(
        addr, server,
        keyfile=keyfile,
        password=password,
        paramiko=paramiko,
        timeout=timeout,
    )
    socket.connect(local_url)
    return tunnel_obj
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Open a tunneled connection from a 0MQ url.

    For use inside tunnel_connection.

    Returns
    -------
    (url, tunnel) : (str, object)
        The 0MQ url that has been forwarded, and the tunnel object.
    """
    lport = select_random_ports(1)[0]
    # split e.g. 'tcp://1.2.3.4:5555' into ip and port
    transport, addr = addr.split('://')
    ip, rport = addr.split(':')
    rport = int(rport)
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    tunnelf = paramiko_tunnel if paramiko else openssh_tunnel
    tunnel = tunnelf(lport, rport, server, remoteip=ip,
                     keyfile=keyfile, password=password, timeout=timeout)
    return 'tcp://127.0.0.1:%i' % lport, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
    """Create an ssh tunnel using command-line ssh that connects port lport
    on this machine to localhost:rport on server. The tunnel
    will automatically close when not in use, remaining open
    for a minimum of timeout seconds for an initial connection.

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    keyfile and password may be specified, but ssh config is checked for defaults.

    Parameters
    ----------
    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.
    keyfile : str; path to public key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing. This prevents orphaned tunnels from running forever.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko_tunnel")
    ssh="ssh "
    if keyfile:
        ssh += "-i " + keyfile
    if ':' in server:
        server, port = server.split(':')
        ssh += " -p %s" % port
    # if an ssh ControlMaster for this server already exists, add the
    # forwarding to it rather than spawning a new ssh process
    cmd = "%s -O check %s" % (ssh, server)
    (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
    if not exitstatus:
        # extract master pid from e.g. "Master running (pid=1234)"
        pid = int(output[output.find(b"(pid=")+5:output.find(b")")])
        cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
            ssh, lport, remoteip, rport, server)
        (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
        if not exitstatus:
            # cancel the forwarding on interpreter exit
            atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
            return pid
    # no control master: spawn a dedicated backgrounded ssh that exits
    # after `timeout` seconds of the remote `sleep`
    cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
        ssh, lport, remoteip, rport, server, timeout)
    # pop SSH_ASKPASS from env
    env = os.environ.copy()
    env.pop('SSH_ASKPASS', None)
    ssh_newkey = 'Are you sure you want to continue connecting'
    tunnel = pexpect.spawn(cmd, env=env)
    failed = False
    while True:
        try:
            i = tunnel.expect([ssh_newkey, _password_pat], timeout=.1)
            if i==0:
                raise SSHException('The authenticity of the host can\'t be established.')
        except pexpect.TIMEOUT:
            # no prompt yet; keep polling
            continue
        except pexpect.EOF:
            # ssh exited: success when exit status is 0 (tunnel backgrounded)
            if tunnel.exitstatus:
                print(tunnel.exitstatus)
                print(tunnel.before)
                print(tunnel.after)
                raise RuntimeError("tunnel '%s' failed to start"%(cmd))
            else:
                return tunnel.pid
        else:
            # a password prompt matched; a second prompt means the previous
            # password was rejected
            if failed:
                print("Password rejected, try again")
                password=None
            if password is None:
                password = getpass("%s's password: "%(server))
            tunnel.sendline(password)
            failed = True
def _stop_tunnel(cmd):
    """Run the ssh control command that cancels a forwarded tunnel
    (registered via atexit by openssh_tunnel)."""
    pexpect.run(cmd)
def _split_server(server):
    """Split an ssh destination 'user@host:port' into (username, host, port).

    A missing username defaults to the current local user; a missing port
    defaults to 22.
    """
    if '@' in server:
        username, server = server.split('@', 1)
    else:
        username = getuser()
    port = 22
    if ':' in server:
        server, port_s = server.split(':')
        port = int(port_s)
    return username, server, port
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
    """Launch an ssh tunnel with paramiko in a subprocess. This should only
    be used when shell ssh is unavailable (e.g. Windows).

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server` — the equivalent of::

        ssh server -L localhost:lport:remoteip:rport

    keyfile and password may be specified, but ssh config is checked for
    defaults; a password is prompted for when passwordless login fails and
    none was supplied. Parameters match openssh_tunnel(); *timeout* is
    accepted for interface compatibility.

    Returns the multiprocessing.Process running the tunnel.
    """
    if paramiko is None:
        raise ImportError("Paramiko not available")

    if password is None and not _try_passwordless_paramiko(server, keyfile):
        password = getpass("%s's password: " % (server))

    proc = Process(
        target=_paramiko_tunnel,
        args=(lport, rport, server, remoteip),
        kwargs=dict(keyfile=keyfile, password=password),
    )
    proc.daemon = True
    proc.start()
    return proc
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
    """Worker that actually runs a paramiko tunnel.

    Meant to be used as a ``multiprocessing.Process`` target, never called
    directly.  Exits the process with 1 on connect failure, 0 on SIGINT,
    255 on any other forwarding error.
    """
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    try:
        client.connect(
            server,
            port,
            username=username,
            key_filename=keyfile,
            look_for_keys=True,
            password=password,
        )
    except Exception as e:
        print('*** Failed to connect to %s:%d: %r' % (server, port, e))
        sys.exit(1)

    # Don't let SIGINT kill the tunnel subprocess
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    try:
        forward_tunnel(lport, remoteip, rport, client.get_transport())
    except KeyboardInterrupt:
        print('SIGINT: Port forwarding stopped cleanly')
        sys.exit(0)
    except Exception as e:
        print("Port forwarding stopped uncleanly: %s"%e)
        sys.exit(255)
# Pick the tunnel implementation for this platform: Windows has no shell
# `ssh` client, so fall back to the paramiko-based tunnel there.
if sys.platform == 'win32':
    ssh_tunnel = paramiko_tunnel
else:
    ssh_tunnel = openssh_tunnel
__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
|
views.py | import json
import logging
import re
from datetime import timedelta
from threading import Thread
import shortuuid
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError, transaction
from django.http import JsonResponse
from django.shortcuts import HttpResponseRedirect, HttpResponse, reverse
from django.shortcuts import get_object_or_404, redirect
from django.views import View
from django.views.generic import TemplateView
from django.views.generic.base import TemplateResponseMixin, ContextMixin
from django.views.generic.edit import UpdateView, FormView
from django.views.generic.list import ListView
from account.models import User
from account.permissions import is_admin_or_root
from contest.models import Contest, ContestInvitation, ContestParticipant, ContestClarification, Activity
from contest.statistics import invalidate_contest
from contest.tasks import add_participant_with_invitation
from polygon.base_views import PolygonBaseMixin
from polygon.rejudge import (
rejudge_all_submission_on_contest, rejudge_all_submission_on_contest_problem,
rejudge_submission_set
)
from problem.models import Problem
from problem.views import StatusList
from submission.util import SubmissionStatus
from utils.csv_writer import write_csv
from utils.download import respond_generate_file
from utils.identicon import Identicon
from utils.permission import is_contest_manager
from .forms import ContestEditForm, TestSysUploadForm
logger = logging.getLogger(__name__)
def reorder_contest_problem_identifiers(contest: Contest, orders=None):
    """Re-assign contest problem identifiers in a single transaction.

    Problems are taken in their current identifier order (or in the order
    given by `orders`, a mapping of contest-problem pk -> position) and
    relabelled A, B, C, ... — or 1001, 1002, ... when there are more than
    26 problems.
    """
    with transaction.atomic():
        problems = list(contest.contestproblem_set.select_for_update().order_by('identifier').all())
        if orders:
            problems.sort(key=lambda p: orders[p.id])
        if len(problems) > 26:
            # Too many problems for single letters; use numeric labels.
            labels = (str(1000 + pos) for pos in range(1, len(problems) + 1))
        else:
            labels = (chr(ord('A') + pos) for pos in range(len(problems)))
        for problem, label in zip(problems, labels):
            problem.identifier = label
            problem.save(update_fields=['identifier'])
class ContestList(PolygonBaseMixin, ListView):
    """List the contests the current user is allowed to manage."""
    template_name = 'polygon/contest/list.jinja2'
    context_object_name = 'contest_list'

    def get_queryset(self):
        # Admins see every contest; others only their own.
        user = self.request.user
        return Contest.objects.all() if is_admin_or_root(user) else user.managing_contests.all()
class PolygonContestMixin(TemplateResponseMixin, ContextMixin, PolygonBaseMixin):
    """Load the contest from the URL pk and require manager permission.

    Sets ``self.contest`` before any handler runs and exposes it in the
    template context.
    """
    raise_exception = True

    def dispatch(self, request, *args, **kwargs):
        self.contest = get_object_or_404(Contest, pk=kwargs.get('pk'))
        return super(PolygonContestMixin, self).dispatch(request, *args, **kwargs)

    def test_func(self):
        # Only contest managers pass; everything else defers to the base policy.
        if is_contest_manager(self.request.user, self.contest):
            return super(PolygonContestMixin, self).test_func()
        return False

    def get_context_data(self, **kwargs):
        context = super(PolygonContestMixin, self).get_context_data(**kwargs)
        context['contest'] = self.contest
        return context
class ContestEdit(PolygonContestMixin, UpdateView):
    """Edit contest metadata; recomputes submission contest_time on save."""
    form_class = ContestEditForm
    template_name = 'polygon/contest/edit.jinja2'
    queryset = Contest.objects.all()

    def get_context_data(self, **kwargs):
        data = super(ContestEdit, self).get_context_data(**kwargs)
        data['admin_list'] = self.contest.managers.all()
        data['author_list'] = self.contest.authors.all()
        data['volunteer_list'] = self.contest.volunteers.all()
        return data

    def form_valid(self, form):
        instance = form.save(commit=False)
        # allowed_lang is persisted as a comma-separated string.
        instance.allowed_lang = ','.join(form.cleaned_data['allowed_lang'])
        instance.save()
        if instance.contest_type == 0:
            # Type 0 contest: re-derive each submission's contest_time from the
            # participant's start time and the (possibly changed) contest length.
            with transaction.atomic():
                participants = {p.user_id: p for p in instance.contestparticipant_set.all()}
                for sub in instance.submission_set.all():
                    start = participants[sub.author_id].start_time(instance)
                    end = start + instance.length
                    if start <= sub.create_time <= end:
                        sub.contest_time = sub.create_time - start
                    else:
                        # Outside the window: not counted as an in-contest submission.
                        sub.contest_time = None
                    sub.save(update_fields=["contest_time"])
        return redirect(self.request.path)
class ContestDropStatement(PolygonContestMixin, View):
    """Detach the PDF statement from the contest."""

    def post(self, request, *args, **kwargs):
        contest = self.contest
        contest.pdf_statement = None
        contest.save(update_fields=['pdf_statement'])
        return redirect(reverse('polygon:contest_meta', kwargs={'pk': str(contest.id)}))
class ContestCreate(PolygonBaseMixin, View):
    """Create a fresh contest managed by the requesting user."""

    def post(self, request, *args, **kwargs):
        contest = Contest.objects.create(title='Contest')
        # The final title embeds the auto-assigned primary key, which is
        # only known after the first save.
        contest.title = 'Contest #%d' % contest.id
        contest.save(update_fields=['title'])
        contest.managers.add(request.user)
        return redirect(reverse('polygon:contest_meta', kwargs={'pk': str(contest.id)}))
class HomeworkClone(PolygonBaseMixin, View):
    """Clone a homework-type contest (e.g. for a new semester)."""

    def post(self, request, *args, **kwargs):
        try:
            n = request.POST['answer']
            contest = Contest.objects.get(pk=n)
            # Permission check: refuse to clone unless the user manages the
            # contest AND it is a homework set (not a regular contest).
            if not is_contest_manager(request.user, contest):
                raise PermissionError
            if contest.contest_type != 1:
                raise PermissionError
            # The clone copies the homework set's basic metadata, problem list,
            # author list and manager list (volunteers are NOT copied).
            # Teachers must import the new term's student roster manually and
            # adjust the homework set's start time.
            # Cloning grants no problem-management rights: unless the cloner
            # already manages a problem, they only manage the new contest
            # itself plus whatever problems they already control.
            problem_list = contest.contestproblem_set.all()
            contest_author = contest.authors.all()
            contest_manager = contest.managers.all()
            new_hw = Contest.objects.create(title='Contest')
            new_hw.title = 'Contest #%d' % contest.id
            new_hw.managers.add(request.user)
            saved_id = new_hw.id
            # Re-point the in-memory `contest` at the new primary key so the
            # following save() copies all its remaining fields onto the freshly
            # created row (overwriting new_hw's placeholder title).
            contest.id = saved_id
            contest.title = contest.title + ' - 复制'
            contest.save()
            for p in problem_list:
                contest.contestproblem_set.create(identifier=p.identifier, problem_id=p.problem_id, weight=p.weight)
            for c in contest_author:
                contest.authors.add(c.id)  # copy the problem setters
                contest.managers.add(c.id)  # setters also manage the new term's set
            for m in contest_manager:
                if m.polygon_enabled:
                    # Carry over only managers with Polygon access, dropping
                    # last term's teaching assistants.
                    contest.managers.add(m.id)
            contest.save()
        except:
            # Best-effort: any failure (missing POST key, bad pk, no
            # permission) silently returns to the contest list.
            return redirect(reverse('polygon:contest_list'))
        return redirect(reverse('polygon:contest_list') + "?exact=%d" % saved_id)
class ContestAccessManage(PolygonContestMixin, View):
    """Replace the contest's manager list with the POSTed comma-separated ids."""

    def post(self, request, pk):
        chosen = {int(token) for token in request.POST['admin'].split(',') if token}
        self.contest.managers.clear()
        for user_id in chosen:
            self.contest.managers.add(User.objects.get(pk=user_id))
        # Never let the current user lock themselves out.
        if self.request.user.pk not in chosen:
            self.contest.managers.add(self.request.user)
        return redirect(reverse('polygon:contest_meta', kwargs={'pk': str(pk)}))
class ContestAuthorsManage(PolygonContestMixin, View):
    """Replace the contest's author list with the POSTed comma-separated ids."""

    def post(self, request, pk):
        chosen = {int(token) for token in request.POST['author'].split(',') if token}
        self.contest.authors.clear()
        for user_id in chosen:
            self.contest.authors.add(User.objects.get(pk=user_id))
        return redirect(reverse('polygon:contest_meta', kwargs={'pk': str(pk)}))
class ContestVolunteersManage(PolygonContestMixin, View):
    """Replace the contest's volunteer list with the POSTed comma-separated ids."""

    def post(self, request, pk):
        chosen = {int(token) for token in request.POST['volunteer'].split(',') if token}
        self.contest.volunteers.clear()
        for user_id in chosen:
            self.contest.volunteers.add(User.objects.get(pk=user_id))
        return redirect(reverse('polygon:contest_meta', kwargs={'pk': str(pk)}))
class ContestProblemManage(PolygonContestMixin, TemplateView):
    """Problem management page; with ?data in the query it serves the table as JSON."""
    template_name = 'polygon/contest/problem.jinja2'

    def get(self, request, *args, **kwargs):
        if 'data' not in request.GET:
            return super(ContestProblemManage, self).get(request, *args, **kwargs)
        payload = []
        for contest_problem in self.contest.contest_problem_list:
            entry = {
                "title": contest_problem.problem.title,
                "id": contest_problem.problem.id,
                "alias": contest_problem.problem.alias,
                "pid": contest_problem.id,
                "identifier": contest_problem.identifier,
                "weight": contest_problem.weight,
                "user_ac": contest_problem.ac_user_count,
                "user_tot": contest_problem.total_user_count,
                "ac": contest_problem.ac_count,
                "tot": contest_problem.total_count,
                "user_ratio": contest_problem.user_ratio,
                "ratio": contest_problem.ratio,
            }
            payload.append(entry)
        return HttpResponse(json.dumps(payload))
class ContestProblemReorder(PolygonContestMixin, View):
    """Persist a drag-and-drop ordering of contest problems."""

    def post(self, request, *args, **kwargs):
        # Map contest-problem pk -> its new position in the POSTed order.
        order_map = {}
        for position, entry in enumerate(json.loads(request.POST['orders'])):
            order_map[entry['pid']] = position
        reorder_contest_problem_identifiers(self.contest, order_map)
        return HttpResponse()
class ContestProblemCreate(PolygonContestMixin, View):
    """Attach one or more problems (comma-separated ids) to the contest."""

    def post(self, request, pk):
        def get_next_identifier(identifiers):
            # BFS over 'A'..'Z', then 'AA'.. yields the shortest unused label.
            from collections import deque
            queue = deque([''])
            while queue:
                prefix = queue.popleft()
                if prefix and prefix not in identifiers:
                    return prefix
                for code in range(ord('A'), ord('Z') + 1):
                    queue.append(prefix + chr(code))

        tokens = (token.strip() for token in request.POST['problems'].split(','))
        problems = [token for token in tokens if token]
        for problem in problems:
            # Deny unless the problem is publicly visible, the user is an
            # admin, or the user manages that problem.
            if not Problem.objects.filter(id=problem, visible=True).exists() and \
                    not is_admin_or_root(request.user) and \
                    not request.user.managing_problems.filter(id=problem).exists():
                raise PermissionDenied
        for problem in problems:
            if self.contest.contestproblem_set.filter(problem_id=problem).exists():
                continue  # already attached
            identifier = get_next_identifier([cp.identifier for cp in self.contest.contestproblem_set.all()])
            self.contest.contestproblem_set.create(problem_id=problem, identifier=identifier)
        reorder_contest_problem_identifiers(self.contest)
        return HttpResponse()
class ContestProblemDelete(PolygonContestMixin, View):
    """Detach a problem from the contest and relabel the remaining ones."""

    def post(self, request, pk):
        target_id = request.POST['pid']
        self.contest.contestproblem_set.filter(id=target_id).delete()
        reorder_contest_problem_identifiers(self.contest)
        return HttpResponse()
class ContestProblemChangeWeight(PolygonContestMixin, View):
    """Set a contest problem's scoring weight.

    The weight must lie in (0, 10000]; invalid values raise ValueError
    (an explicit check instead of `assert`, which is stripped under -O
    and would silently accept any weight).
    """

    def post(self, request, pk):
        problem = self.contest.contestproblem_set.get(id=request.POST['pid'])
        weight = int(request.POST['weight'])
        if not 0 < weight <= 10000:
            raise ValueError("weight must be in (0, 10000]")
        problem.weight = weight
        problem.save(update_fields=['weight'])
        return HttpResponse()
class ContestProblemChangeIdentifier(PolygonContestMixin, View):
    """Manually rename one contest problem's identifier."""

    def post(self, request, pk):
        new_identifier = request.POST['identifier'].strip()
        contest_problem = self.contest.contestproblem_set.get(id=request.POST['pid'])
        contest_problem.identifier = new_identifier
        contest_problem.save(update_fields=['identifier'])
        return HttpResponse()
class ContestInvitationList(PolygonContestMixin, ListView):
    """Paginated list of invitation codes for the contest."""
    template_name = 'polygon/contest/invitation.jinja2'
    paginate_by = 100
    context_object_name = 'invitation_list'

    def get_queryset(self):
        # PolygonContestMixin.dispatch already fetched the contest; reuse it
        # instead of re-querying Contest by pk (consistent with the other
        # views in this module, and saves a query).
        return self.contest.contestinvitation_set.all()

    def get_context_data(self, **kwargs):
        data = super(ContestInvitationList, self).get_context_data(**kwargs)
        data['contest'] = self.contest
        return data
class ContestInvitationCreate(PolygonContestMixin, View):
    """Bulk-create invitation codes: N blank ones, or one per comment line."""

    @staticmethod
    def _create(contest, comments):
        random_gen = shortuuid.ShortUUID()
        invitations = [
            ContestInvitation(contest=contest, code=random_gen.random(12), comment=comment)
            for comment in comments
        ]
        ContestInvitation.objects.bulk_create(invitations)

    def post(self, request, pk):
        try:
            comments = [''] * int(request.POST['number'])
        except KeyError:
            # No count given: one invitation per non-empty line of the list.
            comments = [line.strip() for line in request.POST['list'].split('\n') if line.strip()]
        contest = Contest.objects.get(pk=pk)
        self._create(contest, comments)
        return HttpResponseRedirect(request.POST['next'])
class ContestInvitationDelete(PolygonContestMixin, View):
    """Delete a single invitation code."""

    def post(self, request, pk, invitation_pk):
        # Reuse the contest loaded by PolygonContestMixin.dispatch instead of
        # re-fetching it by pk (consistent with the rest of the module).
        self.contest.contestinvitation_set.get(pk=invitation_pk).delete()
        return redirect(reverse('polygon:contest_invitation', kwargs={'pk': self.contest.pk}))
class ContestInvitationAssign(PolygonContestMixin, View):
    """Bind an invitation code to an existing user, looked up by username."""

    def post(self, request, pk, invitation_pk):
        username = request.POST.get('username')
        try:
            user = User.objects.get(username=username)
            add_participant_with_invitation(pk, invitation_pk, user)
            messages.success(
                request,
                'The user <strong>%s</strong> has been successfully added to the contest.' % username)
        except User.DoesNotExist:
            messages.error(
                request,
                'The user <strong>%s</strong> does not exist. Please check again.' % username)
        except IntegrityError:
            # The user is already a participant.
            messages.error(request, 'You cannot add one user twice.')
        return HttpResponseRedirect(request.POST['next'])
class ContestParticipantList(PolygonContestMixin, ListView):
    """Paginated participant list with user rows pre-joined."""
    template_name = 'polygon/contest/participant.jinja2'
    paginate_by = 100
    context_object_name = 'participant_list'

    def get_queryset(self):
        # Reuse self.contest set by PolygonContestMixin.dispatch instead of
        # re-fetching the contest by pk (consistency + one query saved).
        return self.contest.contestparticipant_set.select_related('user').all()

    def get_context_data(self, **kwargs):
        data = super(ContestParticipantList, self).get_context_data(**kwargs)
        data['contest'] = self.contest
        return data
class ContestParticipantCommentUpdate(PolygonContestMixin, View):
    """Update the display comment of one participant."""

    def post(self, request, pk, participant_pk):
        new_comment = request.POST.get('comment')
        with transaction.atomic():
            participant = ContestParticipant.objects.select_for_update().get(pk=participant_pk)
            participant.comment = new_comment
            participant.save(update_fields=["comment"])
        return HttpResponseRedirect(request.POST['next'])
class ContestParticipantStarToggle(PolygonContestMixin, View):
    """Flip the star (out-of-competition) flag on one participant."""

    def post(self, request, pk, participant_pk):
        with transaction.atomic():
            participant = Contest.objects.get(pk=pk) \
                .contestparticipant_set.select_for_update().get(pk=participant_pk)
            participant.star = not participant.star
            participant.save(update_fields=["star"])
        return HttpResponse()
class ContestParticipantStarListToggle(PolygonContestMixin, View):
    """Star every participant whose user id appears in the POSTed list."""

    def post(self, request, pk):
        with transaction.atomic():
            user_ids = [line.strip() for line in request.POST['list'].split('\n') if line.strip()]
            for user_id in user_ids:
                # Only plain numeric ids are accepted.
                if not user_id.isdigit():
                    raise PermissionDenied
            Contest.objects.get(pk=pk).contestparticipant_set.filter(user_id__in=user_ids). \
                select_for_update().update(star=True)
        return HttpResponse()
class ContestParticipantClearIP(PolygonContestMixin, View):
    """Forget the recorded IP address of one participant."""

    def post(self, request, pk, participant_pk):
        with transaction.atomic():
            participant = Contest.objects.get(pk=pk) \
                .contestparticipant_set.select_for_update().get(pk=participant_pk)
            participant.ip_address = None
            participant.save(update_fields=["ip_address"])
        return HttpResponse()
class ContestParticipantCreate(PolygonContestMixin, View):
    """Mass-create throwaway accounts (one per posted list line) as participants."""

    @staticmethod
    def _get_username(contest_id, user_id):
        # e.g. "c12#0003" for the 3rd generated account of contest 12.
        return "c%s#%04d" % (str(contest_id), int(user_id))

    def post(self, request, pk):
        namelist = list(filter(lambda x: x, map(lambda x: x.strip(), request.POST['list'].split('\n'))))
        user_id = 1
        contest = Contest.objects.get(pk=pk)
        for name in namelist:
            # A leading '*' marks the participant as starred (out of competition).
            if name.startswith('*'):
                comment = name[1:].strip()
                star = True
            else:
                comment = name
                star = False
            password_gen = shortuuid.ShortUUID("23456789ABCDEF")
            password = password_gen.random(8)
            while True:
                try:
                    username = self._get_username(pk, user_id)
                    email = '%s@fake.ecnu.edu.cn' % username
                    user = User.objects.create(username=username, email=email)
                    user.set_password(password)
                    user.save()
                    user.avatar.save('generated.png', Identicon(user.email).get_bytes())
                    # The generated password is kept in hidden_comment so that
                    # managers can hand it out later.
                    ContestParticipant.objects.create(user=user, comment=comment, hidden_comment=password,
                                                      star=star, contest=contest)
                    break
                except IntegrityError:
                    # Username collision (e.g. from a previous import): fall
                    # through, bump the counter and retry with the next id.
                    pass
                user_id += 1
        invalidate_contest(contest)
        return HttpResponseRedirect(request.POST['next'])
class ContestClarificationAnswer(PolygonContestMixin, View):
    """Save an answer to a clarification request."""

    def post(self, request, pk, clarification_id):
        clarification = ContestClarification.objects.get(pk=clarification_id)
        clarification.answer = request.POST['answer']
        # NOTE(review): "status" is listed in update_fields but never assigned
        # here -- presumably the model updates it when answer is set; confirm
        # against the ContestClarification model.
        clarification.save(update_fields=["status", "answer"])
        return HttpResponseRedirect(reverse('contest:clarification', kwargs={'cid': pk}))
class RejudgeContestProblemSubmission(PolygonContestMixin, View):
    """Rejudge every submission of one problem, or of the whole contest ('all')."""

    def post(self, request, pk):
        target = request.POST['problem']
        if target == 'all':
            rejudge_all_submission_on_contest(self.contest)
        else:
            problem = get_object_or_404(Problem, pk=target)
            rejudge_all_submission_on_contest_problem(self.contest, problem)
        return redirect(reverse('polygon:contest_status', kwargs={'pk': self.contest.id}))
class ContestSystemTestView(PolygonContestMixin, View):
    """Run system tests by rejudging the submissions whose verdict may change."""

    def post(self, request, pk):
        # almost same as rejudge
        if self.contest.scoring_method in ["subtask", "oi"]:
            # Partial scoring: any submission's score may change, take them all.
            submission_set = self.contest.submission_set.all().order_by("create_time")
        else:
            # status private?
            relevant_statuses = [SubmissionStatus.ACCEPTED,
                                 SubmissionStatus.JUDGING,
                                 SubmissionStatus.WAITING,
                                 SubmissionStatus.SUBMITTED,
                                 SubmissionStatus.PRETEST_PASSED]
            submission_set = self.contest.submission_set \
                .filter(status__in=relevant_statuses) \
                .order_by("create_time")
        if len(submission_set) > 0:
            # Run the (slow) rejudge in the background.
            Thread(target=rejudge_submission_set, args=(submission_set,)).start()
        return redirect(reverse('polygon:contest_status', kwargs={'pk': self.contest.id}))
class ContestStatusBackend(PolygonContestMixin, StatusList):
    """Privileged submission status list scoped to this contest."""
    template_name = 'polygon/contest/status.jinja2'
    contest_submission_visible = True
    privileged = True

    def get_selected_from(self):
        return self.contest.submission_set.all()

    def reinterpret_problem_identifier(self, value):
        # Status filters use contest identifiers (A, B, ...), not problem pks.
        return self.contest.contestproblem_set.get(identifier=value).problem_id

    def get_context_data(self, **kwargs):
        context = super(ContestStatusBackend, self).get_context_data(**kwargs)
        context['show_percent'] = self.contest.scoring_method in ('oi',)
        self.contest.add_contest_problem_to_submissions(context['submission_list'])
        return context
class ContestInvitationCodeDownload(PolygonContestMixin, View):
    """Export all invitation codes as a CSV download."""

    def get(self, request, pk):
        rows = [[invitation.comment, invitation.code]
                for invitation in self.contest.contestinvitation_set.all()]
        filename = write_csv(rows)
        return respond_generate_file(request, filename, file_name_serve_as="InvitationCode - %s.csv" % self.contest.title)
class ContestParticipantsNoteDownload(PolygonContestMixin, View):
    """Export participants (comment, username, generated password) as CSV."""

    def get(self, request, pk):
        rows = [[participant.comment, participant.user.username, participant.hidden_comment]
                for participant in self.contest.contestparticipant_set.select_related("user").all()]
        filename = write_csv(rows)
        return respond_generate_file(request, filename,
                                     file_name_serve_as="ContestParticipant - %s.csv" % self.contest.title)
class ContestAccountDisable(PolygonContestMixin, View):
    """Toggle the is_disabled flag on one participant account."""

    def post(self, request, pk, participant_pk):
        participant = get_object_or_404(ContestParticipant, pk=participant_pk)
        participant.is_disabled = not participant.is_disabled
        participant.save(update_fields=['is_disabled'])
        return JsonResponse({})
class ContestParticipantAutoStarView(PolygonContestMixin, View):
    """Auto-star participants: ?type=nosub (never submitted) or nologin (never logged in)."""

    def post(self, request, pk):
        criterion = request.GET.get('type')
        for participant in self.contest.contestparticipant_set.all():
            if criterion == "nosub":
                should_star = not self.contest.submission_set.filter(author_id=participant.user_id).exists()
            elif criterion == "nologin":
                should_star = participant.user.last_login is None
            else:
                should_star = False
            if should_star:
                participant.star = True
                participant.save(update_fields=['star'])
        return redirect(reverse('polygon:contest_participant', kwargs={"pk": self.contest.id}))
class ContestParticipantFromActivity(PolygonContestMixin, View):
    """Import participants (with real names) from an Activity roster. Admin only."""

    def post(self, request, pk):
        if request.POST.get("answer") and is_admin_or_root(self.request.user):
            activity = get_object_or_404(Activity, pk=request.POST["answer"])
            with transaction.atomic():
                for member in activity.activityparticipant_set.filter(is_deleted=False):
                    participant, _ = self.contest.contestparticipant_set.get_or_create(user=member.user)
                    if not participant.comment:
                        # Use the real name as the display comment when unset.
                        participant.comment = member.real_name
                        participant.save(update_fields=['comment'])
        return redirect(reverse('polygon:contest_participant', kwargs={"pk": self.contest.id}))
class ContestGhostRecordImport(PolygonContestMixin, FormView):
    """Import a testsys ghost log: creates ghost users plus their submissions."""
    form_class = TestSysUploadForm
    template_name = 'polygon/contest/ghost_import.jinja2'

    def parse_line(self, line):
        """Parse one '@directive a,b,"c,d"' log line.

        Returns (directive, [args]) with double-quoted commas honored, or
        None when the line is not a directive or is malformed.
        """
        if line.startswith("@"):
            try:
                # maxsplit as keyword: the positional form is deprecated
                # (removed in Python 3.13).
                directive, content = re.split(r'\s+', line[1:], maxsplit=1)
                ret, cur = [], ''
                in_quotes = False
                for token in content:
                    if token == ',' and not in_quotes:
                        ret.append(cur.strip())
                        cur = ''
                    elif token == '"':
                        in_quotes = not in_quotes
                    else:
                        cur += token
                assert not in_quotes  # unbalanced quotes -> malformed
                ret.append(cur.strip())
                return (directive.lower(), ret)
            except:
                return None
        return None

    def form_valid(self, form):
        line_counter = 0
        verdict_map = {
            "OK": SubmissionStatus.ACCEPTED,
            "WA": SubmissionStatus.WRONG_ANSWER,
            "RT": SubmissionStatus.RUNTIME_ERROR,
            "TL": SubmissionStatus.TIME_LIMIT_EXCEEDED,
            "ML": SubmissionStatus.MEMORY_LIMIT_EXCEEDED,
            "CE": SubmissionStatus.COMPILE_ERROR,
            "RJ": SubmissionStatus.REJECTED,
        }
        try:
            participant_count = 0
            team_name_map = dict()   # team number -> (team name, 1-based local id)
            team_id_map = dict()     # local id -> ghost User pk
            submissions = []
            for line in form.cleaned_data["testsys_log"].split("\n"):
                line_counter += 1
                parse_result = self.parse_line(line)
                if parse_result is None:
                    continue
                directive, args = parse_result
                logger.info("%s %s", directive, args)
                if directive == "teams":
                    participant_count = int(args[0])
                    if participant_count >= 10000:
                        raise Exception("队伍数量过多。")
                elif directive == "t":
                    team_number = args[0]
                    if len(team_name_map) >= participant_count:
                        raise Exception("队伍 %s 已经超出了场地容量 %d。" % (team_number, participant_count))
                    if team_number in team_name_map:
                        # BUGFIX: the %s placeholder was previously left
                        # unfilled, so the message showed a literal "%s".
                        raise Exception("队伍 %s 重复注册了。" % team_number)
                    l = len(team_name_map) + 1
                    team_name_map[team_number] = (args[3], l)
                elif directive == "s":
                    team_number = args[0]
                    time_seconds = int(args[3])
                    verdict = verdict_map[args[4]]
                    if team_number not in team_name_map:
                        raise Exception("队伍 %s 没有注册过。" % team_number)
                    if not 0 <= time_seconds <= int(round(self.contest.length.total_seconds())):
                        raise Exception("提交时间 %d 不在范围内。" % time_seconds)
                    candidate_problem_list = [x for x in self.contest.contest_problem_list if x.identifier == args[1]]
                    if len(candidate_problem_list) != 1:
                        raise Exception("'%s' 没找到或者找到了多个相应的题目。" % args[1])
                    problem_id = candidate_problem_list[0].problem_id
                    submit_time = self.contest.start_time + timedelta(seconds=time_seconds)
                    submissions.append((team_number, problem_id, submit_time, verdict))
            if len(team_name_map) != participant_count:
                raise Exception("队伍信息与队伍数量不一致。")
            if participant_count == 0 or len(submissions) == 0:
                raise Exception("没有参赛者或没有提交。")
            with transaction.atomic():
                # Wipe any previous ghost import before re-importing.
                user_ids = list(User.objects.filter(username__icontains='ghost#').values_list("id", flat=True))
                self.contest.contestparticipant_set.filter(user_id__in=user_ids).delete()
                self.contest.submission_set.filter(author_id__in=user_ids).delete()
                for team_name, team_id in team_name_map.values():
                    user, _ = User.objects.get_or_create(username=self.get_ghost_username(team_id),
                                                         email=self.get_ghost_username(team_id) + "@ghost.ecnu.edu.cn")
                    user.set_unusable_password()
                    user.save()
                    team_id_map[team_id] = user.id
                    self.contest.contestparticipant_set.create(user=user, comment=team_name)
                for team_number, problem_id, submit_time, verdict in submissions:
                    _, team_id = team_name_map[team_number]
                    s = self.contest.submission_set.create(author_id=team_id_map[team_id],
                                                           status=verdict, problem_id=problem_id,
                                                           contest_time=submit_time - self.contest.start_time,
                                                           lang=None, status_time=None,
                                                           status_percent=100 if verdict == SubmissionStatus.ACCEPTED else 0)
                    # create_time is auto-set on insert; overwrite it afterwards
                    # with the time from the log.
                    s.create_time = submit_time
                    s.save(update_fields=["create_time"])
            return redirect(reverse('polygon:contest_status', kwargs={'pk': self.contest.pk}))
        except Exception as e:
            # Report the failing line number back to the uploader.
            messages.error(self.request, "在第 %d 行有错误:" % line_counter + str(e))
            return redirect(self.request.path)

    def get_ghost_username(self, num):
        # Ghost accounts are named ghost#0001, ghost#0002, ...
        return "ghost#%04d" % num
|
highlights.py | """Highlights API handler."""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib'))
import json
import time
from time import sleep
import requests
import threading
try:
from gateway_addon import APIHandler, APIResponse, Adapter, Device, Property, Database
#print("succesfully loaded APIHandler and APIResponse from gateway_addon")
except:
print("Import APIHandler and APIResponse from gateway_addon failed. Use at least WebThings Gateway version 0.10")
sys.exit(1)
_TIMEOUT = 3
_CONFIG_PATHS = [
os.path.join(os.path.expanduser('~'), '.webthings', 'config'),
]
if 'WEBTHINGS_HOME' in os.environ:
_CONFIG_PATHS.insert(0, os.path.join(os.environ['WEBTHINGS_HOME'], 'config'))
class HighlightsAPIHandler(APIHandler):
"""Highlights API handler."""
def __init__(self, verbose=False):
    """Initialize the object.

    Loads config, migrates old persistence files, loads persistent data,
    creates the adapter, registers this API handler with the gateway and
    starts the background clock thread.
    """
    #print("INSIDE API HANDLER INIT")
    self.addon_name = 'highlights'
    self.running = True  # the clock loop keeps spinning while this is True
    self.api_server = 'http://127.0.0.1:8080'
    self.DEV = True
    self.DEBUG = False
    self.things = [] # Holds all the things, updated via the API. Used to display a nicer thing name instead of the technical internal ID.
    self.data_types_lookup_table = {}
    self.token = None  # gateway API bearer token, from config or persistence
    self.clock_delay = 1 # how many seconds between updating in the clock loop
    # LOAD CONFIG
    try:
        self.add_from_config()
    except Exception as ex:
        print("Error loading config: " + str(ex))
    # Temporary moving of persistence files from the pre-rebrand location
    try:
        old_location = os.path.join(os.path.expanduser('~'), '.mozilla-iot.old', 'data', self.addon_name,'persistence.json')
        new_location = os.path.join(os.path.expanduser('~'), '.webthings', 'data', self.addon_name,'persistence.json')
        if os.path.isfile(old_location) and not os.path.isfile(new_location):
            print("moving persistence file to new location: " + str(new_location))
            os.rename(old_location, new_location)
    except Exception as ex:
        print("Error copying old persistence file to new location: " + str(ex))
    # Paths
    # Get persistent data
    try:
        #print("self.user_profile['dataDir'] = " + str(self.user_profile))
        self.persistence_file_path = os.path.join(self.user_profile['dataDir'], self.addon_name, 'persistence.json')
    except:
        # user_profile not available (older gateway): fall back step by step.
        try:
            if self.DEBUG:
                print("setting persistence file path failed, will try older method.")
            self.persistence_file_path = os.path.join(os.path.expanduser('~'), '.webthings', 'data', self.addon_name,'persistence.json')
        except:
            if self.DEBUG:
                print("Double error making persistence file path")
            # Last resort: hard-coded default Raspberry Pi path.
            self.persistence_file_path = "/home/pi/.webthings/data/" + self.addon_name + "/persistence.json"
    if self.DEBUG:
        print("Current working directory: " + str(os.getcwd()))
    first_run = False
    try:
        with open(self.persistence_file_path) as f:
            self.persistent_data = json.load(f)
            if self.DEBUG:
                print("Persistence data was loaded succesfully.")
    except:
        # Missing/corrupt persistence file: start with an empty item list.
        first_run = True
        print("Could not load persistent data (if you just installed the add-on then this is normal)")
        self.persistent_data = {'items':[]}
    if self.DEBUG:
        print("Highlights self.persistent_data is now: " + str(self.persistent_data))
    try:
        self.adapter = HighlightsAdapter(self,verbose=False)
        #self.manager_proxy.add_api_handler(self.extension)
        #print("ADAPTER created")
    except Exception as e:
        print("Failed to start ADAPTER. Error: " + str(e))
    # Is there user profile data?
    #try:
    #    print(str(self.user_profile))
    #except:
    #    print("no user profile data")
    # Fall back to the token saved in persistence when config had none.
    if self.token == None:
        if 'token' in self.persistent_data:
            self.token = self.persistent_data['token']
    # Get complete things dictionary via API
    try:
        self.things = self.api_get("/things")
        print("Did the things API call. Self.things is now:")
        print(str(self.things))
    except Exception as ex:
        print("Error getting updated things data via API: " + str(ex))
    # Intiate extension addon API handler
    try:
        manifest_fname = os.path.join(
            os.path.dirname(__file__),
            '..',
            'manifest.json'
        )
        with open(manifest_fname, 'rt') as f:
            manifest = json.load(f)
        APIHandler.__init__(self, manifest['id'])
        self.manager_proxy.add_api_handler(self)
        if self.DEBUG:
            print("self.manager_proxy = " + str(self.manager_proxy))
            print("Created new API HANDLER: " + str(manifest['id']))
    except Exception as e:
        print("Failed to init UX extension API handler: " + str(e))
    # Respond to gateway version
    try:
        if self.DEBUG:
            print(self.gateway_version)
    except:
        print("self.gateway_version did not exist")
    # Start the internal clock
    if self.DEBUG:
        print("Starting the internal clock")
    try:
        t = threading.Thread(target=self.clock)
        t.daemon = True
        t.start()
    except:
        print("Error starting the clock thread")
# Read the settings from the add-on settings page
def add_from_config(self):
    """Attempt to read config data from the add-on settings database.

    Populates self.token, self.clock_delay and self.DEBUG from the user's
    settings; returns early (leaving defaults) when loading fails.
    """
    # BUGFIX: `config` must exist even if the try-block fails before the
    # assignment; previously the `if not config` check below could raise
    # NameError instead of printing the intended error message.
    config = None
    try:
        database = Database(self.addon_name)
        if not database.open():
            print("Could not open settings database")
            return
        config = database.load_config()
        database.close()
    except Exception:
        print("Error! Failed to open settings database.")
    if not config:
        print("Error loading config from database")
        return
    # Api token
    try:
        if 'Authorization token' in config:
            if str(config['Authorization token']) != "":
                self.token = str(config['Authorization token'])
                print("-Authorization token is present in the config data.")
    except Exception:
        print("Error loading api token from settings")
    if 'Frequency' in config:
        # How many seconds the clock loop waits between updates.
        self.clock_delay = int(config['Frequency'])
        if self.DEBUG:
            print("Frequency was in config: " + str(config['Frequency']))
    if 'Debugging' in config:
        self.DEBUG = bool(config['Debugging'])
        if self.DEBUG:
            print("-Debugging preference was in config: " + str(self.DEBUG))
#
# CLOCK
#
def clock(self):
#print("in clock thread. self.DEBUG: " + str(self.DEBUG))
#print(str(self.persistent_data['items']))
""" Runs every second """
while self.running:
time.sleep(self.clock_delay)
#print("tick")
try:
for item in self.persistent_data['items']:
#print(str(item))
if 'thing1' in item and 'property1' in item and 'thing1_atype' in item and 'property1_atype' in item and 'enabled' in item:
#print("everything is there")
if bool(item['enabled']) == False:
#print("not enabled, skipping")
continue
try:
#print("calling api")
api_get_result = self.api_get( '/things/' + str(item['thing1']) + '/properties/' + str(item['property1']))
#print("api_get_result = " + str(api_get_result))
key = list(api_get_result.keys())[0]
except:
if self.DEBUG:
print("error calling api or parsing the returned json")
#continue
try:
if key == "error":
if api_get_result[key] == 500:
if self.DEBUG:
print("API GET failed (500 - thing not currently connected)")
time.sleep(.5)
#pass
#return
else:
#print("API GET was succesfull")
original_value = api_get_result[key]
if self.DEBUG:
print("got this original value from API: " + str(original_value))
if original_value == "" or original_value == None:
#print("original value is an empty string.") # this happens if the gateway has just been rebooted, and the property doesn not have a value yet.
continue
if 'previous_value' not in item:
item['previous_value'] = None
try:
if str(item['previous_value']) != str(original_value):
#print("new value")
#print("get_devices = " + str(self.adapter.get_devices()))
targetDevice = self.adapter.get_device( "highlights-" + str(item['thing1']) + "-" + str(item['property1']) ) # targetDevice will be 'None' if it wasn't found.
if targetDevice == None:
if self.DEBUG:
print("target device did not exist yet: " + str(item['thing1']) )
# figure out optimal thing and property type
try:
for thing in self.things:
thing_id = str(thing['id'].rsplit('/', 1)[-1])
#print("__id: " + str(thing_id))
if str(item['thing1']) == thing_id:
#print("thing1 was thing_id at: " + str(thing_id))
#print("thing['properties'] = " + str(thing['properties']))
for thing_property_key in thing['properties']:
property_id = None
use_forms = False
#property_id = thing['properties'][thing_property_key]['links'][0]['href'].rsplit('/', 1)[-1]
try:
found_links = False
if 'links' in thing['properties'][thing_property_key]:
if len(thing['properties'][thing_property_key]['links']) > 0:
property_id = thing['properties'][thing_property_key]['links'][0]['href'].rsplit('/', 1)[-1]
found_links = True
if found_links == False:
if 'forms' in thing['properties'][thing_property_key]:
if len(thing['properties'][thing_property_key]['forms']) > 0:
use_forms = True
property_id = thing['properties'][thing_property_key]['forms'][0]['href'].rsplit('/', 1)[-1]
except Exception as ex:
if self.DEBUG:
print("Error extracting links/forms again: " + str(ex))
continue
if self.DEBUG:
print("property_id = " + str(property_id))
#property_id = thing['properties'][thing_property_key]['links'][0]['href'].rsplit('/', 1)[-1]
if str(item['property1']) == property_id:
#print("full property: " + str(thing['properties'][thing_property_key]))
#print("___type: " + str(thing['properties'][thing_property_key]['type']))
#if '@type' in thing['properties'][thing_property_key]:
# print("___atype: " + str(thing['properties'][thing_property_key]['@type']))
clone_property = thing['properties'][thing_property_key].copy()
clone_property['@type'] = item['property1_atype']
if use_forms:
if not clone_property['forms'][0]['href'].startswith("/things/highlights-"):
new_href = clone_property['forms'][0]['href'].replace("/things/","/things/highlights-")
clone_property['forms'][0]['href'] = new_href
else:
if not clone_property['links'][0]['href'].startswith("/things/highlights-"):
new_href = clone_property['links'][0]['href'].replace("/things/","/things/highlights-")
clone_property['links'][0]['href'] = new_href
new_thing_title = "highlights " + str(item['thing1'])
if 'title' in thing:
new_thing_title = "highlights " + str(thing['title'])
elif 'label' in thing:
new_thing_title = "highlights " + str(thing['label'])
new_thing_id = "highlights-" + str(item['thing1']) + '-' + str(item['property1'])
device = HighlightsDevice(self, self.adapter, new_thing_id, new_thing_title, item['thing1_atype']) # note that it's wrapped in an array
self.adapter.handle_device_added(device)
targetDevice = self.adapter.get_device(new_thing_id)
if targetDevice != None:
if self.DEBUG:
print("Creating cloned property. It's original_value will be: " + str(original_value))
targetDevice.properties[ str(item['property1']) ] = HighlightsProperty(
targetDevice,
str(item['property1']),
clone_property,
original_value,
str(item['thing1']),
str(item['property1']) )
targetProperty = targetDevice.find_property( str(item['property1']) )
self.adapter.handle_device_added(device)
targetProperty.update(original_value)
targetDevice.notify_property_changed(targetProperty)
else:
if self.DEBUG:
print("Error: the target device was still None after just creating it.")
except Exception as ex:
if self.DEBUG:
print("Error creating new thing: " + str(ex))
try:
if targetDevice != None:
targetProperty = targetDevice.find_property( str(item['property1']) )
if targetProperty != None:
targetProperty.update(original_value)
else:
if self.DEBUG:
print("Error: missing property wasn't created?")
else:
if self.DEBUG:
print("Error: missing device wasn't created?")
except Exception as ex:
if self.DEBUG:
print("Error updating property: " + str(ex))
except Exception as ex:
if self.DEBUG:
print("Error finding and updating property: " + str(ex))
continue
except Exception as ex:
if self.DEBUG:
print("Error putting via API: " + str(ex))
else:
if self.DEBUG:
print("missing values in item")
except Exception as ex:
if self.DEBUG:
print("Clock error: " + str(ex))
#
# HANDLE REQUEST
#
def handle_request(self, request):
"""
Handle a new API request for this handler.
request -- APIRequest object
"""
try:
if request.method != 'POST':
return APIResponse(status=404)
if request.path == '/init' or request.path == '/update_items':
try:
if request.path == '/init':
if self.DEBUG:
print("Getting the initialisation data")
try:
state = 'ok'
if 'jwt' in request.body:
if request.body['jwt'] != '' and request.body['jwt'] != None:
self.token = request.body['jwt']
self.persistent_data['token'] = request.body['jwt']
self.save_persistent_data()
# Check if a token is present
if self.token == None:
state = 'This addon requires an authorization token to work. Visit the settings page of this addon to learn more.'
return APIResponse(
status=200,
content_type='application/json',
content=json.dumps({'state' : state, 'items' : self.persistent_data['items']}),
)
except Exception as ex:
print("Error getting init data: " + str(ex))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps({'state' : "Internal error: no thing data", 'items' : []}),
)
elif request.path == '/update_items':
try:
self.persistent_data['items'] = request.body['items']
#print("")
# Get all the things via the API.
try:
self.things = self.api_get("/things")
#print("Did the things API call")
except Exception as ex:
if self.DEBUG:
print("Error getting updated things data via API: " + str(ex))
# try to get the correct property type (integer/float)
try:
for item in self.persistent_data['items']:
#print("_item: " + str(item))
if 'thing1' in item and 'property1' in item:
try:
for thing in self.things:
thing_id = str(thing['id'].rsplit('/', 1)[-1])
if str(item['thing1']) == thing_id:
#print("__id SPOTTED: " + str(thing_id))
try:
#print("thing = " + str(thing))
potential_atype = None
#print("Thing = " + str(thing))
#if '@type' in thing:
try:
if hasattr(thing, '@type'):
#print("existing @type spotted in thing")
if len(thing['@type']) == 1:
potential_atype = str(thing['@type'][0])
except Exception as ex:
if self.DEBUG:
print("Error checking for @type in thing: " + str(ex))
#print("thing['properties'] = " + str(thing['properties']))
for thing_property_key in thing['properties']:
property_id = None
#property_id = thing['properties'][thing_property_key]['links'][0]['href'].rsplit('/', 1)[-1]
try:
found_links = False
if 'links' in thing['properties'][thing_property_key]:
if len(thing['properties'][thing_property_key]['links']) > 0:
property_id = thing['properties'][thing_property_key]['links'][0]['href'].rsplit('/', 1)[-1]
found_links = True
if found_links == False:
if 'forms' in thing['properties'][thing_property_key]:
if len(thing['properties'][thing_property_key]['forms']) > 0:
property_id = thing['properties'][thing_property_key]['forms'][0]['href'].rsplit('/', 1)[-1]
if self.DEBUG:
print("property_id = " + str(property_id))
except Exception as ex:
if self.DEBUG:
print("Error extracting links/forms: " + str(ex))
continue
if self.DEBUG:
print("property_id = " + str(property_id))
if str(item['property1']) == property_id:
try:
#print("full property: " + str(thing['properties'][thing_property_key]))
done = False
item['property1_type'] = str(thing['properties'][thing_property_key]['type'])
if '@type' in thing['properties'][thing_property_key]:
if potential_atype != None:
# Cloning a property that already has a capability
#print("___atype: " + str(thing['properties'][thing_property_key]['@type']))
item['thing1_atype'] = potential_atype
item['property1_atype'] = str(thing['properties'][thing_property_key]['@type'])
done = True
else:
atype = str(thing['properties'][thing_property_key]['@type'])
if atype == 'AlarmProperty':
item['thing1_atype'] = 'Alarm'
item['property1_atype'] = atype
done = True
elif atype == 'OpenProperty':
item['thing1_atype'] = 'DoorSensor'
item['property1_atype'] = atype
done = True
elif atype == 'LockedProperty':
item['thing1_atype'] = 'Lock'
item['property1_atype'] = atype
done = True
elif atype == 'MotionProperty':
item['thing1_atype'] = 'MotionSensor'
item['property1_atype'] = atype
done = True
#elif atype == 'BrightnessProperty': # doesn't work, creates a boolean
# item['thing1_atype'] = 'Light'
# item['property1_atype'] = atype
# done = True
# Todo: we could look up the corresponding thing @type if a property @type is provided.
if done == False:
# here we figure out a fitting capabiliy if the property doesn't have one yet. This is required for it to show up as the highlighted property.
if item['property1_type'] == 'number' or item['property1_type'] == 'integer':
item['property1_atype'] = 'LevelProperty'
item['thing1_atype'] = 'MultiLevelSensor'
if 'unit' in thing['properties'][thing_property_key]:
if thing['properties'][thing_property_key]['unit'].lower() == 'watt': # or thing['properties'][thing_property_key]['unit'].lower() == 'kwh':
if self.DEBUG:
print("spotted a kwh or watt")
# technically using kwh is wrong here. But hey, I want that icon!
item['property1_atype'] = 'InstantaneousPowerProperty'
item['thing1_atype'] = 'EnergyMonitor'
if item['property1_type'] == 'boolean':
item['property1_atype'] = 'OnOffProperty'
item['thing1_atype'] = 'OnOffSwitch'
if 'readOnly' in thing['properties'][thing_property_key]:
if bool(thing['properties'][thing_property_key]['readOnly']) == True:
item['property1_atype'] = 'BooleanProperty'
item['thing1_atype'] = 'BinarySensor'
if self.DEBUG:
print("item['thing1_atype'] has been deduced as: " + str(item['thing1_atype']))
print("item['property1_atype'] has been deduced as: " + str(item['property1_atype']))
except Exception as ex:
if self.DEBUG:
print("Error while analysing properties: " + str(ex))
except Exception as ex:
if self.DEBUG:
print("Error while checking for @type in thing: " + str(ex))
except Exception as ex:
if self.DEBUG:
print("Error while while looping over things: " + str(ex))
continue
except Exception as ex:
if self.DEBUG:
print("Error finding if property should be int or float: " + str(ex))
self.save_persistent_data()
return APIResponse(
status=200,
content_type='application/json',
content=json.dumps({'state' : 'ok'}),
)
except Exception as ex:
if self.DEBUG:
print("Error saving updated items: " + str(ex))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("Error updating items: " + str(ex)),
)
else:
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("API error"),
)
except Exception as ex:
if self.DEBUG:
print("Init issue: " + str(ex))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("Error in API handler"),
)
else:
return APIResponse(status=404)
except Exception as e:
if self.DEBUG:
print("Failed to handle UX extension API request: " + str(e))
return APIResponse(
status=500,
content_type='application/json',
content=json.dumps("API Error"),
)
    def unload(self):
        """Called when the addon shuts down: stop the background clock loop."""
        # The clock thread checks this flag on every tick and exits once it
        # becomes False.
        self.running = False
        if self.DEBUG:
            print("Highlights api handler shutting down")
#def cancel_pairing(self):
# """Cancel the pairing process."""
# Get all the things via the API.
# try:
# self.things = self.api_get("/things")
# #print("Did the things API call")
# except Exception as ex:
# print("Error, couldn't load things at init: " + str(ex))
#
# API
#
    def api_get(self, api_path,intent='default'):
        """Returns data from the WebThings Gateway API.

        api_path -- endpoint path, e.g. "/things" or
                    "/things/<thing_id>/properties/<property_id>"
        intent -- unused here; kept for call-site compatibility

        Returns the parsed JSON (dict or list) on success, [] when no
        authorization token is configured, or {"error": <status>} /
        {"error": 500} on failure.
        """
        if self.DEBUG:
            print("GET PATH = " + str(api_path))
        if self.token == None:
            print("API GET: PLEASE ENTER AN AUTHORIZATION CODE IN THE SETTINGS PAGE")
            return []
        try:
            # NOTE(review): verify=False — presumably because the local gateway
            # uses a self-signed certificate; confirm against deployment.
            r = requests.get(self.api_server + api_path, headers={
                'Content-Type': 'application/json',
                'Accept': 'application/json',
                'Authorization': 'Bearer ' + str(self.token),
            }, verify=False, timeout=5)
            if self.DEBUG:
                print("API GET: " + str(r.status_code) + ", " + str(r.reason))
            if r.status_code != 200:
                if self.DEBUG:
                    print("API returned a status code that was not 200. It was: " + str(r.status_code))
                return {"error": str(r.status_code)}
            else:
                to_return = r.text
                try:
                    if self.DEBUG:
                        print("api_get: received: " + str(r))
                    # Gateway 1.1.0 returns a bare JSON value (no surrounding
                    # object) for single-property requests; wrap it back into
                    # {property_name: value} so callers see a dict.
                    if not '{' in r.text:
                        if self.DEBUG:
                            print("api_get: response was not json (gateway 1.1.0 does that). Turning into json...")
                        if 'things/' in api_path and '/properties/' in api_path:
                            if self.DEBUG:
                                print("properties was in api path: " + str(api_path))
                            # The last path segment is the property name.
                            likely_property_name = api_path.rsplit('/', 1)[-1]
                            to_return = {}
                            to_return[ likely_property_name ] = json.loads(r.text)
                            if self.DEBUG:
                                print("returning fixed: " + str(to_return))
                            return to_return
                except Exception as ex:
                    print("api_get_fix error: " + str(ex))
                if self.DEBUG:
                    print("returning without 1.1.0 fix")
                return json.loads(r.text)
        except Exception as ex:
            print("Error doing http request/loading returned json: " + str(ex))
            return {"error": 500}
    def api_put(self, api_path, json_dict, intent='default'):
        """Sends data to the WebThings Gateway API.

        api_path -- endpoint path, e.g. "/things/<id>/properties/<prop>"
        json_dict -- payload; for property PUTs this is {property: value}
        intent -- unused here; kept for call-site compatibility

        Returns the parsed JSON response (with a 'succes' flag added) on
        success, or {"error": <status>} / {"error": 500} on failure.
        """
        try:
            if self.DEBUG:
                print("PUT > api_path = " + str(api_path))
                print("PUT > json dict = " + str(json_dict))
                print("PUT > self.api_server = " + str(self.api_server))
                print("PUT > intent = " + str(intent))
                print("self.gateway_version: " + str(self.gateway_version))
            simplified = False
            property_was = None
            # Gateways newer than 1.0.0 expect a bare value for property PUTs
            # instead of {property: value}; unwrap it and remember the key so
            # the response can be re-wrapped below.
            if self.gateway_version != "1.0.0":
                if 'things/' in api_path and '/properties/' in api_path:
                    if self.DEBUG:
                        print("PUT: properties was in api path: " + str(api_path))
                    for bla in json_dict:
                        property_was = bla
                        simpler_value = json_dict[bla]
                        json_dict = simpler_value
                    if self.DEBUG:
                        print("simpler 1.1.0 value to put: " + str(simpler_value))
                    simplified = True
        except Exception as ex:
            print("Error preparing PUT: " + str(ex))

        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer {}'.format(self.token),
        }
        try:
            r = requests.put(
                self.api_server + api_path,
                json=json_dict,
                headers=headers,
                verify=False,
                timeout=5
            )
            if self.DEBUG:
                print("API PUT: " + str(r.status_code) + ", " + str(r.reason))
                print("PUT returned: " + str(r.text))
            if r.status_code != 200:
                if self.DEBUG:
                    print("Error communicating: " + str(r.status_code))
                return {"error": str(r.status_code)}
            else:
                if simplified:
                    # Re-wrap the bare response value under the original key.
                    return_value = {property_was:json.loads(r.text)}
                else:
                    return_value = json.loads(r.text)
                # NOTE: 'succes' key name is a typo, kept as-is because callers
                # elsewhere may check for it.
                return_value['succes'] = True
                return return_value
        except Exception as ex:
            print("Error doing http request/loading returned json: " + str(ex))
            return {"error": 500}
#
# Delete a thing
#
def delete_thing(self, device_id):
if self.DEBUG:
print("Deleting a highlighted thing")
try:
for i in range(len(self.persistent_data['items'])):
if 'thing1' in self.persistent_data['items'][i]:
if str(device_id) == 'highlights-' + str(self.persistent_data['items'][i]['thing1']):
del self.persistent_data['items'][i]
self.save_persistent_data()
break
except Exception as ex:
print("Error removing highligh thing from persistence data: " + str(ex))
#
# SAVE TO PERSISTENCE
#
def save_persistent_data(self):
#if self.DEBUG:
print("Saving to persistence data store at path: " + str(self.persistence_file_path))
try:
if not os.path.isfile(self.persistence_file_path):
open(self.persistence_file_path, 'a').close()
if self.DEBUG:
print("Created an empty persistence file")
#else:
# if self.DEBUG:
# print("Persistence file existed. Will try to save to it.")
with open(self.persistence_file_path) as f:
if self.DEBUG:
print("saving persistent data: " + str(self.persistent_data))
json.dump( self.persistent_data, open( self.persistence_file_path, 'w+' ) )
return True
except Exception as ex:
print("Error: could not store data in persistent store: " + str(ex) )
return False
def get_int_or_float(v):
    """Coerce *v* to an int when it is a whole number, otherwise to a
    float truncated to three decimal places."""
    as_float = float(v)
    whole = int(as_float)
    if whole == as_float:
        # Whole number: return the int form (e.g. 2.0 -> 2).
        return whole
    # Truncate (toward zero) to three decimals: 2.12345 -> 2.123.
    return int(as_float * 1000) / 1000
#
# ADAPTER
#
class HighlightsAdapter(Adapter):
    """Adapter that registers and manages the cloned 'highlight' devices."""

    def __init__(self, api_handler, verbose=False):
        """
        Initialize the object.

        api_handler -- the Highlights API handler that owns this adapter
        verbose -- whether or not to enable verbose logging
        """
        self.api_handler = api_handler
        addon_name = self.api_handler.addon_name
        self.name = addon_name
        self.adapter_name = addon_name
        Adapter.__init__(self, self.adapter_name, self.adapter_name, verbose=verbose)
        self.DEBUG = self.api_handler.DEBUG

    def remove_thing(self, device_id):
        """Handle a gateway request to remove one of our highlight devices."""
        if self.DEBUG:
            print("Removing highlight thing: " + str(device_id))
        try:
            # Drop the backing item from persistence first, then detach the
            # device object from the adapter's device dictionary.
            self.api_handler.delete_thing(device_id)
            device = self.get_device(device_id)
            self.handle_device_removed(device)
        except Exception as ex:
            print("Could not remove thing from highligh adapter devices: " + str(ex))
#
# DEVICE
#
class HighlightsDevice(Device):
    """A virtual device that mirrors one highlighted property of a real thing."""

    def __init__(self, handler, adapter, device_name, device_title, device_type):
        """
        Initialize the object.

        handler -- the Highlights API handler
        adapter -- the Adapter managing this device
        device_name -- id of the cloned device
        device_title -- human-readable title shown in the UI
        device_type -- @type capability string to advertise
        """
        Device.__init__(self, adapter, device_name)
        self.handler = handler
        self.adapter = adapter
        # Both id fields mirror the device name used at registration time.
        self._id = device_name
        self.id = device_name
        self.name = device_name
        self.title = device_title
        self.description = 'Highlight device'
        self._type.append(device_type)
#print("new thing's adapter = " + str(self.adapter))
#
# PROPERTY
#
class HighlightsProperty(Property):
    """Property that proxies a property of the original (real) thing."""

    def __init__(self, device, name, description, value, original_thing_id, original_property_id):
        """Create the clone and immediately publish its initial value."""
        Property.__init__(self, device, name, description)
        # Remember where the real property lives so set_value can forward writes.
        self.original_thing_id = original_thing_id
        self.original_property_id = original_property_id
        self.device = device
        self.name = name
        self.title = name
        self.description = description  # property description dictionary
        self.value = value
        self.set_cached_value(value)
        self.device.notify_property_changed(self)

    def set_value(self, value):
        """Forward a write on the clone to the original property via the API."""
        try:
            endpoint = '/things/' + str(self.original_thing_id) + '/properties/' + str(self.original_property_id)
            payload = { str(self.original_property_id) : value }
            self.device.handler.api_put(endpoint, payload)
        except Exception as ex:
            print("property:set value:error: " + str(ex))

    def update(self, value):
        """Adopt a new value observed on the original property (no API write)."""
        if value != self.value:
            self.value = value
            self.set_cached_value(value)
            self.device.notify_property_changed(self)
# peer.py
# ######################################
# TODO : SOROOSH NOORZAD - 99205372
# ######################################
# ######################################
# TODO : Import libraries
# ######################################
import os
import sys
import time
import json
import base64
import socket
import hashlib
import requests
import threading
import numpy as np
import pandas as pd
from os import walk
from random import randint
# below libraries probably can be removed from here
import datetime
import bencodepy # pip install bencode.py
# ######################################
# TODO : Classes and functions
# ######################################
from functions import *
def respond(client_socket):
    """Serve piece requests from one leecher over an open TCP connection.

    Protocol: leecher sends 'connect'; we answer 'unchoke'; leecher sends
    a piece number; we answer with the piece size; leecher sends 'ok';
    we stream the piece in BUFFER_SIZE chunks. Any other first message
    closes the connection and ends the thread.
    """
    while True:
        received_msg = client_socket.recv(BUFFER_SIZE).decode(FORMAT)
        if received_msg == 'connect':
            client_socket.send(bytes("unchoke", FORMAT))
            piece_number = client_socket.recv(BUFFER_SIZE).decode(FORMAT)
            seeds = pd.read_csv(peer_seeds_DB, delimiter=',')
            index_counts = len(seeds.index)
            # BUGFIX: rows are NOT guaranteed to be stored in piece-number
            # order (a leecher downloads rarest-first), so look the piece up
            # by its 'number' column instead of assuming row number-1 holds
            # piece <number> — the old code could serve the wrong piece.
            row = None
            for j in range(index_counts):
                if int(piece_number) == seeds["number"][j]:
                    row = j
                    break
            if row is not None:
                piece = seeds['piece'][row]
                piece_size = len(piece)
                client_socket.send(bytes(str(piece_size), FORMAT))
                ok = client_socket.recv(BUFFER_SIZE).decode(FORMAT)
                if ok == "ok":
                    # Stream the piece in BUFFER_SIZE-sized slices.
                    bytes_to_send = [piece[i:i + BUFFER_SIZE] for i in range(0, len(piece), BUFFER_SIZE)]
                    for k in range(len(bytes_to_send)):
                        client_socket.send(bytes(str(bytes_to_send[k]), FORMAT))
                else:
                    print('not ok!')
            else:
                print('not exist')
        else:
            print("not connect")
            client_socket.close()
            break
class SeedMode:
    """Run this peer as a seeder: listen for incoming leecher connections
    and hand each accepted socket to a `respond` daemon thread.

    NOTE(review): all the work happens in __init__, which never returns
    (infinite accept loop)."""
    def __init__(self):
        # INITIALISING :
        # creating an INET socket (IPV4 socket) with Streaming mode(TCP connections)
        # socket.SOCK_DGRAM is for UDP(user datagram protocol)
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # The SO_REUSEADDR flag tells the kernel to reuse a local socket in TIME_WAIT state.
        # TIME_WAIT indicates that local endpoint (this side) has closed the connection.
        # The connection is being kept around so that any delayed packets can be matched to
        # the connection and handled appropriately. The connections will be removed when
        # they time out within four minutes.
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Set the listening port on server for incoming connections
        server_socket.bind((myOwnIP, myOwnPort))
        # we want it to queue up as many as q connect requests (the normal max is 5)
        # before refusing outside connections. If the rest of the code is written
        # properly, q = 5 should be plenty.
        q = 5
        server_socket.listen(q)
        print("Seed Mode. Listening at ", (myOwnIP, myOwnPort))
        while True:
            # Waiting mode. Wait until accept connections from outside
            client_socket, peer_address = server_socket.accept()
            listening_ip = client_socket.getsockname()[0]
            listening_port = client_socket.getsockname()[1]
            # Incoming is the one who wants to connect server. = peer_address
            incoming_ip = client_socket.getpeername()[0]
            incoming_port = client_socket.getpeername()[1]
            print("Established: ", (incoming_ip, incoming_port), "->", (listening_ip, listening_port))
            logger(myOwnID, "IN___CONN", "Established: "
                   + incoming_ip + ":" + str(incoming_port) + "->" + listening_ip + ":" + str(listening_port))
            # We respond on a daemon thread so the accept loop keeps running.
            c_thread = threading.Thread(target=respond, args=(client_socket,))
            c_thread.daemon = True
            c_thread.start()
class LeechMode:
    """Download missing pieces from one remote peer over TCP, then (once
    the shared want-list is empty) reassemble the file on disk.

    p_ip / p_port -- address of the peer to leech from.
    list_to_take -- shared list of piece numbers still needed; finished
        pieces are popped from the front, so concurrent LeechMode threads
        see each other's progress.

    NOTE(review): all the work happens in __init__, which returns only
    when list_to_take is empty.
    """
    def __init__(self, p_ip, p_port, list_to_take):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
            client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # We don't bind: the outgoing port may be random.
            client_socket.connect((p_ip, p_port))
            print("Connected: ", (myOwnIP, myOwnPort), " -> ", (p_ip, p_port))
            while len(list_to_take) > 0:
                print("download piece #", list_to_take[0])
                client_socket.send(bytes("connect", FORMAT))
                received_data = client_socket.recv(BUFFER_SIZE).decode(FORMAT)
                if received_data == "unchoke":
                    # Request the piece and learn its announced size.
                    client_socket.send(bytes(str(list_to_take[0]), FORMAT))
                    chunk_size = client_socket.recv(BUFFER_SIZE).decode(FORMAT)
                    client_socket.send(bytes("ok", FORMAT))
                    # Keep receiving until the announced size has arrived.
                    piece_data = str(client_socket.recv(BUFFER_SIZE).decode(FORMAT))
                    received_size = len(piece_data)
                    while received_size < int(chunk_size):
                        piece_data = piece_data + str(client_socket.recv(BUFFER_SIZE).decode(FORMAT))
                        received_size = len(piece_data)
                        percent = int((received_size/float(chunk_size)) * 100)
                        print(str(percent) + "% of piece #" + str(list_to_take[0]) + " downloaded!") if percent < 100 else False
                        time.sleep(0.2)
                    check_sha1 = hashlib.sha1()
                    piece_data = piece_data.encode(FORMAT)
                    check_sha1.update(piece_data)
                    check_hash = check_sha1.hexdigest()
                    pieces_hash = str_to_list(Dictionary['info']['pieces'], "str")
                    # NOTE(review): the comparison of check_hash against
                    # pieces_hash is currently disabled — pieces are accepted
                    # unverified.
                    print("Piece #" + str(list_to_take[0]) + " downloaded!")
                    add_seed = pd.read_csv(peer_seeds_DB, delimiter=',')
                    # Add chunk in !
                    received_seed = \
                        np.array([[list_to_take[0], file_to_leech_name, piece_data, check_hash]])
                    add_seed = add_seed.append(
                        pd.DataFrame(received_seed, columns=['number', 'file_name', 'piece', 'piece_hash']))
                    add_seed.to_csv(peer_seeds_DB, index=False)
                    del list_to_take[0]
                else:
                    print('choke err')
                    time.sleep(10)
            # All pieces are in: reassemble the file in piece-number order.
            new_file_parts = pd.read_csv(peer_seeds_DB, delimiter=',')
            # BUGFIX: sort_values returns a new frame — the old code discarded
            # the result, so rows stayed in download (rarest-first) order and
            # the reassembled file was corrupted. The index must also be reset
            # because the positional [j] lookups below go by index label.
            new_file_parts = new_file_parts.sort_values(by=["number"], ascending=True).reset_index(drop=True)
            index_counts = len(new_file_parts.index)
            with open(myOwnID + " - " + file_to_leech_name, "wb") as new_file:
                for j in range(index_counts):
                    binary = new_file_parts["piece"][j]
                    binary = base64.b64decode(binary)
                    new_file.write(binary)
            logger(myOwnID, "GOT___ALL", "All the pieces taken")
            print('All gotten. Nothing to leech anymore.')
            client_socket.close()
def request_tracker():
    """Ask the tracker which peers are in our swarm, start a LeechMode
    thread for every new peer, and re-schedule itself after the interval
    the tracker dictates.

    NOTE(review): relies on module-level globals (peer_seeds_DB,
    trackerAddress, every_body_pieces, csvFileName, myOwn*) — some of
    them presumably come from `from functions import *`; verify there.
    """
    logger(myOwnID, "REQ__TRAK", "Leecher will request tracker for our swarm members identity.")
    # What I have? current seeds!
    current_seeds = pd.read_csv(peer_seeds_DB, delimiter=',')
    index_counts = len(current_seeds.index)
    # csn : current seeds numbers, serialised as "{1,2,...}" ("{}" when empty)
    csn = '{'
    if index_counts >= 1:
        for j in range(index_counts):
            csn = csn + str(current_seeds["number"][j]) + ','
        csn = csn[:-1] + '}'
    else:
        csn = csn + '}'
    # sending get request and saving the response as response object
    payload = {'id': myOwnID, 'ip': myOwnIP, 'port': myOwnPort, 'file': file_to_leech_name, 'pieces': csn}
    response = requests.get(url=trackerAddress, params=payload, stream=True)
    print("Update peers list from tracker.")
    logger(myOwnID, "UP___DATA", "Leecher will update its own database of peers list.")
    response = response.json()
    req_interval_time = response['req_interval_time']
    # More than one peer in the list means someone besides us is in the swarm.
    if len(response['peers']) > 1:
        # Refresh the pieces list
        every_body_pieces.clear()
        for peer in response['peers']:
            # append pieces of every peer to the list
            every_body_pieces.append(peer['pieces'])
        # sort piece numbers by frequency from rare to common (deduplicated)
        # with our defined function "sort_and_clean"
        pieces_sorted = sort_and_clean(every_body_pieces)
        # we have some pieces :
        csn_sorted = sort_and_clean([csn])
        # pieces to request = everything in the swarm that we don't hold yet
        list_to_take = [item for item in pieces_sorted if item not in csn_sorted]
        connected_ps = pd.read_csv(csvFileName, delimiter=',')
        for peer in response['peers']:
            # check to avoid attempting to connect again
            search_ip = connected_ps[connected_ps["dest_ip"].isin([peer['ip']])]
            search_port = connected_ps[connected_ps["dest_port"].isin([peer['port']])]
            if search_ip.shape[0] == 0 and search_port.shape[0] == 0:
                # check to avoid attempting to connect ourself :)
                if str(peer['id']) != myOwnID:
                    # New peer-to-peer connection
                    logger(myOwnID, "NEW__PEER", "Get piece ")
                    print("Connecting to get piece number ")
                    # Starting the connection on a daemon thread; note the
                    # want-list is shared between all LeechMode threads.
                    i_thread = threading.Thread(target=LeechMode, args=(peer['ip'], peer['port'], list_to_take))
                    i_thread.daemon = True
                    i_thread.start()
                    # Let's save the connection on our temporary database
                    new_connected_peer = np.array([[myOwnPort, str(peer['id']), peer['ip'], peer['port']]])
                    connected_ps = connected_ps.append(
                        pd.DataFrame(new_connected_peer, columns=['my_port', 'dest_id', 'dest_ip', 'dest_port']))
                    connected_ps.to_csv(csvFileName, index=False)
    # After each interval time we start the thread again to update our DB.
    threading.Timer(req_interval_time, request_tracker).start()
# ######################################
# TODO : Procedure : Starting PEER
# ######################################
# ---- Peer startup: identity, databases, and constants ----
os.system('cls||clear')
# Files to transfer between peers
# files are on "files/" subdirectory of project
root, dirs, file_name = next(walk('files'))
# Define hypothetical indexer
# We don't have an indexer in this project.
# But we will use a csv file as its database
indexer_DB_name = '_indexer_seeds.csv'
if not os.path.isfile(indexer_DB_name):
    # # Create indexer seeds database if not exist
    # ########### Indexer DB ################
    # # file_number | file_name | seeder_id #
    # #######################################
    init_inx = {'number': [], 'file_name': [], 'primary_seeder': []}
    inx_db = pd.DataFrame(init_inx)
    inx_db = inx_db.fillna(0)  # with 0s rather than NaNs
    inx_db.to_csv(indexer_DB_name, index=False)
# Defining peer identity: a random loopback-range IP, a random port in the
# classic BitTorrent range, and a millisecond timestamp as the peer id.
myOwnIP = '127.' + str(randint(2, 254)) + '.' + str(randint(2, 254)) + '.' + str(randint(2, 254))
myOwnPort = randint(6881, 6889)
myOwnID = str(round(time.time()*1000))  # [::-1] # the commented section will use for reversing the string
# Each peer has its own seeds DB. Peer keeps its pieces there.
# Seeder wants this file to keep data to send
# Leecher wants this file to keep received data
# Let's create it!
# ########### PEER seeds DB #################
# # number | file_name | piece | piece_hash #
# ###########################################
peer_seeds_DB = myOwnID + "_seeds.csv"
init_seeds = {'number': [], 'file_name': [], 'piece': [], 'piece_hash': []}
seeds_db = pd.DataFrame(init_seeds)
seeds_db = seeds_db.fillna(0)  # with 0s rather than NaNs
seeds_db.to_csv(peer_seeds_DB, index=False)
# Defining chunk
kilobytes = 1024
megabytes = 1024 * kilobytes
CHUNK_SIZE = 20 * kilobytes  # 20 KB
# buffer size
BUFFER_SIZE = 1024
# msg encoding
FORMAT = "utf-8"
# Interactions with indexer.
# This section will be used to fill indexer place!
leecher = True  # We suppose that peer is leecher except if it's a seeder!
file_to_leech = 0  # If each file has a number, We consider 0 as no specific file
logger(myOwnID, "CONN_INDX", "A hypothetical connection with indexer established.")
p_type = input('Indexer : What are you? ( Just enter its number )\n[1] seeder\n[2] leecher\n')
# ---- Seeder path: register the file, chunk it, and publish a torrent ----
if int(p_type) == 1:
    os.system('cls||clear')
    msg = 'Indexer : Ok seeder. Send me your torrent file! ( Just enter its number )\n'
    for i in range(len(file_name)):
        msg = msg + '[' + str(i+1) + '] ' + file_name[i] + '\n'
    file = input(msg)
    # Reading indexer DB for torrent of file. If exist peer cant be seeder!
    inx_db = pd.read_csv(indexer_DB_name, delimiter=',')
    # We search for file in indexer seed list to see if it is exist or not?
    search = inx_db[inx_db["file_name"].isin([file_name[int(file)-1]])]
    if search.shape[0] == 0:
        # # First attempt to seed this file!
        # Add this attempt to indexer DB
        new_file = np.array([[file, file_name[int(file)-1], myOwnID]])
        inx_db = inx_db.append(pd.DataFrame(new_file, columns=['number', 'file_name', 'primary_seeder']))
        inx_db.to_csv(indexer_DB_name, index=False)
        # Indexer will send this peer infos for tracker and
        # tracker will save its identity in its DB.
        # So we should add seeder data to tracker DB.
        # There is a new peer connecting, so we append itself to list!
        file_stat = os.stat('files/' + file_name[int(file) - 1])
        file_size = file_stat.st_size
        # Ceiling division without math.ceil:
        piece_count = -(-file_size // CHUNK_SIZE)  # c++ : ceil(3.6/2) = 2 -> python : -(-3.6//2) = 2
        pieces = '{'
        for i in range(piece_count):
            pieces = pieces + str(i+1) + ','
        pieces = pieces[:-1] + '}'
        tracker_list = pd.read_csv('_tracker.csv', delimiter=',')
        new_seeder = np.array([[file_name[int(file)-1], myOwnID, myOwnIP, myOwnPort, pieces]])
        tracker_list = \
            tracker_list.append(pd.DataFrame(new_seeder, columns=['file_name', 'p_id', 'p_ip', 'p_port', 'pieces']))
        tracker_list.to_csv('_tracker.csv', index=False)
        # then store the chunks in seeds DB of seeder and create .torrent file of it
        file_number = 1
        with open('files/' + file_name[int(file) - 1], 'rb') as f:
            sha1 = hashlib.sha1()
            SHA1_str = '{'
            chunk = f.read(CHUNK_SIZE)
            seeds_db = pd.read_csv(peer_seeds_DB, delimiter=',')
            while chunk:
                if not chunk:
                    break
                # Pieces are stored base64-encoded; the hash covers the
                # encoded form (NOTE(review): sha1 accumulates across chunks,
                # so each stored hash is a running digest, not per-piece).
                base64_chunk = base64.b64encode(chunk)
                sha1.update(base64_chunk)
                SHA1_str = SHA1_str + sha1.hexdigest() + ','
                # Add chunk in !
                new_seed = np.array([[file_number, file_name[int(file) - 1], base64_chunk, sha1.hexdigest()]])
                seeds_db = \
                    seeds_db.append(pd.DataFrame(new_seed, columns=['number', 'file_name', 'piece', 'piece_hash']))
                seeds_db.to_csv(peer_seeds_DB, index=False)
                # # We can also create chunked files using below lines :
                # with open(file_name[int(file)-1]+'_' + str(file_number), 'wb') as chunk_file:
                #     chunk_file.write(chunk)
                # Next Chunk
                file_number += 1
                chunk = f.read(CHUNK_SIZE)
        SHA1_str = SHA1_str[:-1]  # remove last character (last comma)
        SHA1_str = SHA1_str + ' }'
        torrentFile = open(file_name[int(file) - 1] + ".torrent", "w")
        # Indexer will give the tracker address to seeder
        trackerAddress = "127.0.0.1:5372"
        metaInfo = \
            '{"announce":"' + trackerAddress + '", "info":{ "length":' + str(file_size) + \
            ',"name":"' + file_name[int(file) - 1] + '","piece length":' + str(CHUNK_SIZE) + \
            ', "pieces":"' + SHA1_str + '" } } '
        torrentFile.write(metaInfo)
        torrentFile.close()
        os.system('cls||clear')
        print('Peer : torrent file sent to indexer!')
        logger(myOwnID, "TORR_SENT", "torrent of " + file_name[int(file) - 1] + " sent to indexer.")
        leecher = False
    else:
        os.system('cls||clear')
        decision = input('We have it file on our swarms! Do you want to...?\n[1] Leech it\nor\n[0] Cancel operation\n')
        if int(decision) == 0:
            logger(myOwnID, "PEER_ABRT", "Peer will abort the operation.")
            sys.exit(0)
        else:
            file_to_leech = int(file)
# Now the seeder going to server mode, waiting for requests
# But leechers will connect to tracker for seeder addresses
if leecher:
# Leecher section
os.system('cls||clear')
if file_to_leech == 0:
msg = 'Indexer : Ok leecher, search your file! ( Just enter its number )\n'
for i in range(len(file_name)):
msg = msg + '[' + str(i + 1) + '] ' + file_name[i] + '\n'
file_to_leech = input(msg)
os.system('cls||clear')
file_to_leech_name = file_name[int(file_to_leech) - 1]
# Reading indexer DB for torrent of file. If exist peer cant be seeder!
inx_db = pd.read_csv(indexer_DB_name, delimiter=',')
# We search for file in indexer seed list to see if it is exist or not?
search = inx_db[inx_db["file_name"].isin([file_to_leech_name])]
if search.shape[0] == 0:
os.system('cls||clear')
print('This file is not seeded yet! Ending Operation...')
logger(myOwnID, "NOT__LICH", "The file to leech is not seeded yet! Ending Operation!")
logger(myOwnID, "PEER_ABRT", "Peer will abort the operation.")
sys.exit(0)
else:
file_to_leech_name = file_name[int(file_to_leech) - 1]
# We want to have a list of every body pieces and update it with every response from tracker
every_body_pieces = []
list_to_take = []
# what is our pieces? current seeds numbers = csn
csn = ''
logger(myOwnID, "GOT__TORR", "Peer got the torrent file from indexer and now connecting to tracker.")
with open(file_to_leech_name + ".torrent", "r") as torrentFile:
mInfo = torrentFile.read()
Dictionary = json.loads(mInfo)
# print(Dictionary['info']['name']) # will print fileName.ext
announce = Dictionary['announce'].split(":")
serverIP = announce[0] # "127.0.0.1"
serverPort = announce[1] # "5372"
trackerAddress = "http://" + serverIP + ":" + serverPort
# Start tracking of others by the use of tracker server
data = {'my_port': [], 'dest_id': [], 'dest_ip': [], 'dest_port': []}
csvFileName = myOwnID + "_PPP_list.csv"
df = pd.DataFrame(data)
df = df.fillna(0) # with 0s rather than NaNs
df.to_csv(csvFileName, index=False)
request_tracker()
while True:
try:
print('Waiting for requests.')
logger(myOwnID, "WAIT__REQ", "Seeder waits for requests for files.")
# we are going to seeding mode simultaneously
SeedMode()
except KeyboardInterrupt:
sys.exit(0)
|
__init__.py | import importlib
from fastapi import FastAPI, Request
from fastapi.responses import Response
import json
import base64
from multiprocessing import Process
import uvicorn
app = FastAPI()
def start_webserver():
def _start_webserver():
uvicorn.run(app, host="127.0.0.1", port=8080, log_level="debug")
p = Process(target=_start_webserver)
p.start()
return p
@app.post("/v1/algo/{username}/{algoname}")
async def process_algo_req(request: Request, username, algoname):
metadata = {"request_id": "req-55c0480d-6af3-4a21-990a-5c51d29f5725", "duration": 0.000306774}
content_type = request.headers['Content-Type']
request = await request.body()
if algoname == "500":
return Response("Internal Server Error", status_code=500)
elif algoname == "raise_exception":
return {"error": {"message": "This is an exception"}}
else:
if content_type != "application/octet-stream":
request = request.decode('utf-8')
if content_type == "text/plain":
metadata['content_type'] = "text"
elif content_type == "application/json":
request = json.loads(request)
metadata['content_type'] = "json"
else:
metadata['content_type'] = "binary"
request = base64.b64encode(request)
output = {"result": request, "metadata": metadata}
return output
@app.post("/v1/algo/{username}/{algoname}/{githash}")
async def process_hello_world(request: Request, username, algoname, githash):
metadata = {"request_id": "req-55c0480d-6af3-4a21-990a-5c51d29f5725", "duration": 0.000306774,
'content_type': "text"}
request = await request.body()
request = request.decode('utf-8')
return {"result": f"hello {request}", "metadata": metadata}
### Algorithm Routes
@app.get("/v1/algorithms/{username}/{algoname}/builds/{buildid}")
async def get_build_id(username, algoname, buildid):
return {"status": "succeeded", "build_id": buildid, "commit_sha": "bcdadj",
"started_at": "2021-09-27T22:54:20.786Z", "finished_at": "2021-09-27T22:54:40.898Z",
"version_info": {"semantic_version": "0.1.1"}}
@app.get("/v1/algorithms/{username}/{algoname}/builds/{buildid}/logs")
async def get_build_log(username, algoname, buildid):
return {"logs": "This is a log"}
@app.get("/v1/algorithms/{username}/{algoname}/scm/status")
async def get_scm_status(username, algoname):
return {"scm_connection_status": "active"}
@app.get("/v1/algorithms/{algo_id}/errors")
async def get_algo_errors(algo_id):
return {"error": {"message": "not found"}}
@app.post("/v1/algorithms/{username}")
async def create_algorithm(request: Request, username):
payload = await request.json()
return {"id": "2938ca9f-54c8-48cd-b0d0-0fb7f2255cdc", "name": payload["name"],
"details": {"label": payload["details"]["label"]},
"settings": {"algorithm_callability": "private", "source_visibility": "open",
"package_set": "tensorflow-gpu-2.3-python38", "license": "apl", "network_access": "isolated",
"pipeline_enabled": False, "insights_enabled": False,
"algorithm_environment": "fd980f4f-1f1c-4b2f-a128-d60b40c6567a"},
"source": {"scm": {"id": "internal", "provider": "internal", "default": True, "enabled": True}},
"resource_type": "algorithm"}
@app.post("/v1/algorithms/{username}/{algoname}/compile")
async def compile_algorithm(username, algoname):
return {
"id": "2938ca9f-54c8-48cd-b0d0-0fb7f2255cdc",
"name": algoname,
"details": {
"summary": "Example Summary",
"label": "QA",
"tagline": "Example Tagline"
},
"settings": {
"algorithm_callability": "private",
"source_visibility": "open",
"package_set": "tensorflow-gpu-2.3-python38",
"license": "apl",
"network_access": "isolated",
"pipeline_enabled": False,
"insights_enabled": False,
"algorithm_environment": "fd980f4f-1f1c-4b2f-a128-d60b40c6567a"
},
"version_info": {
"git_hash": "e85db9bca2fad519f540b445f30d12523e4dec9c",
"version_uuid": "1d9cb91d-11ca-49cb-a7f4-28f67f277654"
},
"source": {
"scm": {
"id": "internal",
"provider": "internal",
"default": True,
"enabled": True
}
},
"compilation": {
"successful": True,
"output": ""
},
"self_link": f"http://localhost:8080/v1/algorithms/{username}/{algoname}/versions/e85db9bca2fad519f540b445f30d12523e4dec9c",
"resource_type": "algorithm"
}
@app.post("/v1/algorithms/{username}/{algoname}/versions")
async def publish_algorithm(request: Request, username, algoname):
return {"id": "2938ca9f-54c8-48cd-b0d0-0fb7f2255cdc", "name": algoname,
"details": {"summary": "Example Summary", "label": "QA", "tagline": "Example Tagline"},
"settings": {"algorithm_callability": "private", "source_visibility": "open",
"package_set": "tensorflow-gpu-2.3-python38", "license": "apl", "network_access": "isolated",
"pipeline_enabled": False, "insights_enabled": False,
"algorithm_environment": "fd980f4f-1f1c-4b2f-a128-d60b40c6567a"},
"version_info": {"semantic_version": "0.1.0", "git_hash": "e85db9bca2fad519f540b445f30d12523e4dec9c",
"release_notes": "created programmatically", "sample_input": "payload",
"version_uuid": "e85db9bca2fad519f540b445f30d12523e4dec9c"},
"source": {"scm": {"id": "internal", "provider": "internal", "default": True, "enabled": True}},
"compilation": {"successful": True},
"self_link": f"http://localhost:8080/v1/algorithms/{username}/{algoname}/versions/e85db9bca2fad519f540b445f30d12523e4dec9c",
"resource_type": "algorithm"}
@app.get("/v1/algorithms/{username}/{algoname}/versions/{algohash}")
async def get_algorithm_info(username, algoname, algohash):
return {
"id": "2938ca9f-54c8-48cd-b0d0-0fb7f2255cdc",
"name": algoname,
"details": {
"summary": "Example Summary",
"label": "QA",
"tagline": "Example Tagline"
},
"settings": {
"algorithm_callability": "private",
"source_visibility": "open",
"language": "python3",
"environment": "gpu",
"package_set": "tensorflow-gpu-2.3-python38",
"license": "apl",
"network_access": "isolated",
"pipeline_enabled": False,
"insights_enabled": False,
"algorithm_environment": "fd980f4f-1f1c-4b2f-a128-d60b40c6567a"
},
"version_info": {
"semantic_version": "0.1.0",
"git_hash": algohash,
"release_notes": "created programmatically",
"sample_input": "\"payload\"",
"sample_output": "Exception encountered while running sample input",
"version_uuid": "1d9cb91d-11ca-49cb-a7f4-28f67f277654"
},
"source": {
"scm": {
"id": "internal",
"provider": "internal",
"default": True,
"enabled": True
}
},
"compilation": {
"successful": True,
"output": ""
},
"resource_type": "algorithm"
}
### Admin Routes
@app.post("/v1/users")
async def create_user(request: Request):
payload = await request.body()
data = json.loads(payload)
username = data['username']
email = data['email']
return {
"id": "1e5c89ab-3d5c-4bad-b8a3-6c8a294d4418",
"username": username,
"email": email,
"fullname": username,
"self_link": f"http://localhost:8080/v1/users/{username}", "resource_type": "user"
}
@app.get("/v1/users/{user_id}/errors")
async def get_user_errors(user_id):
return []
@app.get("/v1/organization/types")
async def get_org_types():
return [
{"id": "d0c85ea6-ddfa-11ea-a0c8-12a811be4db3", "name": "basic"},
{"id": "d0bff917-ddfa-11ea-a0c8-12a811be4db3", "name": "legacy"},
{"id": "d0c9d825-ddfa-11ea-a0c8-12a811be4db3", "name": "pro"}
]
@app.post("/v1/organizations")
async def create_org(request: Request):
payload = await request.body()
data = json.loads(payload)
org_name = data["org_name"]
org_email = data["org_email"]
return {"id": "55073c92-5f8e-4d7e-a14d-568f94924fd9",
"org_name": org_name,
"org_label": "some label",
"org_contact_name": "Some owner",
"org_email": org_email,
"org_created_at": "2021-10-22T16:41:32",
"org_url": None,
"type_id": "d0c85ea6-ddfa-11ea-a0c8-12a811be4db3",
"stripe_customer_id": None,
"external_admin_group": None,
"external_member_group": None,
"external_id": None,
"owner_ids": None,
"resource_type": "organization",
"self_link": "http://localhost:8080/v1/organizations/a_myOrg1542"
}
@app.put("/v1/organizations/{orgname}/members/{username}")
async def add_user_to_org(orgname, username):
return Response(status_code=200)
@app.get("/v1/organizations/{orgname}/errors")
async def org_errors(orgname):
return []
@app.put("/v1/organizations/{org_name}")
async def edit_org(org_name):
return Response(status_code=204)
@app.get("/v1/organizations/{org_name}")
async def get_org_by_name(org_name):
return {
"id": "55073c92-5f8e-4d7e-a14d-568f94924fd9",
"org_name": org_name,
"org_label": "some label",
"org_contact_name": "Some owner",
"org_email": "a_myOrg1542@algo.com",
"org_created_at": "2021-10-22T16:41:32",
"org_url": None,
"type_id": "d0c85ea6-ddfa-11ea-a0c8-12a811be4db3",
"stripe_customer_id": None,
"external_admin_group": None,
"external_member_group": None,
"external_id": None,
"owner_ids": None,
"resource_type": "organization",
"self_link": "http://localhost:8080/v1/organizations/a_myOrg1542"
}
@app.get("/v1/algorithm-environments/edge/languages")
async def get_supported_langs():
return [{"name": "anaconda3", "display_name": "Conda (Environments) - beta",
"configuration": "{\n \"display_name\": \"Conda (Environments) - beta\",\n \"req_files\": [\n \"environment.yml\"\n ],\n \"artifacts\": [\n {\"source\":\"/home/algo/.cache\", \"destination\":\"/home/algo/.cache/\"},\n {\"source\":\"/home/algo/anaconda_environment\", \"destination\": \"/home/algo/anaconda_environment/\"},\n {\"source\":\"/opt/algorithm\", \"destination\":\"/opt/algorithm/\"}\n ]\n}\n"},
{"name": "csharp-dotnet-core2", "display_name": "C# .NET Core 2.x+ (Environments)",
"configuration": "{\n \"display_name\": \"C# .NET Core 2.x+ (Environments)\",\n \"artifacts\": [\n {\"source\":\"/opt/algorithm/bin/Release/*/*\", \"destination\":\"/opt/algorithm/\"},\n {\"source\":\"/opt/algorithm/resources\", \"destination\":\"/opt/algorithm/resources/\"},\n {\"source\":\"/home/algo/.nuget\", \"destination\":\"/home/algo/.nuget/\"}\n ]\n}\n"},
{"name": "java11", "display_name": "Java OpenJDK 11.0 (Environments)",
"configuration": "{\n \"display_name\": \"Java OpenJDK 11.0 (Environments)\",\n \"artifacts\": [\n {\"source\":\"/opt/algorithm/target/*.jar\", \"destination\":\"/opt/algorithm/target/algorithm.jar\"},\n {\"source\":\"/opt/algorithm/target/lib\", \"destination\":\"/opt/algorithm/target/lib/\"}\n ]\n}\n"},
{"name": "python2", "display_name": "Python 2.x (Environments)",
"configuration": "{\n \"display_name\": \"Python 2.x (Environments)\",\n \"req_files\": [\n \"requirements.txt\"\n ],\n \"artifacts\": [\n {\"source\":\"/home/algo/.local\", \"destination\":\"/home/algo/.local/\"},\n {\"source\":\"/opt/algorithm\", \"destination\":\"/opt/algorithm/\"}\n ]\n}\n"},
{"name": "python3", "display_name": "Python 3.x (Environments)",
"configuration": "{\n \"display_name\": \"Python 3.x (Environments)\",\n \"req_files\": [\n \"requirements.txt\"\n ],\n \"artifacts\": [\n {\"source\":\"/home/algo/.local\", \"destination\":\"/home/algo/.local/\"},\n {\"source\":\"/opt/algorithm\", \"destination\":\"/opt/algorithm/\"}\n ]\n}\n"},
{"name": "r36", "display_name": "R 3.6.x (Environments)",
"configuration": "{\n \"display_name\": \"R 3.6.x (Environments)\",\n \"req_files\": [\n \"packages.txt\"\n ],\n \"artifacts\": [\n {\"source\":\"/opt/algorithm\", \"destination\":\"/opt/algorithm/\"},\n {\"source\":\"/usr/local/lib/R/site-library\", \"destination\":\"/usr/local/lib/R/site-library/\"}\n ]\n}\n\n"},
{"name": "scala-2", "display_name": "Scala 2.x & sbt 1.3.x (Environments)",
"configuration": "{\n \"display_name\": \"Scala 2.x & sbt 1.3.x (Environments)\",\n \"artifacts\": [\n {\"source\":\"/opt/algorithm/target/universal/stage\", \"destination\":\"/opt/algorithm/stage/\"}\n ]\n}\n\n"}]
@app.get("/v1/algorithm-environments/edge/languages/{language}/environments")
async def get_environments_by_lang(language):
return {
"environments": [
{
"id": "717d36e0-222c-44a0-9aa8-06f4ebc1b82a",
"environment_specification_id": "f626effa-e519-431e-9d7a-0d3a7563ae1e",
"display_name": "Python 2.7",
"description": "Generic Python 2.7 installation",
"created_at": "2020-12-21T21:47:53.239",
"language": {
"name": language,
"display_name": "Python 2.x (Environments)",
"configuration": "{\n \"display_name\": \"Python 2.x (Environments)\",\n \"req_files\": [\n "
" \"requirements.txt\"\n ],\n \"artifacts\": [\n {"
"\"source\":\"/home/algo/.local\", \"destination\":\"/home/algo/.local/\"},"
"\n {\"source\":\"/opt/algorithm\", "
"\"destination\":\"/opt/algorithm/\"}\n ]\n}\n "
},
"machine_type": "CPU"
},
{
"id": "6f57e041-54e0-4e1a-8b2f-4589bb2c06f8",
"environment_specification_id": "faf81400-eb15-4f64-81c0-3d4ed7181e77",
"display_name": "Python 2.7 + GPU support",
"description": "Python2.7 installation with CUDA 9.0 and CUDNN7",
"created_at": "2020-08-14T07:22:32.955",
"language": {
"name": language,
"display_name": "Python 2.x (Environments)",
"configuration": "{\n \"display_name\": \"Python 2.x (Environments)\",\n \"req_files\": [\n "
" \"requirements.txt\"\n ],\n \"artifacts\": [\n {"
"\"source\":\"/home/algo/.local\", \"destination\":\"/home/algo/.local/\"},"
"\n {\"source\":\"/opt/algorithm\", "
"\"destination\":\"/opt/algorithm/\"}\n ]\n}\n "
},
"machine_type": "GPU"
}
]
}
|
tests.py | import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnlessDBFeature('uses_savepoints')
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIfDBFeature('autocommits_when_autocommit_is_off')
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
# The tests access the database after exercising 'atomic', initiating
# a transaction ; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnlessDBFeature('uses_savepoints')
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
# The third insert couldn't be roll back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second insert couldn't be roll back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
# The third insert couldn't be roll back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnlessDBFeature('uses_savepoints')
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
forbidden_atomic_msg = "This is forbidden when an 'atomic' block is active."
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg):
transaction.commit()
with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
msg = (
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
# The connection is usable again .
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
Reporter.objects.create(id=1)
Reporter.objects.create(id=2)
main_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.select_for_update().get(id=1)
main_thread_ready.wait()
# 1) This line locks... (see below for 2)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
Reporter.objects.select_for_update().get(id=2)
main_thread_ready.set()
# The two threads can't be synchronized with an event here
# because the other thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see above for 1)
Reporter.objects.exclude(id=2).update(id=1)
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
    """Miscellaneous edge cases of transaction.atomic()."""

    available_apps = []

    def test_wrap_callable_instance(self):
        """#20028 -- Atomic must support wrapping callable instances."""
        class Callable:
            def __call__(self):
                pass
        # Must not raise an exception
        transaction.atomic(Callable())

    @skipUnlessDBFeature('can_release_savepoints')
    def test_atomic_does_not_leak_savepoints_on_failure(self):
        """#23074 -- Savepoints must be released after rollback."""
        # Expect an error when rolling back a savepoint that doesn't exist.
        # Done outside of the transaction block to ensure proper recovery.
        with self.assertRaises(Error):
            # Start a plain transaction.
            with transaction.atomic():
                # Swallow the intentional error raised in the sub-transaction.
                with self.assertRaisesMessage(Exception, "Oops"):
                    # Start a sub-transaction with a savepoint.
                    with transaction.atomic():
                        # Capture the savepoint id before the rollback
                        # releases it.
                        sid = connection.savepoint_ids[-1]
                        raise Exception("Oops")
                # This is expected to fail because the savepoint no longer exists.
                connection.savepoint_rollback(sid)
@skipIfDBFeature('autocommits_when_autocommit_is_off')
class NonAutocommitTests(TransactionTestCase):
    """ORM behavior when autocommit has been turned off explicitly."""

    available_apps = []

    def test_orm_query_after_error_and_rollback(self):
        """
        ORM queries are allowed after an error and a rollback in non-autocommit
        mode (#27504).
        """
        transaction.set_autocommit(False)
        r1 = Reporter.objects.create(first_name='Archibald', last_name='Haddock')
        # Reusing r1's primary key forces an IntegrityError on insert.
        r2 = Reporter(first_name='Cuthbert', last_name='Calculus', id=r1.id)
        with self.assertRaises(IntegrityError):
            r2.save(force_insert=True)
        transaction.rollback()
        # The connection must still be usable for queries after the rollback.
        Reporter.objects.last()

    def test_orm_query_without_autocommit(self):
        """#24921 -- ORM queries must be possible after set_autocommit(False)."""
        transaction.set_autocommit(False)
        try:
            Reporter.objects.create(first_name="Tintin")
        finally:
            # Restore connection state so later tests are unaffected.
            transaction.rollback()
            transaction.set_autocommit(True)
|
#!/usr/bin/env python
"""
A script that provides:
1. Ability to grab binaries where possible from LLVM.
2. Ability to download binaries from MongoDB cache for clang-format.
3. Validates clang-format is the right version.
4. Has support for checking which files are to be checked.
5. Supports validating and updating a set of files to the right coding style.
"""
from __future__ import print_function, absolute_import
import Queue
import difflib
import itertools
import os
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import urllib
from distutils import spawn
from optparse import OptionParser
from multiprocessing import cpu_count
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
from buildscripts import moduleconfig
##############################################################################
#
# Constants for clang-format
#
#

# Expected version of clang-format
CLANG_FORMAT_VERSION = "3.6.0"

# Name of clang-format as a binary
CLANG_FORMAT_PROGNAME = "clang-format"

# URL location of the "cached" copy of clang-format to download
# for users which do not have clang-format installed
CLANG_FORMAT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/clang-format-rhel55.tar.gz"

# URL on LLVM's website to download the clang tarball
CLANG_FORMAT_SOURCE_URL_BASE = string.Template("http://llvm.org/releases/$version/clang+llvm-$version-$llvm_distro.tar.xz")

# Path in the tarball to the clang-format binary
CLANG_FORMAT_SOURCE_TAR_BASE = string.Template("clang+llvm-$version-$tar_path/bin/" + CLANG_FORMAT_PROGNAME)

# Path to the modules in the mongodb source tree
# Has to match the string in SConstruct
MODULE_DIR = "src/mongo/db/modules"
# Copied from python 2.7 version of subprocess.py
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised when a process run by check_call()/check_output() exits with a
    non-zero status.

    Attributes:
        returncode: exit status of the child process
        cmd: the command that was executed
        output: captured output, when set by check_output()
    """

    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        message = "Command '%s' returned non-zero exit status %d with output %s"
        return message % (self.cmd, self.returncode, self.output)
# Copied from python 2.7 version of subprocess.py
def check_output(*popenargs, **kwargs):
    r"""Run a command and return its standard output as a byte string.

    Copied from the Python 2.7 version of subprocess.py. Raises
    CalledProcessError (with returncode and output attributes set) when the
    command exits non-zero. Arguments are the same as for the Popen
    constructor; passing stdout is not allowed since it is used internally.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')

    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, _ = process.communicate()
    returncode = process.poll()

    if not returncode:
        return output

    # Prefer an explicit args= kwarg for error reporting, else the first
    # positional argument given to Popen.
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    raise CalledProcessError(returncode, command, output)
def callo(args):
    """Run *args* as a subprocess and return its captured output."""
    return check_output(args)
def get_llvm_url(version, llvm_distro):
    """Build the llvm.org download URL for the given release and distro."""
    return CLANG_FORMAT_SOURCE_URL_BASE.substitute(version=version,
                                                   llvm_distro=llvm_distro)
def get_tar_path(version, tar_path):
    """Return the path of the clang-format binary inside the llvm tarball."""
    return CLANG_FORMAT_SOURCE_TAR_BASE.substitute(version=version,
                                                   tar_path=tar_path)
def extract_clang_format(tar_path):
    """Extract only the clang-format binary from the tarball at *tar_path*
    into the current working directory.
    """
    if sys.platform == 'darwin':
        # On OSX, we shell out to tar because tarfile doesn't support xz
        # compression.
        subprocess.call(['tar', '-xzf', tar_path, '*clang-format*'])
        return
    # Otherwise we use tarfile because some versions of tar don't support
    # wildcards without a special flag. The context manager guarantees the
    # archive handle is closed even if extraction raises.
    with tarfile.open(tar_path) as tarfp:
        for name in tarfp.getnames():
            if name.endswith('clang-format'):
                tarfp.extract(name)
def get_clang_format_from_llvm(llvm_distro, tar_path, dest_file):
    """Download clang-format from llvm.org, unpack the tarball,
    and put clang-format in the specified place
    """
    # Build URL
    url = get_llvm_url(CLANG_FORMAT_VERSION, llvm_distro)

    dest_dir = tempfile.gettempdir()
    temp_tar_file = os.path.join(dest_dir, "temp.tar.xz")

    # Download from LLVM
    print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
          url, temp_tar_file))
    urllib.urlretrieve(url, temp_tar_file)

    # The binary is extracted into the cwd following the tarball's layout.
    extract_clang_format(temp_tar_file)

    # Destination Path
    shutil.move(get_tar_path(CLANG_FORMAT_VERSION, tar_path), dest_file)
def get_clang_format_from_linux_cache(dest_file):
    """Get clang-format from mongodb's cache
    """
    # Get URL
    url = CLANG_FORMAT_HTTP_LINUX_CACHE

    dest_dir = tempfile.gettempdir()
    temp_tar_file = os.path.join(dest_dir, "temp.tar.xz")

    # Download the file
    print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
          url, temp_tar_file))
    urllib.urlretrieve(url, temp_tar_file)

    extract_clang_format(temp_tar_file)

    # Destination Path
    # The cached tarball extracts to llvm/Release/bin/clang-format in the cwd.
    shutil.move("llvm/Release/bin/clang-format", dest_file)
class ClangFormat(object):
    """Class encapsulates finding a suitable copy of clang-format,
    and linting/formating an individual file
    """
    def __init__(self, path, cache_dir):
        # Prefer a version-suffixed binary when one is installed system-wide.
        if os.path.exists('/usr/bin/clang-format-3.6'):
            clang_format_progname = 'clang-format-3.6'
        else:
            clang_format_progname = CLANG_FORMAT_PROGNAME

        # Initialize clang-format configuration information
        if sys.platform.startswith("linux"):
            #"3.6.0/clang+llvm-3.6.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz
            self.platform = "linux_x64"
            self.llvm_distro = "x86_64-linux-gnu-ubuntu"
            self.tar_path = "x86_64-linux-gnu"
        elif sys.platform == "win32":
            self.platform = "windows_x64"
            self.llvm_distro = "windows_x64"
            self.tar_path = None
            clang_format_progname += ".exe"
        elif sys.platform == "darwin":
            #"3.6.0/clang+llvm-3.6.0-x86_64-apple-darwin.tar.xz
            self.platform = "darwin_x64"
            self.llvm_distro = "x86_64-apple-darwin"
            self.tar_path = "x86_64-apple-darwin"
        # NOTE(review): on any other platform the attributes above are never
        # set; the download branch below then only prints an error — confirm.

        self.path = None

        # Find Clang-Format now: explicit argument first.
        if path is not None:
            if os.path.isfile(path):
                self.path = path
            else:
                print("WARNING: Could not find clang-format %s" % (path))

        # Check the environment variable
        if "MONGO_CLANG_FORMAT" in os.environ:
            self.path = os.environ["MONGO_CLANG_FORMAT"]

            if self.path and not self._validate_version(warn=True):
                self.path = None

        # Check the users' PATH environment variable now
        if self.path is None:
            self.path = spawn.find_executable(clang_format_progname)

            if self.path and not self._validate_version(warn=True):
                self.path = None

        # If Windows, try to grab it from Program Files
        if sys.platform == "win32":
            win32bin = os.path.join(os.environ["ProgramFiles(x86)"], "LLVM\\bin\\clang-format.exe")
            if os.path.exists(win32bin):
                self.path = win32bin

        # Have not found it yet, download it from the web
        if self.path is None:
            if not os.path.isdir(cache_dir):
                os.makedirs(cache_dir)

            self.path = os.path.join(cache_dir, clang_format_progname)

            if not os.path.isfile(self.path):
                if sys.platform.startswith("linux"):
                    get_clang_format_from_linux_cache(self.path)
                elif sys.platform == "darwin":
                    get_clang_format_from_llvm(self.llvm_distro, self.tar_path, self.path)
                else:
                    print("ERROR: clang-format.py does not support downloading clang-format " +
                          " on this platform, please install clang-format " + CLANG_FORMAT_VERSION)

        # Validate we have the correct version
        self._validate_version()

        # Serializes diff output produced by concurrent lint() workers.
        self.print_lock = threading.Lock()

    def _validate_version(self, warn=False):
        """Validate clang-format is the expected version
        """
        cf_version = callo([self.path, "--version"])

        if CLANG_FORMAT_VERSION in cf_version:
            return True

        if warn:
            print("WARNING: clang-format found in path, but incorrect version found at " +
                  self.path + " with version: " + cf_version)

        return False

    def lint(self, file_name):
        """Check the specified file has the correct format
        """
        with open(file_name, 'r') as original_text:
            original_file = original_text.read()

        # Get formatted file as clang-format would format the file
        formatted_file = callo([self.path, "--style=file", file_name])

        if original_file != formatted_file:
            original_lines = original_file.splitlines()
            formatted_lines = formatted_file.splitlines()
            result = difflib.unified_diff(original_lines, formatted_lines)

            # Take a lock to ensure diffs do not get mixed
            with self.print_lock:
                print("ERROR: Found diff for " + file_name)
                print("To fix formatting errors, run %s --style=file -i %s" %
                      (self.path, file_name))
                for line in result:
                    print(line.rstrip())

            return False

        return True

    def format(self, file_name):
        """Update the format of the specified file
        """
        # Update the file with clang-format; call() returns the exit code,
        # so 0 (success) maps to True.
        return not subprocess.call([self.path, "--style=file", "-i", file_name])
def parallel_process(items, func):
    """Run *func* over every item using one worker thread per CPU.

    Returns True when every call returned truthy; returns False as soon as
    any call returns falsy or raises (remaining work is abandoned).
    Previously an exception raised by func escaped the worker thread after
    task_done(), so a crashed item silently counted as success.
    """
    try:
        cpus = cpu_count()
    except NotImplementedError:
        cpus = 1

    task_queue = Queue.Queue()

    # Use a list so that the worker closure can mutate this variable.
    pp_event = threading.Event()
    pp_result = [True]
    pp_lock = threading.Lock()

    def worker():
        """Worker thread to process work items in parallel
        """
        while not pp_event.is_set():
            try:
                item = task_queue.get_nowait()
            except Queue.Empty:
                # if the queue is empty, exit the worker thread
                pp_event.set()
                return

            try:
                ret = func(item)
            except Exception:
                # A raised exception is a failure, not a success.
                ret = False
            finally:
                # Tell the queue we finished with the item
                task_queue.task_done()

            # Return early if we fail, and signal we are done
            if not ret:
                with pp_lock:
                    pp_result[0] = False

                pp_event.set()
                return

    # Enqueue all the work we want to process
    for item in items:
        task_queue.put(item)

    # Process all the work
    threads = []
    for cpu in range(cpus):
        thread = threading.Thread(target=worker)
        thread.daemon = True
        thread.start()
        threads.append(thread)

    # Wait for the threads to finish
    # Loop with a timeout so that we can process Ctrl-C interrupts
    # Note: On Python 2.6 wait always returns None so we check is_set also,
    # This works because we only set the event once, and never reset it
    while not pp_event.wait(1) and not pp_event.is_set():
        time.sleep(1)

    for thread in threads:
        thread.join()

    return pp_result[0]
def get_base_dir():
    """Get the base directory for mongo repo.

    Prefers the git worktree root. If git fails or is unavailable, falls
    back to the parent of this script's directory — this assumes the script
    lives in buildscripts/.
    """
    try:
        return subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).rstrip()
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. We are not in a valid git directory
        # (or git is missing); use the script path instead.
        return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_repos():
    """Return a Repo for the main repository and each discovered module."""
    base_dir = get_base_dir()

    # Get a list of modules
    # TODO: how do we filter rocks, does it matter?
    mongo_modules = moduleconfig.discover_module_directories(
        os.path.join(base_dir, MODULE_DIR), None)

    paths = [os.path.join(base_dir, MODULE_DIR, module) for module in mongo_modules]
    paths.append(base_dir)

    return [Repo(repo_path) for repo_path in paths]
class Repo(object):
    """Class encapsulates all knowledge about a git repository, and its metadata
    to run clang-format.
    """
    def __init__(self, path):
        self.path = path
        # Cache the worktree root; later path manipulation is relative to it.
        self.root = self._get_root()

    def _callgito(self, args):
        """Call git for this repository
        """
        # These two flags are the equivalent of -C in newer versions of Git
        # but we use these to support versions pre 1.8.5 but it depends on the command
        # and what the current directory is
        return callo(['git', '--git-dir', os.path.join(self.path, ".git"),
                      '--work-tree', self.path] + args)

    def _get_local_dir(self, path):
        """Get a directory path relative to the git root directory
        """
        if os.path.isabs(path):
            return os.path.relpath(path, self.root)
        return path

    def get_candidates(self, candidates):
        """Get the set of candidate files to check by querying the repository

        Returns the full path to the file for clang-format to consume.
        """
        if candidates is not None and len(candidates) > 0:
            # Only keep the requested files that git actually tracks.
            candidates = [self._get_local_dir(f) for f in candidates]
            valid_files = list(set(candidates).intersection(self.get_candidate_files()))
        else:
            valid_files = list(self.get_candidate_files())

        # Get the full file name here
        valid_files = [os.path.normpath(os.path.join(self.root, f)) for f in valid_files]

        return valid_files

    def get_root(self):
        """Get the root directory for this repository
        """
        return self.root

    def _get_root(self):
        """Gets the root directory for this repository from git
        """
        gito = self._callgito(['rev-parse', '--show-toplevel'])

        return gito.rstrip()

    def _git_ls_files(self, cmd):
        """Run git-ls-files and filter the list of files to a valid candidate list
        """
        gito = self._callgito(cmd)

        # This allows us to pick all the interesting files
        # in the mongo and mongo-enterprise repos
        file_list = [line.rstrip()
                     for line in gito.splitlines()
                     if (line.startswith("jstests") or line.startswith("src"))
                     and not line.startswith("src/third_party")]

        # Only C++ headers/sources and JS files are candidates for formatting.
        files_match = re.compile('\\.(h|cpp|js)$')

        file_list = [a for a in file_list if files_match.search(a)]

        return file_list

    def get_candidate_files(self):
        """Query git to get a list of all files in the repo to consider for analysis
        """
        return self._git_ls_files(["ls-files", "--cached"])

    def get_working_tree_candidate_files(self):
        """Query git to get a list of all files in the working tree to consider for analysis
        """
        # --others additionally picks up untracked (but not ignored) files.
        return self._git_ls_files(["ls-files", "--cached", "--others"])

    def get_working_tree_candidates(self):
        """Get the set of candidate files to check by querying the repository

        Returns the full path to the file for clang-format to consume.
        """
        valid_files = list(self.get_working_tree_candidate_files())

        # Get the full file name here
        valid_files = [os.path.normpath(os.path.join(self.root, f)) for f in valid_files]

        # Filter out files that git thinks exist but were removed.
        valid_files = [f for f in valid_files if os.path.exists(f)]

        return valid_files
def get_files_to_check_working_tree():
    """List candidate files from the working tree.

    This also picks up files not yet managed by git.
    """
    per_repo = [repo.get_working_tree_candidates() for repo in get_repos()]
    return list(itertools.chain.from_iterable(per_repo))
def get_files_to_check():
    """List the files to check, based on what git manages."""
    per_repo = [repo.get_candidates(None) for repo in get_repos()]
    return list(itertools.chain.from_iterable(per_repo))
def get_files_to_check_from_patch(patches):
    """Scan git-diff patch files for the list of files to check."""
    # Matches the "a/<path>" side of a "diff --git" header line.
    check = re.compile(r"^diff --git a\/([a-z\/\.\-_0-9]+) b\/[a-z\/\.\-_0-9]+")

    lines = []
    for patch in patches:
        with open(patch, "rb") as infile:
            lines += infile.readlines()

    matches = (check.match(line) for line in lines)
    candidates = [match.group(1) for match in matches if match]

    per_repo = [repo.get_candidates(candidates) for repo in get_repos()]
    return list(itertools.chain.from_iterable(per_repo))
def _get_build_dir():
    """Location of scons' build directory, used when clang-format must be
    downloaded."""
    return os.path.join(get_base_dir(), "build")
def _lint_files(clang_format, files):
    """Lint the given files with clang-format; exits with status 1 when any
    file does not match the coding style."""
    formatter = ClangFormat(clang_format, _get_build_dir())

    abs_files = [os.path.abspath(f) for f in files]
    if not parallel_process(abs_files, formatter.lint):
        print("ERROR: Code Style does not match coding style")
        sys.exit(1)
def lint_patch(clang_format, infile):
    """Lint patch command entry point."""
    files = get_files_to_check_from_patch(infile)

    # The patch may contain no files that we track; that is fine.
    if files:
        _lint_files(clang_format, files)
def lint(clang_format):
    """Lint files command entry point."""
    _lint_files(clang_format, get_files_to_check())
    return True
def lint_all(clang_format):
    """Lint files command entry point based on the working tree."""
    _lint_files(clang_format, get_files_to_check_working_tree())
    return True
def _format_files(clang_format, files):
    """Reformat the given files in place; exits with status 1 when any file
    fails to format."""
    formatter = ClangFormat(clang_format, _get_build_dir())

    abs_files = [os.path.abspath(f) for f in files]
    if not parallel_process(abs_files, formatter.format):
        print("ERROR: failed to format files")
        sys.exit(1)
def format_func(clang_format):
    """Format files command entry point."""
    _format_files(clang_format, get_files_to_check())
def usage():
    """Print the list of supported commands."""
    print("clang-format.py supports 4 commands [ lint, lint-all, lint-patch, format ].")
def main():
    """Main entry point: parse options and dispatch on the first argument."""
    parser = OptionParser()
    parser.add_option("-c", "--clang-format", type="string", dest="clang_format")

    options, args = parser.parse_args(args=sys.argv)

    if len(args) <= 1:
        usage()
        return

    command = args[1]
    if command == "lint":
        lint(options.clang_format)
    elif command == "lint-all":
        lint_all(options.clang_format)
    elif command == "lint-patch":
        # Remaining arguments are patch files.
        lint_patch(options.clang_format, args[2:])
    elif command == "format":
        format_func(options.clang_format)
    else:
        usage()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python3 tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from enum import Enum
from functools import wraps
from subprocess import PIPE, STDOUT
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import stat
import sys
import tempfile
import time
import unittest
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.config import EM_CONFIG
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, try_delete
from tools.shared import asbytes, Settings, config
from tools.utils import MACOS, WINDOWS
from tools import shared, line_endings, building
def path_from_root(*pathelems):
    """Join *pathelems* onto the emscripten root directory."""
    return os.path.join(__rootpath__, *pathelems)
def delete_contents(pathname):
    """Remove every entry inside *pathname* without removing the directory
    itself."""
    for name in os.listdir(pathname):
        try_delete(os.path.join(pathname, name))
sys.path.append(path_from_root('third_party/websockify'))

logger = logging.getLogger("runner")

# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')

# When set, setUp/tearDown snapshot and diff the temp dir to detect leaks.
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))

# TODO(sbc): Remove this check for the legacy name once its been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"

# 1 keeps each test's output directory; 2 additionally keeps its old contents.
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))

# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')

# When set, tests decorated with @is_slow_test are skipped.
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')

# When set, tests decorated with @requires_native_clang are skipped.
EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')

EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0')) or shared.DEBUG

if EMTEST_VERBOSE:
    logging.root.setLevel(logging.DEBUG)
# checks if browser testing is enabled
def has_browser():
    """Whether browser tests are enabled (EMTEST_BROWSER='0' disables them)."""
    return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
    """Skip the test when the method named *condition* on the test instance
    returns true (or false, when *negate* is set)."""
    assert callable(func)
    suffix = ' : %s' % explanation if explanation else ''

    @wraps(func)
    def decorated(self, *args, **kwargs):
        should_skip = self.__getattribute__(condition)()
        if negate:
            should_skip = not should_skip
        if should_skip:
            self.skipTest(condition + suffix)
        func(self, *args, **kwargs)

    return decorated
def needs_dlfcn(func):
    """Decorator: verify dlfcn support (via self.check_dlfcn) before running
    the test."""
    assert callable(func)

    @wraps(func)
    def decorated(self):
        self.check_dlfcn()
        return func(self)

    return decorated
def is_slow_test(func):
    """Decorator: skip the test when EMTEST_SKIP_SLOW is set."""
    assert callable(func)

    @wraps(func)
    def decorated(self, *args, **kwargs):
        if EMTEST_SKIP_SLOW:
            return self.skipTest('skipping slow tests')
        return func(self, *args, **kwargs)

    return decorated
def disabled(note=''):
    """Unconditionally skip the decorated test, with an optional note."""
    assert not callable(note)
    return unittest.skip(note)
def no_mac(note=''):
    """Skip the decorated test on macOS."""
    assert not callable(note)
    return unittest.skip(note) if MACOS else (lambda f: f)
def no_windows(note=''):
    """Skip the decorated test on Windows."""
    assert not callable(note)
    return unittest.skip(note) if WINDOWS else (lambda f: f)
def requires_native_clang(func):
    """Decorator: skip the test when EMTEST_LACKS_NATIVE_CLANG is set."""
    assert callable(func)

    # @wraps keeps the test's name/docstring visible to the unittest loader,
    # matching the other decorators in this file (skip_if, needs_dlfcn, ...).
    @wraps(func)
    def decorated(self, *args, **kwargs):
        if EMTEST_LACKS_NATIVE_CLANG:
            return self.skipTest('native clang tests are disabled')
        return func(self, *args, **kwargs)

    return decorated
def node_pthreads(f):
    """Decorator: configure the test to run with pthreads under node, or skip
    it in configurations where that is not supported (asan, MINIMAL_RUNTIME).
    """
    # @wraps keeps the test's name visible to the unittest loader, matching
    # the other decorators in this file.
    @wraps(f)
    def decorated(self):
        self.set_setting('USE_PTHREADS', 1)
        if '-fsanitize=address' in self.emcc_args:
            self.skipTest('asan ends up using atomics that are not yet supported in node 12')
        if self.get_setting('MINIMAL_RUNTIME'):
            self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
        self.js_engines = [config.NODE_JS]
        self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
        f(self)
    return decorated
@contextlib.contextmanager
def env_modify(updates):
    """A context manager that updates os.environ.

    A value of None removes that variable. The previous environment is
    restored on exit, even on exception.
    """
    # This could also be done with mock.patch.dict() but taking a dependency
    # on the mock library is probably not worth the benefit.
    saved_env = os.environ.copy()
    print("env_modify: " + str(updates))
    # Seting a value to None means clear the environment variable
    to_clear = [key for key, value in updates.items() if value is None]
    to_set = {key: value for key, value in updates.items() if value is not None}
    os.environ.update(to_set)
    for key in to_clear:
        if key in os.environ:
            del os.environ[key]
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved_env)
# Decorator version of env_modify
def with_env_modify(updates):
    """Decorator form of env_modify(): run the test with *updates* applied."""
    def wrap(f):
        def wrapped(self):
            with env_modify(updates):
                return f(self)
        return wrapped
    return wrap
@contextlib.contextmanager
def chdir(dir):
    """A context manager that performs actions in the given directory and
    restores the previous working directory on exit."""
    saved_cwd = os.getcwd()
    os.chdir(dir)
    try:
        yield
    finally:
        os.chdir(saved_cwd)
def ensure_dir(dirname):
    """Create *dirname* (including parents) when it does not already exist.

    Uses exist_ok to avoid the check-then-create race of the previous
    isdir()+makedirs() sequence.
    """
    os.makedirs(dirname, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
    """Truncate *string* for display: clip over-long lines, drop middle lines
    when there are too many, and cap the total size in bytes.

    Returns the original string unchanged when no limit is exceeded.
    """
    lines = string.splitlines()
    modified = False
    for i, line in enumerate(lines):
        if len(line) > max_line:
            lines[i] = line[:max_line] + '[..]'
            modified = True
    if len(lines) > maxlines:
        lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
        modified = True
    if modified:
        # Previously the per-line truncation above was silently discarded
        # unless the line count also overflowed; rebuild whenever any line
        # was clipped.
        string = '\n'.join(lines) + '\n'
    if len(string) > maxbytes:
        string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
    return string
def create_test_file(name, contents, binary=False):
    """Write *contents* to the relative path *name* (binary mode if *binary*)."""
    assert not os.path.isabs(name)
    with open(name, 'wb' if binary else 'w') as f:
        f.write(contents)
def make_executable(name):
    """Give the owner read/write/execute permission on *name*."""
    mode = stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC
    os.chmod(name, mode)
# The core test modes
core_test_modes = [
    'wasm0',
    'wasm1',
    'wasm2',
    'wasm3',
    'wasms',
    'wasmz',
    'strict',
    'wasm2js0',
    'wasm2js1',
    'wasm2js2',
    'wasm2js3',
    'wasm2jss',
    'wasm2jsz',
]

# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'

# The non-core test modes
non_core_test_modes = [
    'other',
    'browser',
    'sanity',
    'sockets',
    'interactive',
    'benchmark',
    'asan',
    'lsan',
    'wasm2ss',
    'posixtest',
    'posixtest_browser',
]
def parameterized(parameters):
    """
    Mark a test as parameterized.

    Usage:
      @parameterized({
        'subtest1': (1, 2, 3),
        'subtest2': (4, 5, 6),
      })
      def test_something(self, a, b, c):
        ... # actual test body

    Equivalent to defining test_something_subtest1 / test_something_subtest2,
    each running the body with the given arguments (expanded by RunnerMeta).
    """
    def decorator(func):
        # RunnerMeta looks for this tag when constructing the test class.
        func._parameterize = parameters
        return func
    return decorator
class RunnerMeta(type):
    """Metaclass that expands @parameterized test methods into one concrete
    test method per parameterization (see the `parameterized` decorator)."""

    @classmethod
    def make_test(mcs, name, func, suffix, args):
        """
        This is a helper function to create new test functions for each parameterized form.

        :param name: the original name of the function
        :param func: the original function that we are parameterizing
        :param suffix: the suffix to append to the name of the function for this parameterization
        :param args: the positional arguments to pass to the original function for this parameterization
        :returns: a tuple of (new_function_name, new_function_object)
        """
        # Create the new test function. It calls the original function with the specified args.
        # We use @functools.wraps to copy over all the function attributes.
        @wraps(func)
        def resulting_test(self):
            return func(self, *args)

        # Add suffix to the function name so that it displays correctly.
        if suffix:
            resulting_test.__name__ = f'{name}_{suffix}'
        else:
            resulting_test.__name__ = name

        # On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
        # function. We add the suffix to it as well.
        resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'

        return resulting_test.__name__, resulting_test

    def __new__(mcs, name, bases, attrs):
        # This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
        new_attrs = {}

        for attr_name, value in attrs.items():
            # Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
            if hasattr(value, '_parameterize'):
                # If it does, we extract the parameterization information, build new test functions.
                for suffix, args in value._parameterize.items():
                    new_name, func = mcs.make_test(attr_name, value, suffix, args)
                    assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
                    new_attrs[new_name] = func
            else:
                # If not, we just copy it over to new_attrs verbatim.
                assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
                new_attrs[attr_name] = value

        # We invoke type, the default metaclass, to actually create the new class, with new_attrs.
        return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
    """Base class for emscripten test suites: manages per-test working
    directories, settings overrides, and JS/wasm engine selection."""

    # default temporary directory settings. set_temp_dir may be called later to
    # override these
    temp_dir = TEMP_DIR
    canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)

    # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
    # Change this to None to get stderr reporting, for debugging purposes
    stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
    def check_dlfcn(self):
        """Skip the current test when dynamic linking is unsupported in the
        active configuration (memory growth without wasm, wasm2js, asan)."""
        if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
            self.skipTest('no dlfcn with memory growth (without wasm)')
        if not self.get_setting('WASM'):
            self.skipTest('no dynamic library support in wasm2js yet')
        if '-fsanitize=address' in self.emcc_args:
            self.skipTest('no dynamic library support in asan yet')
    def uses_memory_init_file(self):
        """Whether the current configuration emits a separate memory
        initializer file.

        An explicit --memory-init-file flag wins; otherwise side modules and
        wasm builds never use one, and optimized (-O2 and up) builds do.
        """
        if self.get_setting('SIDE_MODULE') or \
           (self.get_setting('WASM') and not self.get_setting('WASM2JS')):
            return False
        elif '--memory-init-file' in self.emcc_args:
            # Use the value that follows the flag on the command line.
            return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
        else:
            # side modules handle memory differently; binaryen puts the memory
            # in the wasm module
            opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
            return opt_supports
    def set_temp_dir(self, temp_dir):
        """Override the default temporary directory for this test instance."""
        self.temp_dir = temp_dir
        self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
        # Explicitly set dedicated temporary directory for parallel tests
        os.environ['EMCC_TEMP_DIR'] = self.temp_dir
    @classmethod
    def setUpClass(cls):
        """Run emscripten's sanity check once per test class."""
        super(RunnerCore, cls).setUpClass()
        print('(checking sanity from test runner)') # do this after we set env stuff
        shared.check_sanity(force=True)
    def setUp(self):
        """Reset per-test state and create/enter a fresh working directory."""
        super(RunnerCore, self).setUp()
        self.settings_mods = {}
        self.emcc_args = ['-Werror']
        self.node_args = []
        self.v8_args = []
        self.env = {}
        self.temp_files_before_run = []
        self.uses_es6 = False
        self.js_engines = list(config.JS_ENGINES)
        self.wasm_engines = list(config.WASM_ENGINES)
        self.banned_js_engines = []
        self.use_all_engines = EMTEST_ALL_ENGINES

        if EMTEST_DETECT_TEMPFILE_LEAKS:
            # Snapshot the temp dir contents so tearDown can diff for leaks.
            for root, dirnames, filenames in os.walk(self.temp_dir):
                for dirname in dirnames:
                    self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
                for filename in filenames:
                    self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))

        if EMTEST_SAVE_DIR:
            # Use a fixed directory that survives the test run.
            self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
            if os.path.exists(self.working_dir):
                if EMTEST_SAVE_DIR == 2:
                    print('Not clearing existing test directory')
                else:
                    print('Clearing existing test directory')
                    # Even when EMTEST_SAVE_DIR we still try to start with an empty directoy as many tests
                    # expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
                    # run. This can be useful when iterating on a given test with extra files you want to keep
                    # around in the output directory.
                    delete_contents(self.working_dir)
            else:
                print('Creating new test output directory')
                ensure_dir(self.working_dir)
        else:
            self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
        os.chdir(self.working_dir)

        if not EMTEST_SAVE_DIR:
            # Remember whether stale .ll files exist from a previous run.
            self.has_prev_ll = False
            for temp_file in os.listdir(TEMP_DIR):
                if temp_file.endswith('.ll'):
                    self.has_prev_ll = True
def tearDown(self):
    """Per-test teardown: remove the working dir (unless saving output) and,
    when enabled, fail the test if it leaked files into the temp dir."""
    if not EMTEST_SAVE_DIR:
        # rmtree() fails on Windows if the current working directory is inside the tree.
        os.chdir(os.path.dirname(self.get_dir()))
        try_delete(self.get_dir())
    if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
        # Re-walk the temp dir and diff against the snapshot from setUp().
        temp_files_after_run = []
        for root, dirnames, filenames in os.walk(self.temp_dir):
            for dirname in dirnames:
                temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
            for filename in filenames:
                temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
        # Our leak detection will pick up *any* new temp files in the temp dir.
        # They may not be due to us, but e.g. the browser when running browser
        # tests. Until we figure out a proper solution, ignore some temp file
        # names that we see on our CI infrastructure.
        ignorable_file_prefixes = [
            '/tmp/tmpaddon',
            '/tmp/circleci-no-output-timeout',
            '/tmp/wasmer'
        ]
        left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
        left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
        if len(left_over_files):
            print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
            for f in left_over_files:
                print('leaked file: ' + f, file=sys.stderr)
            self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key):
    """Return this test's override for setting `key`, or the global default."""
    try:
        return self.settings_mods[key]
    except KeyError:
        return Settings[key]
def set_setting(self, key, value=1):
    """Override an emscripten setting for this test.

    Passing value=None removes any existing override.  The early return is
    required: without it the override dict would end up holding None, which
    serialize_settings() would then emit as `-s KEY=null`.
    """
    if value is None:
        self.clear_setting(key)
        return
    self.settings_mods[key] = value
def has_changed_setting(self, key):
    # True if this test has explicitly overridden setting `key`.
    return key in self.settings_mods
def clear_setting(self, key):
    """Remove any override for setting `key`; a no-op if none exists."""
    if key in self.settings_mods:
        del self.settings_mods[key]
def serialize_settings(self):
    """Render the settings overrides as `-s KEY[=VALUE]` emcc arguments.

    A value of 1 becomes a bare `-s KEY`; strings pass through verbatim;
    anything else is JSON-encoded.
    """
    flags = []
    for key, value in self.settings_mods.items():
        if value == 1:
            flags.append('-s')
            flags.append(key)
        elif type(value) is str:
            flags.extend(['-s', f'{key}={value}'])
        else:
            flags.extend(['-s', f'{key}={json.dumps(value)}'])
    return flags
def get_dir(self):
    # The per-test working directory (created in setUp()).
    return self.working_dir
def in_dir(self, *pathelems):
    """Join `pathelems` onto the test's working directory."""
    base = self.get_dir()
    return os.path.join(base, *pathelems)
def add_pre_run(self, code):
    """Inject JS `code` to run via Module.preRun."""
    create_test_file('prerun.js', 'Module.preRun = function() { %s }' % code)
    self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
    """Inject JS `code` to run via Module.postRun."""
    create_test_file('postrun.js', 'Module.postRun = function() { %s }' % code)
    # NOTE(review): uses --pre-js (not --post-js), presumably so the handler is
    # installed on Module before the runtime starts — confirm this is intended.
    self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
    """Inject JS `code` to run via Module.onExit."""
    create_test_file('onexit.js', 'Module.onExit = function() { %s }' % code)
    # NOTE(review): --pre-js appears deliberate here too (handler must exist
    # before the runtime runs) — confirm.
    self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
    """Serialized settings plus per-test emcc flags; --pre-js/--post-js
    pairs are stripped when not building the main file."""
    args = self.serialize_settings() + self.emcc_args
    if main_file:
        return args
    skip = set()
    for idx, arg in enumerate(args):
        if arg in ('--pre-js', '--post-js') and idx not in skip:
            skip.add(idx)
            skip.add(idx + 1)  # drop the flag's filename argument too
    return [arg for idx, arg in enumerate(args) if idx not in skip]
def verify_es5(self, filename):
    """Fail the test unless `filename` passes an ES5 compliance check (es-check)."""
    es_check = shared.get_npm_cmd('es-check')
    # use --quiet once its available
    # See: https://github.com/dollarshaveclub/es-check/pull/126/
    try:
        shared.run_process(es_check + ['es5', os.path.abspath(filename)], stderr=PIPE)
    except subprocess.CalledProcessError as e:
        print(e.stderr)
        self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=None, includes=None, force_c=False,
          post_build=None, js_outfile=True):
    """Compile `filename` with emcc/em++ into the current test directory.

    `libraries`/`includes` default to empty lists (using None sentinels to
    avoid the shared mutable-default-argument pitfall).  Asserts the output
    file exists and, for non-ES6 JS output, that it is valid ES5.
    `post_build`, if given, is called with the output filename.
    """
    libraries = libraries or []
    includes = includes or []
    suffix = '.js' if js_outfile else '.wasm'
    if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
        compiler = [EMXX]
    else:
        # TODO(https://github.com/emscripten-core/emscripten/issues/11121)
        # We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove
        # this if this issues is fixed.
        compiler = [EMCC, '-nostdlib++']
    dirname, basename = os.path.split(filename)
    output = shared.unsuffixed(basename) + suffix
    cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + \
        ['-I.', '-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
        ['-I' + include for include in includes] + \
        libraries
    self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
    self.assertExists(output)
    if js_outfile and not self.uses_es6:
        self.verify_es5(output)
    if post_build:
        post_build(output)
    if js_outfile and self.uses_memory_init_file():
        # Close the handle explicitly (the original leaked it).
        with open(output) as f:
            src = f.read()
        # side memory init file, or an empty one in the js
        assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def get_func(self, src, name):
    """Extract the full text of JS function `name` from `src`.

    Finds the `function name(` header, then scans forward tracking brace
    depth; returns the substring up to and including the matching close
    brace.  Raises ValueError if the header is absent; asserts if the
    braces never balance.
    """
    start = src.index('function ' + name + '(')
    pos = start
    depth = 0
    while True:
        ch = src[pos]
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                return src[start:pos + 1]
        pos += 1
        assert pos < len(src)
def count_funcs(self, javascript_file):
    """Count `function ` occurrences between the EMSCRIPTEN_START_FUNCS and
    EMSCRIPTEN_END_FUNCS markers of a compiled JS file."""
    start_tok = "// EMSCRIPTEN_START_FUNCS"
    end_tok = "// EMSCRIPTEN_END_FUNCS"
    with open(javascript_file, 'rt') as f:
        blob = f.read()
    begin = blob.find(start_tok) + len(start_tok)
    end = blob.find(end_tok)
    return blob[begin:end].count('function ')
def count_wasm_contents(self, wasm_binary, what):
    """Return the integer count reported by `wasm-opt --metrics` for the
    category `what` (e.g. 'funcs'); fails the test if not found."""
    out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
    # output is something like
    # [?] : 125
    for line in out.splitlines():
        if '[' + what + ']' in line:
            ret = line.split(':')[1].strip()
            return int(ret)
    self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
    # Disassemble a wasm binary into its text (wat) form via binaryen's wasm-dis.
    return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
    """True if the wasm module's text form contains an export named `name`."""
    needle = '(export "%s"' % name
    return needle in self.get_wasm_text(wasm)
def run_js(self, filename, engine=None, args=None, output_nicerizer=None, assert_returncode=0):
    """Run a compiled JS/wasm file under a JS engine and return its output.

    Fixes over the previous version: `args` no longer uses a mutable default,
    the four file handles (two written, two read back) are closed via context
    managers, and the verbose log interpolates the actual filename.
    Fails the test on unexpected exit status or strict-mode warnings.
    """
    args = args or []
    # use files, as PIPE can get too full and hang us
    stdout = self.in_dir('stdout')
    stderr = self.in_dir('stderr')
    error = None
    if not engine:
        engine = config.JS_ENGINES[0]
    if engine == config.NODE_JS:
        engine = engine + self.node_args
    if engine == config.V8_ENGINE:
        engine = engine + self.v8_args
    if EMTEST_VERBOSE:
        print(f"Running '{filename}' under '{shared.shlex_join(engine)}'")
    try:
        with open(stdout, 'w') as stdout_file, open(stderr, 'w') as stderr_file:
            jsrun.run_js(filename, engine, args,
                         stdout=stdout_file,
                         stderr=stderr_file,
                         assert_returncode=assert_returncode)
    except subprocess.CalledProcessError as e:
        error = e
    # Make sure that we produced proper line endings to the .js file we are about to run.
    if not filename.endswith('.wasm'):
        self.assertEqual(line_endings.check_line_endings(filename), 0)
    with open(stdout, 'r') as f:
        out = f.read()
    with open(stderr, 'r') as f:
        err = f.read()
    if output_nicerizer:
        ret = output_nicerizer(out, err)
    else:
        ret = out + err
    if error or EMTEST_VERBOSE:
        ret = limit_size(ret)
        print('-- begin program output --')
        print(ret, end='')
        print('-- end program output --')
    if error:
        if assert_returncode == NON_ZERO:
            self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
        else:
            self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
    # We should pass all strict mode checks
    self.assertNotContained('strict warning:', ret)
    return ret
def assertExists(self, filename, msg=None):
    """Assert that `filename` exists on disk."""
    message = msg or ('Expected file not found: ' + filename)
    self.assertTrue(os.path.exists(filename), message)
def assertNotExists(self, filename, msg=None):
    """Assert that `filename` does not exist on disk."""
    message = msg or ('Unexpected file exists: ' + filename)
    self.assertFalse(os.path.exists(filename), message)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
    """Assert two paths are equal ignoring separator style (\\ vs /)."""
    def forward_slashes(p):
        return p.replace('\\', '/')
    return self.assertIdentical(forward_slashes(path1), forward_slashes(path2))
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
                            fromfile='expected', tofile='actual'):
    """Assert two text blobs are identical after normalizing line endings."""
    def to_unix(t):
        return t.replace('\r\n', '\n')
    return self.assertIdentical(to_unix(text1), to_unix(text2), msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
                    fromfile='expected', tofile='actual'):
    """Assert `y` equals `values` (a single value, or any member of a
    list/tuple of acceptable values); on failure show a unified diff.

    Note: the diff is rendered against the *last* candidate in `values` only
    (the loop variable `x` is reused after the loop).
    """
    if type(values) not in (list, tuple):
        values = [values]
    for x in values:
        if x == y:
            return # success
    diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
                                      fromfile=fromfile, tofile=tofile)
    diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
    if EMTEST_VERBOSE:
        print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
    fail_message = 'Unexpected difference:\n' + limit_size(diff)
    if not EMTEST_VERBOSE:
        fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
    if msg:
        fail_message += '\n' + msg
    self.fail(fail_message)
def assertIdenticalUrlEncoded(self, expected, actual, **kwargs):
    """URL decodes the `actual` parameter before checking for equality."""
    decoded = unquote(actual)
    self.assertIdentical(expected, decoded, **kwargs)
def assertTextDataContained(self, text1, text2):
    """Assert `text1` occurs in `text2`, ignoring line-ending differences."""
    def to_unix(t):
        return t.replace('\r\n', '\n')
    return self.assertContained(to_unix(text1), to_unix(text2))
def assertContained(self, values, string, additional_info=''):
    """Assert that `string` contains at least one of `values` (a single value
    or a list/tuple of alternatives); on failure show a unified diff against
    the first candidate."""
    if type(values) not in [list, tuple]:
        values = [values]
    values = list(map(asstr, values))
    if callable(string):
        string = string()  # allow lazy construction of large strings
    if not any(v in string for v in values):
        diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
        diff = ''.join(a.rstrip() + '\n' for a in diff)
        self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
            limit_size(values[0]), limit_size(string), limit_size(diff),
            additional_info
        ))
def assertNotContained(self, value, string):
    """Assert that `value` does NOT occur as a substring of `string`.

    Either argument may be a callable producing the actual value (lazy
    evaluation for expensive strings).
    """
    if callable(value):
        value = value()  # lazy loading
    if callable(string):
        string = string()
    if value not in string:
        return
    diff_lines = difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
    diff = ''.join(a.rstrip() + '\n' for a in diff_lines)
    self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string),
        limit_size(diff)
    ))
def assertContainedIf(self, value, string, condition):
    """Assert containment when `condition` holds, non-containment otherwise."""
    check = self.assertContained if condition else self.assertNotContained
    check(value, string)
def assertBinaryEqual(self, file1, file2):
    """Assert two files have identical size and identical byte content.

    Uses context managers so the handles are closed promptly (the previous
    version leaked both open file objects).
    """
    self.assertEqual(os.path.getsize(file1),
                     os.path.getsize(file2))
    with open(file1, 'rb') as f1, open(file2, 'rb') as f2:
        self.assertEqual(f1.read(), f2.read())
# Class-level cache of built third-party libraries, shared by all tests in a
# run; keyed by a name derived from library name + build flags (see
# get_library()), mapping to (basename, contents) pairs.
library_cache = {}
def get_build_dir(self):
    """Return the 'building' subdirectory of the test dir, creating it if needed."""
    build_dir = os.path.join(self.get_dir(), 'building')
    ensure_dir(build_dir)
    return build_dir
def get_library(self, name, generated_libs, configure=['sh', './configure'],
                configure_args=[], make=['make'], make_args=None,
                env_init={}, cache_name_extra='', native=False):
    """Build (or load from the class-level library_cache) a third-party test
    library, returning the list of generated library file paths.

    NOTE(review): the list/dict defaults are shared across calls; they are
    only read here, but confirm build_library() does not mutate them.
    A caller-supplied `configure` value of None presumably means "skip the
    configure step" — so its list default must not be replaced by a None
    sentinel.
    """
    if make_args is None:
        make_args = ['-j', str(building.get_num_cores())]
    build_dir = self.get_build_dir()
    output_dir = self.get_dir()
    emcc_args = self.get_emcc_args()
    # Cache key mixes the library name, short flags, and an md5 of the full
    # flag + env configuration so differing builds don't collide.
    hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
    cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
    valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
    cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
    if self.library_cache.get(cache_name):
        print('<load %s from cache> ' % cache_name, file=sys.stderr)
        generated_libs = []
        for basename, contents in self.library_cache[cache_name]:
            bc_file = os.path.join(build_dir, cache_name + '_' + basename)
            with open(bc_file, 'wb') as f:
                f.write(contents)
            generated_libs.append(bc_file)
        return generated_libs
    print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
    return build_library(name, build_dir, output_dir, generated_libs, configure,
                         configure_args, make, make_args, self.library_cache,
                         cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
    """Delete everything in the test working dir, and in the global
    emscripten temp dir when one is configured."""
    for name in os.listdir(self.get_dir()):
        try_delete(os.path.join(self.get_dir(), name))
    if EMSCRIPTEN_TEMP_DIR:
        for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
            try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
def run_process(self, cmd, check=True, **args):
    """Run `cmd`, turning a non-zero exit (when check=True) into a unittest
    failure instead of an unhandled exception."""
    # Wrapper around shared.run_process. This is desirable so that the tests
    # can fail (in the unittest sense) rather than error'ing.
    # In the long run it would nice to completely remove the dependency on
    # core emscripten code (shared.py) here.
    try:
        return shared.run_process(cmd, check=check, **args)
    except subprocess.CalledProcessError as e:
        if check and e.returncode != 0:
            self.fail('subprocess exited with non-zero return code(%d): `%s`' %
                      (e.returncode, shared.shlex_join(cmd)))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
    """Run a subprocess and assert that it returns non-zero.
    Return the stderr of the subprocess.
    """
    proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
    self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
    # When we check for failure we expect a user-visible error, not a traceback.
    # However, on windows a python traceback can happen randomly sometimes,
    # due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
    if not WINDOWS or 'Access is denied' not in proc.stderr:
        self.assertNotContained('Traceback', proc.stderr)
    return proc.stderr
# excercise dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
#          B
#        <   > A
#  main <
#        <   > A
#          C
#
# this test is used by both test_core and test_browser.
# when run under broswer it excercises how dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
    """Shared driver: build liba/libb/libc as SIDE_MODULEs, then run a main
    program against them twice — once via RUNTIME_LINKED_LIBS, once via
    dlopen() — checking that A is initialized exactly once."""
    create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
    create_test_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
    create_test_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
    # _test_dylink_dso_needed can be potentially called several times by a test.
    # reset dylink-related options first.
    self.clear_setting('MAIN_MODULE')
    self.clear_setting('SIDE_MODULE')
    self.clear_setting('RUNTIME_LINKED_LIBS')
    # XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
    self.set_setting('INITIAL_MEMORY', '32mb')
    so = '.wasm' if self.is_wasm() else '.js'

    def ccshared(src, linkto=[]):
        # Build one shared (side) module, optionally linked against others.
        cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so] + self.get_emcc_args()
        cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
        self.run_process(cmdv)

    ccshared('liba.cpp')
    ccshared('libb.cpp', ['liba' + so])
    ccshared('libc.cpp', ['liba' + so])
    self.set_setting('MAIN_MODULE', 1)
    self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
    # First run: libraries loaded automatically at startup.
    do_run(r'''
extern "C" {
void bfunc();
void cfunc();
}
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
    self.set_setting('RUNTIME_LINKED_LIBS', [])
    for libname in ['liba', 'libb', 'libc']:
        self.emcc_args += ['--embed-file', libname + so]
    # Second run: libraries loaded manually via dlopen().
    do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
    """Return `js_engines` (default: this test's engines) with banned engines
    removed.  Engines are command lists; banning compares element 0 (the
    executable path)."""
    if js_engines is None:
        js_engines = self.js_engines
    for engine in js_engines:
        assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
        assert type(engine) == list
    for engine in self.banned_js_engines:
        assert type(engine) in (list, type(None))
    banned = [b[0] for b in self.banned_js_engines if b]
    return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
    """Write source text `src` to a default-named file, then build and run it.

    With no_build in kwargs, `src` is instead treated as a path to an
    already-built file.
    """
    if 'no_build' in kwargs:
        filename = src  # src is already a filename here
    else:
        filename = 'src.c' if force_c else 'src.cpp'
        with open(filename, 'w') as f:
            f.write(src)
    self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
    # Like do_run, but takes a path to an existing source file rather than source text.
    self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
    """Build and run `filename`, comparing against the contents of
    `expected_output_filename` (read with a context manager so the handle
    is closed — the previous version leaked it)."""
    with open(expected_output_filename) as f:
        expected = f.read()
    self._build_and_run(filename, expected, **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
    """Build and run tests/<path>, comparing output against the sibling
    `.out` file (read via a context manager to avoid leaking the handle)."""
    srcfile = path_from_root(*path)
    outfile = shared.unsuffixed(srcfile) + '.out'
    with open(outfile) as f:
        expected = f.read()
    self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
                   no_build=False,
                   js_engines=None, post_build=None, libraries=[],
                   includes=[],
                   assert_returncode=0, assert_identical=False, assert_all=False,
                   check_for_error=True, force_c=False):
    """Build `filename` (unless no_build) and run it under every selected
    engine, checking the output against `expected_output`.

    NOTE(review): the list defaults are shared across calls but only read
    here.  The debug log below also looks truncated — presumably it should
    interpolate `filename`; confirm against upstream.
    """
    logger.debug(f'_build_and_run: (unknown)')
    if no_build:
        js_file = filename
    else:
        self.build(filename, libraries=libraries, includes=includes, post_build=post_build,
                   force_c=force_c)
        js_file = shared.unsuffixed(os.path.basename(filename)) + '.js'
    self.assertExists(js_file)
    engines = self.filtered_js_engines(js_engines)
    # Unless the test opted into all engines, one is enough.
    if len(engines) > 1 and not self.use_all_engines:
        engines = engines[:1]
    # In standalone mode, also add wasm vms as we should be able to run there too.
    if self.get_setting('STANDALONE_WASM'):
        # TODO once standalone wasm support is more stable, apply use_all_engines
        # like with js engines, but for now as we bring it up, test in all of them
        if not self.wasm_engines:
            logger.warning('no wasm engine was found to run the standalone part of this test')
        engines += self.wasm_engines
    if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
        # compile the c file to a native executable.
        c = shared.unsuffixed(js_file) + '.wasm.c'
        executable = shared.unsuffixed(js_file) + '.exe'
        cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
        self.run_process(cmd, env=clang_native.get_clang_native_env())
        # we can now run the executable directly, without an engine, which
        # we indicate with None as the engine
        engines += [[None]]
    if len(engines) == 0:
        self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
    for engine in engines:
        js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
        js_output = js_output.replace('\r\n', '\n')
        if expected_output:
            try:
                if assert_identical:
                    self.assertIdentical(expected_output, js_output)
                elif assert_all:
                    for o in expected_output:
                        self.assertContained(o, js_output)
                else:
                    self.assertContained(expected_output, js_output)
                if check_for_error:
                    self.assertNotContained('ERROR', js_output)
            except Exception:
                print('(test did not pass in JS engine: %s)' % engine)
                raise
def get_freetype_library(self):
    """Build (or fetch from cache) the freetype third-party library."""
    # freetype's build produces compiler warnings; don't let -Werror kill it.
    if '-Werror' in self.emcc_args:
        self.emcc_args.remove('-Werror')
    return self.get_library(
        os.path.join('third_party', 'freetype'),
        os.path.join('objs', '.libs', 'libfreetype.a'),
        configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
    """Build (or fetch from cache) poppler + freetype; returns the combined
    list of library files (poppler first)."""
    # The fontconfig symbols are all missing from the poppler build
    # e.g. FcConfigSubstitute
    self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
    self.emcc_args += [
        '-I' + path_from_root('tests', 'third_party', 'freetype', 'include'),
        '-I' + path_from_root('tests', 'third_party', 'poppler', 'include')
    ]
    freetype = self.get_freetype_library()
    # Poppler has some pretty glaring warning. Suppress them to keep the
    # test output readable.
    if '-Werror' in self.emcc_args:
        self.emcc_args.remove('-Werror')
    self.emcc_args += [
        '-Wno-sentinel',
        '-Wno-logical-not-parentheses',
        '-Wno-unused-private-field',
        '-Wno-tautological-compare',
        '-Wno-unknown-pragmas',
    ]
    # Copy before mutating so a caller-supplied dict isn't modified.
    env_init = env_init.copy() if env_init else {}
    env_init['FONTCONFIG_CFLAGS'] = ' '
    env_init['FONTCONFIG_LIBS'] = ' '
    poppler = self.get_library(
        os.path.join('third_party', 'poppler'),
        [os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
        env_init=env_init,
        configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
    return poppler + freetype
def get_zlib_library(self):
    """Build (or fetch from cache) zlib; uses a cmake/mingw configuration on
    Windows and the plain Makefile elsewhere."""
    if WINDOWS:
        return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
                                configure=[path_from_root('emconfigure.bat')],
                                configure_args=['cmake', '.'],
                                make=['mingw32-make'],
                                make_args=[])
    return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
    """HTTP server process for browser tests.

    Serves the harness page and test files; `in_queue` carries (url, dir)
    pairs for tests to run, `out_queue` carries the reported result (or None
    on a harness error).  Runs forever — the test runner kills the process.
    """
    class TestServerHandler(SimpleHTTPRequestHandler):
        # Request header handler for default do_GET() path in
        # SimpleHTTPRequestHandler.do_GET(self) below.
        def send_head(self):
            if self.path.endswith('.js'):
                # Serve .js files ourselves so we control the Content-type.
                path = self.translate_path(self.path)
                try:
                    f = open(path, 'rb')
                except IOError:
                    self.send_error(404, "File not found: " + path)
                    return None
                self.send_response(200)
                self.send_header('Content-type', 'application/javascript')
                self.send_header('Connection', 'close')
                self.end_headers()
                return f
            else:
                return SimpleHTTPRequestHandler.send_head(self)

        # Add COOP, COEP, CORP, and no-caching headers
        def end_headers(self):
            self.send_header('Access-Control-Allow-Origin', '*')
            self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
            self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
            self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
            self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
            return SimpleHTTPRequestHandler.end_headers(self)

        def do_GET(self):
            if self.path == '/run_harness':
                if DEBUG:
                    print('[server startup]')
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
            elif 'report_' in self.path:
                # the test is reporting its result. first change dir away from the
                # test dir, as it will be deleted now that the test is finishing, and
                # if we got a ping at that time, we'd return an error
                os.chdir(path_from_root())
                # for debugging, tests may encode the result and their own url (window.location) as result|url
                if '|' in self.path:
                    path, url = self.path.split('|', 1)
                else:
                    path = self.path
                    url = '?'
                if DEBUG:
                    print('[server response:', path, url, ']')
                if out_queue.empty():
                    out_queue.put(path)
                else:
                    # a badly-behaving test may send multiple xhrs with reported results; we just care
                    # about the first (if we queued the others, they might be read as responses for
                    # later tests, or maybe the test sends more than one in a racy manner).
                    # we place 'None' in the queue here so that the outside knows something went wrong
                    # (none is not a valid value otherwise; and we need the outside to know because if we
                    # raise an error in here, it is just swallowed in python's webserver code - we want
                    # the test to actually fail, which a webserver response can't do).
                    out_queue.put(None)
                    raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
                self.send_response(200)
                self.send_header('Content-type', 'text/plain')
                self.send_header('Cache-Control', 'no-cache, must-revalidate')
                self.send_header('Connection', 'close')
                self.send_header('Expires', '-1')
                self.end_headers()
                self.wfile.write(b'OK')
            elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
                '''
                To get logging to the console from browser tests, add this to
                print/printErr/the exception handler in src/shell.html:
                var xhr = new XMLHttpRequest();
                xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
                xhr.send();
                '''
                print('[client logging:', unquote_plus(self.path), ']')
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
            elif self.path == '/check':
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                if not in_queue.empty():
                    # there is a new test ready to be served
                    url, dir = in_queue.get()
                    if DEBUG:
                        print('[queue command:', url, dir, ']')
                    assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
                    assert out_queue.empty(), 'the single response from the last test was read'
                    # tell the browser to load the test
                    self.wfile.write(b'COMMAND:' + url)
                    # move us to the right place to serve the files for the new test
                    os.chdir(dir)
                else:
                    # the browser must keep polling
                    self.wfile.write(b'(wait)')
            else:
                # Use SimpleHTTPServer default file serving operation for GET.
                if DEBUG:
                    print('[simple HTTP serving:', unquote_plus(self.path), ']')
                SimpleHTTPRequestHandler.do_GET(self)

        # NOTE(review): missing `self` — when invoked as a bound method the
        # instance lands in `code`.  Harmless because the body ignores its
        # arguments, but confirm this is intentional.
        def log_request(code=0, size=0):
            # don't log; too noisy
            pass

    # allows streaming compilation to work
    SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
    httpd = HTTPServer(('localhost', port), TestServerHandler)
    httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
    """When running browser tests we normally automatically include support
    code for reporting results back to the browser. This enum allows tests
    to decide what type of support code they need/want.
    """
    # No reporting support code at all.
    NONE = 0
    # Include the JS helpers for reporting results
    JS_ONLY = 1
    # Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
    FULL = 2
class BrowserCore(RunnerCore):
    """Base class for tests that run inside a real browser via the harness
    server (see harness_server_func)."""
    # note how many tests hang / do not send an output. if many of these
    # happen, likely something is broken and it is best to abort the test
    # suite early, as otherwise we will wait for the timeout on every
    # single test (hundreds of minutes)
    MAX_UNRESPONSIVE_TESTS = 10
    # Running count of tests that never reported a result (class-wide).
    unresponsive_tests = 0
def __init__(self, *args, **kwargs):
    # No browser-specific per-instance state; defer entirely to RunnerCore.
    super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
    """Open `url` in the EMTEST_BROWSER-configured browser, or the system
    default when none is configured."""
    if not EMTEST_BROWSER:
        logger.info('Using default system browser')
        webbrowser.open_new(url)
        return
    browser_args = shlex.split(EMTEST_BROWSER)
    # If the given browser is a scalar, treat it like one of the possible types
    # from https://docs.python.org/2/library/webbrowser.html
    if len(browser_args) == 1:
        try:
            # This throws if the type of browser isn't available
            webbrowser.get(browser_args[0]).open_new(url)
            logger.info('Using Emscripten browser: %s', browser_args[0])
            return
        except webbrowser.Error:
            # Ignore the exception and fallback to the custom command logic
            pass
    # Else assume the given browser is a specific program with additional
    # parameters and delegate to that
    logger.info('Using Emscripten browser: %s', str(browser_args))
    subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
    """Suite setup for browser tests: start the harness HTTP server in a
    subprocess and point a browser at the harness page (skipped when no
    browser is available)."""
    super(BrowserCore, cls).setUpClass()
    cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
    cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
    if not has_browser():
        return
    cls.browser_timeout = 60
    cls.harness_in_queue = multiprocessing.Queue()
    cls.harness_out_queue = multiprocessing.Queue()
    cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
    cls.harness_server.start()
    print('[Browser harness server on process %d]' % cls.harness_server.pid)
    cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
    """Suite teardown: stop the harness server subprocess."""
    super(BrowserCore, cls).tearDownClass()
    if not has_browser():
        return
    cls.harness_server.terminate()
    print('[Browser harness server terminated]')
    if WINDOWS:
        # On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
        # WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
        time.sleep(0.1)
def assert_out_queue_empty(self, who):
    """Raise if `who` left unread responses in the harness out-queue,
    draining them first so they can't poison the next test."""
    if self.harness_out_queue.empty():
        return
    while not self.harness_out_queue.empty():
        self.harness_out_queue.get()
    raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
#                     many more causes of flakiness (in particular, they do not run
#                     synchronously, so we have a timeout, which can be hit if the VM
#                     we run on stalls temporarily), so we let each test try more than
#                     once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
    """Queue `html_file` for the browser harness and verify the reported
    result; with expectedResult=None, open the page for manual inspection."""
    if not has_browser():
        return
    if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
        self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
    self.assert_out_queue_empty('previous test')
    if DEBUG:
        print('[browser launch:', html_file, ']')
    if expectedResult is not None:
        try:
            self.harness_in_queue.put((
                asbytes('http://localhost:%s/%s' % (self.port, html_file)),
                self.get_dir()
            ))
            received_output = False
            output = '[no http server activity]'
            start = time.time()
            if timeout is None:
                timeout = self.browser_timeout
            # Poll the out-queue until the test reports or the timeout expires.
            while time.time() - start < timeout:
                if not self.harness_out_queue.empty():
                    output = self.harness_out_queue.get()
                    received_output = True
                    break
                time.sleep(0.1)
            if not received_output:
                BrowserCore.unresponsive_tests += 1
                print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
            if output is None:
                # the browser harness reported an error already, and sent a None to tell
                # us to also fail the test
                raise Exception('failing test due to browser harness error')
            if output.startswith('/report_result?skipped:'):
                self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
            else:
                # verify the result, and try again if we should do so
                try:
                    self.assertIdenticalUrlEncoded(expectedResult, output)
                except Exception as e:
                    if extra_tries > 0:
                        print('[test error (see below), automatically retrying]')
                        print(e)
                        return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
                    else:
                        raise e
        finally:
            time.sleep(0.1) # see comment about Windows above
            self.assert_out_queue_empty('this test')
    else:
        # Manual mode: just open the page and let a human inspect it.
        webbrowser.open_new(os.path.abspath(html_file))
        print('A web browser window should have opened a page containing the results of a part of this test.')
        print('You need to manually look at the page to see that it works ok: ' + message)
        print('(sleeping for a bit to keep the directory alive for the web browser..)')
        time.sleep(5)
        print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
    """Emit reftest.js, which diffs the rendered canvas against a reference PNG.

    `expected` is a path to the reference image; it is copied next to the test
    page. The generated JS loads it, compares it per-pixel (RGB only) against
    the Module canvas, and reports the average per-channel error to the test
    harness server.
    """
    # make sure the pngs used here have no color correction, using e.g.
    # pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
    basename = os.path.basename(expected)
    shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
    with open('reftest.js', 'w') as out:
        with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
            # The three %s substitutions below are, in order: the reporting JS
            # source, the reference image basename, and int(manually_trigger).
            out.write('''
function doReftest() {
  if (doReftest.done) return;
  doReftest.done = true;
  var img = new Image();
  img.onload = function() {
    assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
    assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);

    var canvas = document.createElement('canvas');
    canvas.width = img.width;
    canvas.height = img.height;
    var ctx = canvas.getContext('2d');
    ctx.drawImage(img, 0, 0);
    var expected = ctx.getImageData(0, 0, img.width, img.height).data;

    var actualUrl = Module.canvas.toDataURL();
    var actualImage = new Image();
    actualImage.onload = function() {
      /*
      document.body.appendChild(img); // for comparisons
      var div = document.createElement('div');
      div.innerHTML = '^=expected, v=actual';
      document.body.appendChild(div);
      document.body.appendChild(actualImage); // to grab it for creating the test reference
      */

      var actualCanvas = document.createElement('canvas');
      actualCanvas.width = actualImage.width;
      actualCanvas.height = actualImage.height;
      var actualCtx = actualCanvas.getContext('2d');
      actualCtx.drawImage(actualImage, 0, 0);
      var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;

      var total = 0;
      var width = img.width;
      var height = img.height;
      for (var x = 0; x < width; x++) {
        for (var y = 0; y < height; y++) {
          total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
          total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
          total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
        }
      }
      var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing

      // If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
      if (typeof reportResultToServer === 'undefined') {
        (function() {
          %s
          reportResultToServer(wrong);
        })();
      } else {
        reportResultToServer(wrong);
      }
    };
    actualImage.src = actualUrl;
  }
  img.src = '%s';
};

// Automatically trigger the reftest?
if (!%s) {
  // Yes, automatically
  Module['postRun'] = doReftest;

  if (typeof WebGLClient !== 'undefined') {
    // trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
    var realRAF = window.requestAnimationFrame;
    window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
      realRAF(function() {
        func();
        realRAF(doReftest);
      });
    });

    // trigger reftest from canvas render too, for workers not doing GL
    var realWOM = worker.onmessage;
    worker.onmessage = function(event) {
      realWOM(event);
      if (event.data.target === 'canvas' && event.data.op === 'render') {
        realRAF(doReftest);
      }
    };
  }
} else {
  // Manually trigger the reftest.
  // The user will call it.
  // Add an event loop iteration to ensure rendering, so users don't need to bother.
  var realDoReftest = doReftest;
  doReftest = function() {
    setTimeout(realDoReftest, 1);
  };
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
    """Build a browser test with result-reporting support injected.

    Inject support code for reporting results. This adds an include path so
    testcases can use REPORT_RESULT, and also adds a cpp file to be compiled
    alongside the testcase, which contains the implementation of
    REPORT_RESULT (we can't just include that implementation in the header
    as there may be multiple files being compiled here).
    """
    # Build the extra flags into a new list so the caller's `args` list is
    # not mutated as a side effect (the old `args += ...` extended it in place).
    args = args + ['-s', 'IN_TEST_HARNESS=1']
    if reporting != Reporting.NONE:
        # For basic reporting we inject JS helper functions to report the
        # result back to the server.
        args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
                 '--pre-js', path_from_root('tests', 'browser_reporting.js')]
        if reporting == Reporting.FULL:
            # If C reporting (i.e. the REPORT_RESULT macro) is required,
            # also compile in report_result.cpp and force-include report_result.h
            args += ['-I', path_from_root('tests'),
                     '-include', path_from_root('tests', 'report_result.h'),
                     path_from_root('tests', 'report_result.cpp')]
    self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, expected, *args, **kwargs):
    """Special case of btest that reports its result solely via exiting
    with a given result code.

    Sets EXIT_RUNTIME so the runtime shuts down normally, and switches to
    JS-only reporting so the C code does not need the REPORT_RESULT macro.
    """
    self.set_setting('EXIT_RUNTIME')
    kwargs.update(reporting=Reporting.JS_ONLY,
                  expected='exit:%s' % expected)
    return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None, force_c=False,
          reference_slack=0, manual_reference=False, post_build=None,
          args=None, message='.', also_proxied=False,
          url_suffix='', timeout=None, also_asmjs=False,
          manually_trigger_reftest=False, extra_tries=1,
          reporting=Reporting.FULL):
    """Compile `filename` for the browser, open it, and verify the reported result.

    Either `expected` (value(s) the page reports) or `reference` (a reftest
    image compared with slack up to `reference_slack`) must be given.
    `filename` may be a path under tests/ or literal source text (detected by
    an embedded newline). May recurse to re-run the test under WASM=0 and/or
    proxied to a worker.
    """
    assert expected or reference, 'a btest must either expect an output, or have a reference image'
    if args is None:
        args = []
    # if we are provided the source and not a path, use that
    filename_is_src = '\n' in filename
    src = filename if filename_is_src else ''
    # keep a pristine copy: the recursive re-runs below must not see the
    # reftest/GL flags added for this run
    original_args = args[:]
    if filename_is_src:
        filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
        with open(filepath, 'w') as f:
            f.write(src)
    else:
        filepath = path_from_root('tests', filename)
    if reference:
        self.reference = reference
        # any pixel error up to reference_slack is accepted as a pass
        expected = [str(i) for i in range(0, reference_slack + 1)]
        self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
        if not manual_reference:
            args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
    outfile = 'test.html'
    args = [filepath, '-o', outfile] + args
    # print('all args:', args)
    try_delete(outfile)
    self.compile_btest(args, reporting=reporting)
    self.assertExists(outfile)
    if post_build:
        post_build()
    if not isinstance(expected, list):
        expected = [expected]
    self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
    # Tests can opt into being run under asmjs as well
    if 'WASM=0' not in original_args and (also_asmjs or self.also_asmjs):
        print('WASM=0')
        self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                   original_args + ['-s', 'WASM=0'], message, also_proxied=False, timeout=timeout)
    if also_proxied:
        print('proxied...')
        if reference:
            # a proxied reftest must be triggered manually after the worker
            # has rendered; post_manual_reftest does that
            assert not manual_reference
            manual_reference = True
            assert not post_build
            post_build = self.post_manual_reftest
        # run proxied
        self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                   original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], message, timeout=timeout)
###################################################################################################
def build_library(name,
                  build_dir,
                  output_dir,
                  generated_libs,
                  configure=['sh', './configure'],
                  configure_args=None,
                  make=['make'],
                  make_args=None,
                  cache=None,
                  cache_name=None,
                  env_init=None,
                  native=False,
                  cflags=None):
    """Build a library and cache the result. We build the library file
    once and cache it for all our tests. (We cache in memory since the test
    directory is destroyed and recreated for each test. Note that we cache
    separately for different compilers). This cache is just during the test
    runner. There is a different concept of caching as well, see |Cache|.

    Note: `configure` keeps its list default because callers pass
    `configure=None` to skip the configure step entirely, and the list is
    never mutated here. `output_dir` is accepted but currently unused
    (NOTE(review): confirm whether any caller relies on it).
    """
    # Replace the mutable default arguments with per-call values. In
    # particular `make_args` used to be a shared default list that was
    # mutated below (`make_args += ['VERBOSE=1']`), so VERBOSE=1 leaked
    # into every later call.
    configure_args = [] if configure_args is None else configure_args
    make_args = list(make_args) if make_args is not None else []
    env_init = {} if env_init is None else env_init
    cflags = [] if cflags is None else cflags
    if not isinstance(generated_libs, list):
        generated_libs = [generated_libs]
    source_dir = path_from_root('tests', name.replace('_native', ''))
    temp_dir = build_dir
    project_dir = os.path.join(temp_dir, name)
    if os.path.exists(project_dir):
        shutil.rmtree(project_dir)
    shutil.copytree(source_dir, project_dir)  # Useful in debugging sometimes to comment this out, and two lines above
    generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
    if native:
        env = clang_native.get_clang_native_env()
    else:
        env = building.get_building_env(cflags=cflags)
    for k, v in env_init.items():
        env[k] = v
    if configure:
        # Run configure, teeing its output to files so it can be dumped on failure.
        try:
            with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
                with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
                    stdout = out if EM_BUILD_VERBOSE < 2 else None
                    stderr = err if EM_BUILD_VERBOSE < 1 else None
                    building.configure(configure + configure_args, env=env,
                                       stdout=stdout,
                                       stderr=stderr,
                                       cwd=project_dir)
        except subprocess.CalledProcessError:
            with open(os.path.join(project_dir, 'configure_out')) as f:
                print('-- configure stdout --')
                print(f.read())
                print('-- end configure stdout --')
            with open(os.path.join(project_dir, 'configure_err')) as f:
                print('-- configure stderr --')
                print(f.read())
                print('-- end configure stderr --')
            raise

    def open_make_out(mode='r'):
        # make stdout capture file
        return open(os.path.join(project_dir, 'make.out'), mode)

    def open_make_err(mode='r'):
        # make stderr capture file
        return open(os.path.join(project_dir, 'make.err'), mode)

    if EM_BUILD_VERBOSE >= 3:
        # This now extends a per-call list, never the shared default.
        make_args += ['VERBOSE=1']
    try:
        with open_make_out('w') as make_out:
            with open_make_err('w') as make_err:
                stdout = make_out if EM_BUILD_VERBOSE < 2 else None
                stderr = make_err if EM_BUILD_VERBOSE < 1 else None
                building.make(make + make_args, stdout=stdout, stderr=stderr, env=env,
                              cwd=project_dir)
    except subprocess.CalledProcessError:
        with open_make_out() as f:
            print('-- make stdout --')
            print(f.read())
            print('-- end make stdout --')
        with open_make_err() as f:
            print('-- make stderr --')
            print(f.read())
            print('-- end stderr --')
        raise
    if cache is not None:
        cache[cache_name] = []
        for f in generated_libs:
            basename = os.path.basename(f)
            # Close the handle deterministically (was a leaked bare open()).
            with open(f, 'rb') as lib:
                cache[cache_name].append((basename, lib.read()))
    return generated_libs
def check_js_engines():
    """Verify the configured JS engines actually work; abort the run if not."""
    working_engines = list(filter(jsrun.check_engine, config.JS_ENGINES))
    if len(working_engines) < len(config.JS_ENGINES):
        print('Not all the JS engines in JS_ENGINES appears to work.')
        # sys.exit, not the interactive-only `exit()` builtin from site.py,
        # which may not exist when run under `python -S` or frozen.
        sys.exit(1)
    if EMTEST_ALL_ENGINES:
        print('(using ALL js engines)')
    else:
        logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
                       'engines, which is slower but provides more coverage')
def get_and_import_modules():
    """Import every test*.py module that sits next to this file and return them."""
    here = os.path.dirname(__file__)
    modules = []
    for filename in glob.glob(os.path.join(here, 'test*.py')):
        module_name = os.path.splitext(os.path.basename(filename))[0]
        __import__(module_name)
        modules.append(sys.modules[module_name])
    return modules
def get_all_tests(modules):
    """Return every known test as 'suite.test_name', for wildcard matching."""
    # Create a list of all known tests so that we can choose from them based on a wildcard search
    all_tests = []
    suites = core_test_modes + non_core_test_modes
    for module in modules:
        for suite_name in suites:
            if hasattr(module, suite_name):
                suite = getattr(module, suite_name)
                all_tests += [suite_name + '.' + t
                              for t in dir(suite) if t.startswith('test_')]
    return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
    """Process 'skip:suite.test' arguments.

    Each such entry replaces the named test with a stub that calls skipTest,
    and is removed from the returned argument list. Asserts if the test to
    skip cannot be found in any module.
    """
    for idx, arg in enumerate(args):
        if not arg.startswith('skip:'):
            continue
        which = [arg.split('skip:')[1]]
        print(','.join(which), file=sys.stderr)
        skipped = False
        for test in which:
            print('will skip "%s"' % test, file=sys.stderr)
            suite_name, test_name = test.split('.')
            for module in modules:
                suite = getattr(module, suite_name, None)
                if suite:
                    setattr(suite, test_name,
                            lambda s: s.skipTest("requested to be skipped"))
                    skipped = True
                    break
            assert skipped, "Not able to skip test " + test
        args[idx] = None
    return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
    """If the first argument starts with 'random', replace the argument list
    with a randomly chosen set of tests (see get_random_test_parameters)."""
    if not args:
        return args
    first = args[0]
    if not first.startswith('random'):
        return args
    random_arg = first[len('random'):]
    num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
    for module in modules:
        if hasattr(module, base_module):
            base = getattr(module, base_module)
            chosen = choose_random_tests(base, num_tests, relevant_modes)
            print_random_test_statistics(num_tests)
            return chosen
    return args
def get_random_test_parameters(arg):
    """Parse the suffix of a 'randomN' / 'randomotherN' / 'randombrowserN'
    argument into (num_tests, base_module, relevant_modes)."""
    num_tests = 1
    base_module = default_core_test_mode
    relevant_modes = core_test_modes
    if arg:
        num_str = arg
        if arg.startswith('other'):
            base_module, relevant_modes = 'other', ['other']
            num_str = arg.replace('other', '')
        elif arg.startswith('browser'):
            base_module, relevant_modes = 'browser', ['browser']
            num_str = arg.replace('browser', '')
        num_tests = int(num_str)
    return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
    """Randomly pick `num_tests` distinct 'mode.test' names from `base`.

    Stops early (with a message) if every possible combination has already
    been chosen.
    """
    candidates = [t for t in dir(base) if t.startswith('test_')]
    print()
    chosen = set()
    while len(chosen) < num_tests:
        # Keep the two random.choice calls in this exact order so the RNG
        # stream matches across runs with the same seed.
        test = random.choice(candidates)
        mode = random.choice(relevant_modes)
        new_test = mode + '.' + test
        if new_test not in chosen:
            chosen.add(new_test)
            print('* ' + new_test)
        elif len(chosen) == len(candidates) * len(relevant_modes):
            # we may have hit the limit
            print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(candidates), len(relevant_modes)))
            break
    return list(chosen)
def print_random_test_statistics(num_tests):
    """Print the statistical confidence implied by a passing random sample.

    Also registers an atexit hook that repeats the conclusion when the test
    run finishes.
    """
    # Standard deviation of a Bernoulli sample mean, as a fraction of 1.
    std = 0.5 / math.sqrt(num_tests)
    expected = 100.0 * (1.0 - std)
    print()
    print('running those %d randomly-selected tests. if they all pass, then there is a '
          'greater than 95%% chance that at least %.2f%% of the test suite will pass'
          % (num_tests, expected))
    print()

    def show():
        # Printed at interpreter exit, after the run's real output.
        print('if all tests passed then there is a greater than 95%% chance that at least '
              '%.2f%% of the test suite will pass'
              % (expected))
    atexit.register(show)
def load_test_suites(args, modules):
    """Resolve requested test names into unittest suites.

    Returns (suites, unmatched): `suites` is a list of (module_name, suite)
    pairs, and `unmatched` is the set of requested names that were found in
    no module.
    """
    loader = unittest.TestLoader()
    unmatched_test_names = set(args)
    suites = []
    for m in modules:
        names_in_module = []
        # Iterate over a copy, since matched names are removed as we go.
        for name in list(unmatched_test_names):
            try:
                # attrgetter resolves dotted names like 'test_core.test_foo'.
                operator.attrgetter(name)(m)
                names_in_module.append(name)
                unmatched_test_names.remove(name)
            except AttributeError:
                pass
        if len(names_in_module):
            loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
            tests = flattened_tests(loaded_tests)
            suite = suite_for_module(m, tests)
            for test in tests:
                suite.addTest(test)
            suites.append((m.__name__, suite))
    return suites, unmatched_test_names
def flattened_tests(loaded_tests):
    """Flatten unittest's two-level suite structure into a flat list of tests."""
    # A loaded TestSuite iterates sub-suites, which in turn iterate tests;
    # a nested comprehension replaces the manual append loop.
    return [test for subsuite in loaded_tests for test in subsuite]
def suite_for_module(module, tests):
    """Return a parallel test suite when the module supports it and there is
    enough work to parallelize; otherwise a plain unittest.TestSuite.

    EMTEST_SAVE_DIR forces serial execution.
    """
    supported = module.__name__ in ('test_core', 'test_other', 'test_posixtest')
    if not EMTEST_SAVE_DIR:
        if supported and len(tests) > 1 and parallel_testsuite.num_cores() > 1:
            return parallel_testsuite.ParallelTestSuite(len(tests))
    return unittest.TestSuite()
def run_tests(options, suites):
    """Run every loaded test suite and print a combined summary.

    Returns the total number of errors plus failures, capped at 255 so the
    value can be used directly as a process exit code.
    """
    messages = []
    failures = 0
    print('Test suites:')
    print([name for name, _ in suites])
    # Run the discovered tests
    runner = unittest.TextTestRunner(verbosity=2)
    for mod_name, suite in suites:
        print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
        result = runner.run(suite)
        msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
               (mod_name, result.testsRun, len(result.errors), len(result.failures), len(result.skipped)))
        failures += len(result.errors) + len(result.failures)
        messages.append(msg)
    if len(messages) > 1:
        print('====================')
        print()
        print('TEST SUMMARY')
        for msg in messages:
            print('    ' + msg)
    # Return the number of failures as the process exit code for automating success/failure reporting.
    return min(failures, 255)
def parse_args(args):
    """Parse command-line options into a namespace with a `tests` list.

    NOTE(review): the `args` parameter is currently ignored — parse_args()
    falls back to sys.argv[1:]. This only works because main() happens to be
    called with sys.argv. Consider parser.parse_args(args[1:]), but confirm
    all callers first.
    """
    parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
    parser.add_argument('tests', nargs='*')
    return parser.parse_args()
def main(args):
    """Entry point: resolve the requested test names and run them.

    `args` is a full argv-style list. Returns a process exit code
    (0 on success, 1 if any requested test could not be found, otherwise
    the capped failure count from run_tests).
    """
    options = parse_args(args)
    check_js_engines()

    def prepend_default(arg):
        # A bare 'test_foo' implicitly belongs to the default core mode.
        if arg.startswith('test_'):
            return default_core_test_mode + '.' + arg
        return arg

    tests = [prepend_default(t) for t in options.tests]
    modules = get_and_import_modules()
    all_tests = get_all_tests(modules)
    # Successive filters: wildcards, skip: entries, then 'random' selection.
    tests = tests_with_expanded_wildcards(tests, all_tests)
    tests = skip_requested_tests(tests, modules)
    tests = args_for_random_tests(tests, modules)
    suites, unmatched_tests = load_test_suites(tests, modules)
    if unmatched_tests:
        print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
        return 1
    return run_tests(options, suites)
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv))
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly with a conventional failure code instead of a traceback.
        logger.warning('KeyboardInterrupt')
        sys.exit(1)
|
trace_event_unittest.py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import logging
import math
import multiprocessing
import os
import tempfile
import time
import unittest
from py_trace_event import trace_event
from py_trace_event import trace_time
from py_trace_event.trace_event_impl import log
class TraceEventTests(unittest.TestCase):
def setUp(self):
tf = tempfile.NamedTemporaryFile(delete=False)
self._log_path = tf.name
tf.close()
def tearDown(self):
if os.path.exists(self._log_path):
os.remove(self._log_path)
@contextlib.contextmanager
def _test_trace(self, disable=True):
    """Context manager that enables tracing to self._log_path for its body.

    By default tracing is turned off again on exit; pass disable=False when
    the test wants to call trace_disable() itself.
    """
    try:
        trace_event.trace_enable(self._log_path)
        yield
    finally:
        if disable:
            trace_event.trace_disable()
def testNoImpl(self):
orig_impl = trace_event.trace_event_impl
try:
trace_event.trace_event_impl = None
self.assertFalse(trace_event.trace_can_enable())
finally:
trace_event.trace_event_impl = orig_impl
def testImpl(self):
self.assertTrue(trace_event.trace_can_enable())
def testIsEnabledFalse(self):
self.assertFalse(trace_event.trace_is_enabled())
def testIsEnabledTrue(self):
with self._test_trace():
self.assertTrue(trace_event.trace_is_enabled())
def testEnable(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 1)
self.assertTrue(trace_event.trace_is_enabled())
log_output = log_output.pop()
self.assertEquals(log_output['category'], 'process_argv')
self.assertEquals(log_output['name'], 'process_argv')
self.assertTrue(log_output['args']['argv'])
self.assertEquals(log_output['ph'], 'M')
def testDoubleEnable(self):
    """Enabling tracing while it is already enabled must raise TraceException."""
    try:
        with self._test_trace():
            with self._test_trace():
                pass
    except log.TraceException:
        return
    # self.fail() is not stripped under `python -O`, unlike the old
    # `assert False`, and gives a clearer failure message.
    self.fail('expected log.TraceException when enabling tracing twice')
def testDisable(self):
with self._test_trace(disable=False):
with open(self._log_path, 'r') as f:
self.assertTrue(trace_event.trace_is_enabled())
trace_event.trace_disable()
self.assertEquals(len(json.loads(f.read() + ']')), 1)
self.assertFalse(trace_event.trace_is_enabled())
def testDoubleDisable(self):
with self._test_trace():
pass
trace_event.trace_disable()
def testFlushChanges(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('1')
self.assertEquals(len(json.loads(f.read() + ']')), 1)
f.seek(0)
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 2)
def testFlushNoChanges(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
self.assertEquals(len(json.loads(f.read() + ']')),1)
f.seek(0)
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 1)
def testDoubleFlush(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('1')
self.assertEquals(len(json.loads(f.read() + ']')), 1)
f.seek(0)
trace_event.trace_flush()
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 2)
def testTraceBegin(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.trace_begin('test_event', this='that')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue( current_entry['args']['argv'])
self.assertEquals( current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
def testTraceEnd(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.trace_end('test_event')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testTrace(self):
with self._test_trace():
with trace_event.trace('test_event', this='that'):
pass
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testTracedDecorator(self):
@trace_event.traced("this")
def test_decorator(this="that"):
pass
with self._test_trace():
test_decorator()
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], '__main__.test_decorator')
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], '__main__.test_decorator')
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testClockSyncWithTs(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('id', issue_ts=trace_time.Now())
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'clock_sync')
self.assertTrue(current_entry['args']['issue_ts'])
self.assertEquals(current_entry['ph'], 'c')
def testClockSyncWithoutTs(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('id')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'clock_sync')
self.assertFalse(current_entry['args'].get('issue_ts'))
self.assertEquals(current_entry['ph'], 'c')
def testTime(self):
actual_diff = []
def func1():
trace_begin("func1")
start = time.time()
time.sleep(0.25)
end = time.time()
actual_diff.append(end-start) # Pass via array because of Python scoping
trace_end("func1")
with self._test_trace():
start_ts = time.time()
trace_event.trace_begin('test')
end_ts = time.time()
trace_event.trace_end('test')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
meta_data = log_output[0]
open_data = log_output[1]
close_data = log_output[2]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(open_data['category'], 'python')
self.assertEquals(open_data['name'], 'test')
self.assertEquals(open_data['ph'], 'B')
self.assertEquals(close_data['category'], 'python')
self.assertEquals(close_data['name'], 'test')
self.assertEquals(close_data['ph'], 'E')
event_time_diff = close_data['ts'] - open_data['ts']
recorded_time_diff = (end_ts - start_ts) * 1000000
self.assertLess(math.fabs(event_time_diff - recorded_time_diff), 1000)
def testNestedCalls(self):
with self._test_trace():
trace_event.trace_begin('one')
trace_event.trace_begin('two')
trace_event.trace_end('two')
trace_event.trace_end('one')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
one_open = log_output[1]
two_open = log_output[2]
two_close = log_output[3]
one_close = log_output[4]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(one_open['category'], 'python')
self.assertEquals(one_open['name'], 'one')
self.assertEquals(one_open['ph'], 'B')
self.assertEquals(one_close['category'], 'python')
self.assertEquals(one_close['name'], 'one')
self.assertEquals(one_close['ph'], 'E')
self.assertEquals(two_open['category'], 'python')
self.assertEquals(two_open['name'], 'two')
self.assertEquals(two_open['ph'], 'B')
self.assertEquals(two_close['category'], 'python')
self.assertEquals(two_close['name'], 'two')
self.assertEquals(two_close['ph'], 'E')
self.assertLessEqual(one_open['ts'], two_open['ts'])
self.assertGreaterEqual(one_close['ts'], two_close['ts'])
def testInterleavedCalls(self):
with self._test_trace():
trace_event.trace_begin('one')
trace_event.trace_begin('two')
trace_event.trace_end('one')
trace_event.trace_end('two')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
one_open = log_output[1]
two_open = log_output[2]
two_close = log_output[4]
one_close = log_output[3]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(one_open['category'], 'python')
self.assertEquals(one_open['name'], 'one')
self.assertEquals(one_open['ph'], 'B')
self.assertEquals(one_close['category'], 'python')
self.assertEquals(one_close['name'], 'one')
self.assertEquals(one_close['ph'], 'E')
self.assertEquals(two_open['category'], 'python')
self.assertEquals(two_open['name'], 'two')
self.assertEquals(two_open['ph'], 'B')
self.assertEquals(two_close['category'], 'python')
self.assertEquals(two_close['name'], 'two')
self.assertEquals(two_close['ph'], 'E')
self.assertLessEqual(one_open['ts'], two_open['ts'])
self.assertLessEqual(one_close['ts'], two_close['ts'])
def testMultiprocess(self):
def child_function():
with trace_event.trace('child_event'):
pass
with self._test_trace():
trace_event.trace_begin('parent_event')
trace_event.trace_flush()
p = multiprocessing.Process(target=child_function)
p.start()
self.assertTrue(hasattr(p, "_shimmed_by_trace_event"))
p.join()
trace_event.trace_end('parent_event')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
parent_open = log_output[1]
child_open = log_output[2]
child_close = log_output[3]
parent_close = log_output[4]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(parent_open['category'], 'python')
self.assertEquals(parent_open['name'], 'parent_event')
self.assertEquals(parent_open['ph'], 'B')
self.assertEquals(child_open['category'], 'python')
self.assertEquals(child_open['name'], 'child_event')
self.assertEquals(child_open['ph'], 'B')
self.assertEquals(child_close['category'], 'python')
self.assertEquals(child_close['name'], 'child_event')
self.assertEquals(child_close['ph'], 'E')
self.assertEquals(parent_close['category'], 'python')
self.assertEquals(parent_close['name'], 'parent_event')
self.assertEquals(parent_close['ph'], 'E')
def testMultiprocessExceptionInChild(self):
def bad_child():
trace_event.trace_disable()
with self._test_trace():
p = multiprocessing.Pool(1)
trace_event.trace_begin('parent')
self.assertRaises(Exception, lambda: p.apply(bad_child, ()))
p.close()
p.terminate()
p.join()
trace_event.trace_end('parent')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
meta_data = log_output[0]
parent_open = log_output[1]
parent_close = log_output[2]
self.assertEquals(parent_open['category'], 'python')
self.assertEquals(parent_open['name'], 'parent')
self.assertEquals(parent_open['ph'], 'B')
self.assertEquals(parent_close['category'], 'python')
self.assertEquals(parent_close['name'], 'parent')
self.assertEquals(parent_close['ph'], 'E')
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.DEBUG)
    # Bug fix: `unittest.presentation` does not exist; the standard test
    # entry point is unittest.main().
    unittest.main(verbosity=2)
|
google_nest_controller.py | import time
import threading
import requests
from googlecontroller import GoogleAssistant as _GA_
import librosa
class GoogleNestDevice(_GA_):
    """Thin wrapper around GoogleAssistant for playing audio / TTS on a Nest device."""

    def __init__(self, ip, default_volume=18):
        # IP address of the device on the local network.
        self.ip = ip
        _GA_.__init__(self, ip)
        # Separate controller instance used for playback-state polling.
        self.nest_device = _GA_(host=self.ip)
        # Identity comparison with None (PEP 8) instead of `not x == None`.
        self.volume_num = default_volume if default_volume is not None else 18

    def play(self, url, ignore=False, contenttype="audio/mp3"):
        """Cast `url` to the device.

        Does nothing while something is already playing unless `ignore` is set.
        """
        # Truthiness test replaces the old `ignore == True` comparison.
        if self.cc.media_controller.status.player_state != "PLAYING" or ignore:
            self.cc.wait()
            media = self.cc.media_controller
            media.play_media(url, contenttype)
            media.block_until_active()

    def say(self, text: str, ignore: bool = False, lang: str = "en-UK"):
        """Speak `text` via the Google Translate TTS endpoint.

        Temporarily raises the volume by 18 while speaking, then restores it.
        """
        volume_num = self.volume_num + 18
        self.volume(volume_num)
        speed = "1"
        url = u"https://translate.google.com/translate_tts?ie=UTF-8&q=" + text + "%21&tl=" + lang + "&ttsspeed=" + speed + "&total=1&idx=0&client=tw-ob&textlen=14&tk=594228.1040269"
        # Download the clip only to measure its duration below; use a context
        # manager so the file handle is not leaked (was a bare open().write()).
        with open("./temp.mp3", "wb") as f:
            f.write(requests.get(url).content)
        threading.Thread(target=self.play, args=(url, False,)).start()
        self.block_until_done_playing(audio_length=librosa.get_duration(filename="./temp.mp3"))
        volume_num = volume_num - 18
        self.volume(volume_num)

    def block_until_done_playing(self, audio_length):
        """Block until the current clip has finished playing.

        NOTE(review): this sleeps for the full clip length only after the
        player first reports idle, which looks inverted — confirm against the
        media controller's `player_is_idle` semantics.
        """
        while True:
            if self.nest_device.cc.media_controller.status.player_is_idle:
                time.sleep(audio_length)
                break
            time.sleep(0.1)
caching_test.py | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""st.caching unit tests."""
import threading
import unittest
import pytest
from mock import patch
import streamlit as st
from streamlit import caching
class CacheTest(unittest.TestCase):
    """Unit tests for the @st.cache decorator behavior."""
    def tearDown(self):
        # Some of these tests reach directly into _cache_info and twiddle it.
        # Reset default values on teardown.
        st.caching._cache_info.within_cached_func = 0
        st.caching._cache_info.suppress_st_function_warning = 0
    def test_simple(self):
        """A cached function returns its value on first and repeated calls."""
        @st.cache
        def foo():
            return 42
        self.assertEqual(foo(), 42)
        self.assertEqual(foo(), 42)
    def test_deprecated_kwarg(self):
        """Using the removed `ignore_hash` kwarg raises with a rename hint."""
        with pytest.raises(Exception) as e:
            @st.cache(ignore_hash=True)
            def foo():
                return 42
        assert (
            "The `ignore_hash` argument has been renamed to `allow_output_mutation`."
            in str(e.value)
        )
    @patch.object(st, "warning")
    def test_args(self, warning):
        """Cached function runs once per distinct argument value."""
        # `called` is a one-element list so the inner function can mutate it.
        called = [False]
        @st.cache
        def f(x):
            called[0] = True
            return x
        self.assertFalse(called[0])
        f(0)
        self.assertTrue(called[0])
        called = [False]  # Reset called
        f(0)
        # same argument -> served from cache, body not re-run
        self.assertFalse(called[0])
        f(1)
        # new argument -> body runs again
        self.assertTrue(called[0])
        warning.assert_not_called()
    @patch.object(st, "warning")
    def test_mutate_return(self, warning):
        """Mutating a cached return value triggers a warning on next call."""
        @st.cache
        def f():
            return [0, 1]
        r = f()
        r[0] = 1
        warning.assert_not_called()
        f()
        warning.assert_called()
    @patch.object(st, "warning")
    def test_mutate_args(self, warning):
        """Mutating an input arg changes the cache key, so no warning."""
        @st.cache
        def foo(d):
            d["answer"] += 1
            return d["answer"]
        d = {"answer": 0}
        self.assertNotEqual(foo(d), foo(d))
        warning.assert_not_called()
    @patch("streamlit.caching._show_cached_st_function_warning")
    def test_cached_st_function_warning(self, warning):
        """Calling st.* functions inside a cached function warns (unless
        suppressed), and the warning state resets correctly afterwards."""
        st.text("foo")
        warning.assert_not_called()
        @st.cache
        def cached_func():
            st.text("Inside cached func")
        cached_func()
        warning.assert_called_once()
        warning.reset_mock()
        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()
        # Test warning suppression
        @st.cache(suppress_st_warning=True)
        def suppressed_cached_func():
            st.text("No warnings here!")
        suppressed_cached_func()
        warning.assert_not_called()
        # Test nested st.cache functions
        @st.cache
        def outer():
            @st.cache
            def inner():
                st.text("Inside nested cached func")
            return inner()
        outer()
        warning.assert_called_once()
        warning.reset_mock()
        # Test st.cache functions that raise errors
        with self.assertRaises(RuntimeError):
            @st.cache
            def cached_raise_error():
                st.text("About to throw")
                raise RuntimeError("avast!")
            cached_raise_error()
        warning.assert_called_once()
        warning.reset_mock()
        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()
        # Test st.cache functions with widgets
        @st.cache
        def cached_widget():
            st.button("Press me!")
        cached_widget()
        warning.assert_called_once()
        warning.reset_mock()
        # Make sure everything got reset properly
        st.text("foo")
        warning.assert_not_called()
    def test_caching_counter(self):
        """Test that _within_cached_function_counter behaves properly in
        multiple threads."""
        def get_counter():
            return caching._cache_info.within_cached_func
        def set_counter(val):
            caching._cache_info.within_cached_func = val
        self.assertEqual(0, get_counter())
        set_counter(1)
        self.assertEqual(1, get_counter())
        values_in_thread = []
        def thread_test():
            values_in_thread.append(get_counter())
            set_counter(55)
            values_in_thread.append(get_counter())
        thread = threading.Thread(target=thread_test)
        thread.start()
        thread.join()
        # counter is thread-local: new thread starts at 0, sees its own 55
        self.assertEqual([0, 55], values_in_thread)
        # The other thread should not have modified the main thread
        self.assertEqual(1, get_counter())
# Temporarily turn off these tests since there's no Cache object in __init__
# right now.
class CachingObjectTest(unittest.TestCase):
    """Tests for the object-style ``st.Cache`` API.

    Every method is prefixed with ``off_`` so unittest does not collect
    them: there is currently no Cache object exported from __init__.
    """

    def off_test_simple(self):
        expected = 42
        for _attempt in range(2):
            cache = st.Cache()
            if cache:
                cache.value = expected
            self.assertEqual(cache.value, expected)

    def off_test_allow_output_mutation(self):
        expected = 42
        for _attempt in range(2):
            cache = st.Cache(allow_output_mutation=True)
            if cache:
                cache.value = expected
            self.assertEqual(cache.value, expected)

    def off_test_has_changes(self):
        expected = 42
        for _attempt in range(2):
            cache = st.Cache()
            if cache.has_changes():
                cache.value = expected
            self.assertEqual(cache.value, expected)

    @patch.object(st, "warning")
    def off_test_mutate(self, warning):
        for _attempt in range(2):
            cache = st.Cache()
            if cache:
                cache.value = [0, 1]
            cache.value[0] = 1
            warning.assert_called()
|
ringbuffer.py | # ----------------------------------------------------------------------------
# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Module for watching a directory and deleting the oldest Digital RF files."""
from __future__ import absolute_import, division, print_function
import datetime
import errno
import os
import re
import sys
import threading
import time
import traceback
from collections import OrderedDict, defaultdict, deque, namedtuple
from . import list_drf, util, watchdog_drf
__all__ = ("DigitalRFRingbufferHandler", "DigitalRFRingbuffer")
class DigitalRFRingbufferHandlerBase(watchdog_drf.DigitalRFEventHandler):
    """Base event handler for implementing a ringbuffer of Digital RF files.

    This handler tracks files but does nothing to expire them. At least one
    expirer mixin must be used with this class in order to create a complete
    ringbuffer.

    Files are keyed by the millisecond time parsed from their names and are
    grouped by (channel path, name), with one expiration queue per group.
    """
    # key: file time in milliseconds, size: file size in bytes,
    # path: full file path, group: (channel path, name) tuple
    FileRecord = namedtuple("FileRecord", ("key", "size", "path", "group"))
    def __init__(
        self,
        verbose=False,
        dryrun=False,
        starttime=None,
        endtime=None,
        include_drf=True,
        include_dmd=True,
    ):
        """Create a ringbuffer handler.

        Other Parameters
        ----------------
        verbose : bool
            If True, print debugging info about tracked files.
        dryrun : bool
            If True, do not actually delete files when expiring them.
        starttime : datetime.datetime
            Data covering this time or after will be included. This has no
            effect on property files.
        endtime : datetime.datetime
            Data covering this time or earlier will be included. This has no
            effect on property files.
        include_drf : bool
            If True, include Digital RF files.
        include_dmd : bool
            If True, include Digital Metadata files.
        """
        self.verbose = verbose
        self.dryrun = dryrun
        # separately track file groups (ch path, name) with different queues
        self.queues = defaultdict(deque)
        # map of file path -> FileRecord for every tracked file
        self.records = {}
        # acquire the record lock to modify the queue or record dicts
        self._record_lock = threading.RLock()
        super(DigitalRFRingbufferHandlerBase, self).__init__(
            starttime=starttime,
            endtime=endtime,
            include_drf=include_drf,
            include_dmd=include_dmd,
            include_drf_properties=False,
            include_dmd_properties=False,
        )
    def status(self):
        """Return status string about state of the ringbuffer."""
        nfiles = sum(len(q) for q in self.queues.values())
        return "{0} files".format(nfiles)
    def _get_file_record(self, path):
        """Return self.FileRecord tuple for file at path, or None.

        Returns None when no filename regex yields a time key or when the
        file cannot be stat-ed (e.g. it was already deleted).
        """
        # get time key (seconds) from file path
        key = None
        # self.regexes comes from the DigitalRFEventHandler base class
        for r in self.regexes:
            m = r.match(path)
            try:
                secs = int(m.group("secs"))
            except (AttributeError, IndexError, TypeError):
                # no match, or regex matched but there is no 'secs' in regex
                continue
            else:
                try:
                    frac = int(m.group("frac"))
                except (IndexError, TypeError):
                    frac = 0
                # key is time in milliseconds
                key = secs * 1000 + frac
                break
        if key is None:
            return
        # ringbuffer by file groups, which are a channel path and name
        group = (m.group("chpath"), m.group("name"))
        try:
            stat = os.stat(path)
        except OSError:
            # file disappeared between the event and the stat call
            if self.verbose:
                traceback.print_exc()
            return
        else:
            size = stat.st_size
        return self.FileRecord(key=key, size=size, path=path, group=group)
    def _add_to_queue(self, rec):
        """Add record to queue."""
        # find insertion index for record (queue sorted in ascending order)
        # we expect new records to go near the end (most recent)
        queue = self.queues[rec.group]
        with self._record_lock:
            for k, (kkey, kpath) in enumerate(reversed(queue)):
                if rec.key > kkey:
                    # we've found the insertion point at index k from end
                    break
                elif rec.path == kpath:
                    # already in ringbuffer, so simply return
                    return
            else:
                # new key is oldest (or queue is empty),
                # needs to be put at beginning
                queue.appendleft((rec.key, rec.path))
                k = None
            if k is not None:
                # insert record at index k
                # (rotate position k to the end, append, rotate back)
                queue.rotate(k)
                queue.append((rec.key, rec.path))
                queue.rotate(-k)
    def _remove_from_queue(self, rec):
        """Remove record from queue."""
        queue = self.queues[rec.group]
        with self._record_lock:
            queue.remove((rec.key, rec.path))
    def _expire_oldest_from_group(self, group):
        """Expire oldest record from group and delete corresponding file."""
        # oldest file is at start of sorted records deque
        # (don't just popleft on the queue because we want to call
        # _remove_from_queue, which is overridden by subclasses)
        with self._record_lock:
            key, path = self.queues[group][0]
            rec = self.records.pop(path)
            self._remove_from_queue(rec)
            if self.verbose:
                now = datetime.datetime.utcnow().replace(microsecond=0)
                print("{0} | Expired {1}".format(now, rec.path))
            # delete file
            if not self.dryrun:
                try:
                    os.remove(rec.path)
                except EnvironmentError as e:
                    # Python 2 and 3 compatible substitute for FileNotFoundError
                    if e.errno == errno.ENOENT:
                        # path doesn't exist like we thought it did, oh well
                        if self.verbose:
                            traceback.print_exc()
                    else:
                        # try again if path still exists, otherwise it's ok to move on
                        if os.path.exists(rec.path):
                            os.remove(rec.path)
                # try to clean up directory in case it is empty
                head, tail = os.path.split(rec.path)
                try:
                    os.rmdir(head)
                except OSError:
                    # directory not empty, just move on
                    pass
    def _expire(self, group):
        """Expire records until ringbuffer constraint is met."""
        # must override with mixins for any expiration to occur
        pass
    def _add_record(self, rec):
        """Add a record to the ringbuffer and expire old ones if necessary."""
        with self._record_lock:
            # make sure record does not already exist, remove if it does
            if rec.path in self.records:
                if self.verbose:
                    msg = (
                        "Adding record for {0} but it already exists in"
                        " ringbuffer, modify instead."
                    ).format(rec.path)
                    print(msg)
                self._modify_record(rec)
                # NOTE(review): execution intentionally(?) falls through and
                # re-inserts below; _add_to_queue tolerates the duplicate
                # path, but size-tracking mixins may double-count the bytes.
                # Confirm whether a `return` was intended here.
            # add record to dict so record information can be looked up by path
            self.records[rec.path] = rec
            # add record to expiration queue
            self._add_to_queue(rec)
            if self.verbose:
                now = datetime.datetime.utcnow().replace(microsecond=0)
                print("{0} | Added {1}".format(now, rec.path))
            # expire oldest files until size constraint is met
            self._expire(rec.group)
    def _modify_record(self, rec):
        """Modify a record in the ringbuffer, return whether it was done."""
        with self._record_lock:
            if rec.path not in self.records:
                # don't have record in ringbuffer when we should, add instead
                if self.verbose:
                    msg = (
                        "Missing modified file {0} from ringbuffer, adding" " instead."
                    ).format(rec.path)
                    print(msg)
                self._add_record(rec)
                return True
            # nothing to do otherwise
            return False
    def _remove_record(self, path):
        """Remove a record from the ringbuffer."""
        # get and remove record id if path is in the ringbuffer, return if not
        with self._record_lock:
            try:
                rec = self.records.pop(path)
            except KeyError:
                # we probably got here from a FileDeletedEvent after expiring
                # an old record, but in any case, no harm to just ignore
                return
            # remove record from ringbuffer
            self._remove_from_queue(rec)
            if self.verbose:
                now = datetime.datetime.utcnow().replace(microsecond=0)
                print("{0} | Removed {1}".format(now, rec.path))
    def add_files(self, paths, sort=True):
        """Create file records from paths and add to ringbuffer."""
        # get records and add from oldest to newest by key (time)
        records = (self._get_file_record(p) for p in paths)
        # filter out invalid paths (can't extract a time, doesn't exist)
        records = (r for r in records if r is not None)
        if sort:
            records = sorted(records)
        for rec in records:
            self._add_record(rec)
    def modify_files(self, paths, sort=True):
        """Create file records from paths and update in ringbuffer."""
        # get records and add from oldest to newest by key (time)
        records = (self._get_file_record(p) for p in paths)
        # filter out invalid paths (can't extract a time, doesn't exist)
        records = (r for r in records if r is not None)
        if sort:
            records = sorted(records)
        for rec in records:
            self._modify_record(rec)
    def remove_files(self, paths):
        """Retrieve file records from paths and remove from ringbuffer."""
        for p in paths:
            self._remove_record(p)
    def on_created(self, event):
        """Add new file to ringbuffer."""
        self.add_files([event.src_path])
    def on_deleted(self, event):
        """Remove file from ringbuffer if it was deleted externally."""
        self.remove_files([event.src_path])
    def on_modified(self, event):
        """Update modified file in ringbuffer."""
        self.modify_files([event.src_path])
    def on_moved(self, event):
        """Track moved file in ringbuffer."""
        self.remove_files([event.src_path])
        self.add_files([event.dest_path])
class CountExpirer(object):
    """Ringbuffer handler mixin that limits the number of files per channel.

    Whenever a channel holds more files than the configured count, the
    oldest files in that channel are deleted until the limit is satisfied.
    """

    def __init__(self, *args, **kwargs):
        """Create a ringbuffer handler limited to `count` files per channel."""
        self.count = kwargs.pop("count")
        super(CountExpirer, self).__init__(*args, **kwargs)

    def status(self):
        """Return status string about state of the ringbuffer."""
        base_status = super(CountExpirer, self).status()
        lengths = [len(q) for q in self.queues.values()]
        fullest = max(lengths) if lengths else 0
        pct_full = int(float(fullest) / self.count * 100)
        return ", ".join((base_status, "{0}% count".format(pct_full)))

    def _expire(self, group):
        """Expire records until file count constraint is met."""
        with self._record_lock:
            group_queue = self.queues[group]
            while len(group_queue) > self.count:
                self._expire_oldest_from_group(group)
            super(CountExpirer, self)._expire(group)
class SizeExpirer(object):
    """Ringbuffer handler mixin to track the space used by all channels.

    This expirer tracks the amount of space that new or modified files
    consume for all channels together. When the space threshold is exceeded,
    the oldest file of any channel is deleted (unless it would empty the
    channel) until the size constraint is met.
    """
    def __init__(self, *args, **kwargs):
        """Create a ringbuffer handler limited to `size` total bytes."""
        self.size = kwargs.pop("size")
        # running total of bytes consumed by all tracked files
        self.active_size = 0
        super(SizeExpirer, self).__init__(*args, **kwargs)
    def status(self):
        """Return status string about state of the ringbuffer."""
        status = super(SizeExpirer, self).status()
        pct_full = int(float(self.active_size) / self.size * 100)
        return ", ".join((status, "{0}% size".format(pct_full)))
    def _add_to_queue(self, rec):
        """Add record to queue, tracking file size."""
        with self._record_lock:
            super(SizeExpirer, self)._add_to_queue(rec)
            # NOTE(review): the base method returns early (without inserting)
            # when the path is already queued, but the size is added here
            # unconditionally -- duplicate add events could inflate
            # active_size; confirm upstream events cannot duplicate.
            self.active_size += rec.size
    def _remove_from_queue(self, rec):
        """Remove record from queue, tracking file size."""
        with self._record_lock:
            super(SizeExpirer, self)._remove_from_queue(rec)
            self.active_size -= rec.size
    def _expire_oldest(self, group):
        """Expire oldest record overall, preferring group if tied."""
        with self._record_lock:
            # remove oldest regardless of group unless it would empty group,
            # but prefer `group` if tie
            removal_group = group
            # oldest file is at start of sorted records deque
            try:
                oldest_key, oldest_path = self.queues[group][0]
            except IndexError:
                # preferred group is empty; any other group's oldest wins
                oldest_key = float("inf")
            for grp in self.queues.keys():
                if grp != group:
                    queue = self.queues[grp]
                    # require more than one entry so expiring from another
                    # group never empties that channel
                    if len(queue) > 1:
                        key, path = queue[0]
                        if key < oldest_key:
                            oldest_key = key
                            removal_group = grp
            self._expire_oldest_from_group(removal_group)
    def _expire(self, group):
        """Expire records until overall file size constraint is met."""
        with self._record_lock:
            while self.active_size > self.size:
                self._expire_oldest(group)
            super(SizeExpirer, self)._expire(group)
    def _modify_record(self, rec):
        """Modify a record in the ringbuffer."""
        with self._record_lock:
            # have parent handle cases where we actually need to add or delete
            handled = super(SizeExpirer, self)._modify_record(rec)
            if not handled:
                # if we're here, we know that the record exists and needs to
                # be modified (in this case, update the size)
                oldrec = self.records[rec.path]
                self.records[rec.path] = rec
                self.active_size -= oldrec.size
                self.active_size += rec.size
                if self.verbose:
                    now = datetime.datetime.utcnow().replace(microsecond=0)
                    print("{0} | Updated {1}".format(now, rec.path))
class TimeExpirer(object):
    """Ringbuffer handler mixin that bounds the time span of each channel.

    Tracks the sample timestamp of files per channel; whenever the span
    between the newest and oldest timestamps exceeds the configured
    duration, the oldest files are expired until the span fits.
    """

    def __init__(self, *args, **kwargs):
        """Create a ringbuffer handler bounded by `duration` milliseconds."""
        # duration is time span in milliseconds
        self.duration = kwargs.pop("duration")
        super(TimeExpirer, self).__init__(*args, **kwargs)

    @staticmethod
    def _queue_duration(queue):
        """Get time span in milliseconds of files in a queue."""
        if not queue:
            return 0
        oldest_key = queue[0][0]
        newest_key = queue[-1][0]
        return newest_key - oldest_key

    def status(self):
        """Return status string about state of the ringbuffer."""
        base_status = super(TimeExpirer, self).status()
        spans = [self._queue_duration(q) for q in self.queues.values()]
        widest = max(spans) if spans else 0
        pct_full = int(float(widest) / self.duration * 100)
        return ", ".join((base_status, "{0}% duration".format(pct_full)))

    def _expire(self, group):
        """Expire records until time span constraint is met."""
        with self._record_lock:
            group_queue = self.queues[group]
            while self._queue_duration(group_queue) > self.duration:
                self._expire_oldest_from_group(group)
            super(TimeExpirer, self)._expire(group)
def DigitalRFRingbufferHandler(size=None, count=None, duration=None, **kwargs):
    """Build a ringbuffer event handler instance from the given constraints.

    Parameters
    ----------
    size : float | int | None
        Size of the ringbuffer in bytes. Negative values are used to
        indicate all available space except the given amount. If None, no
        size constraint is used.
    count : int | None
        Maximum number of files *for each channel*. If None, no count
        constraint is used.
    duration : int | float | None
        Maximum time span *for each channel* in milliseconds. If None, no
        duration constraint is used.

    Other Parameters
    ----------------
    verbose : bool
        If True, print debugging info about the files that are created and
        deleted and how much space they consume.
    dryrun : bool
        If True, do not actually delete files when expiring them from the
        ringbuffer. Use for testing only!
    starttime : datetime.datetime
        Data covering this time or after will be included. This has no
        effect on property files.
    endtime : datetime.datetime
        Data covering this time or earlier will be included. This has no
        effect on property files.
    include_drf : bool
        If True, include Digital RF files. If False, ignore Digital RF
        files.
    include_dmd : bool
        If True, include Digital Metadata files. If False, ignore Digital
        Metadata files.
    """
    if size is None and count is None and duration is None:
        errstr = "One of `size`, `count`, or `duration` must not be None."
        raise ValueError(errstr)
    # collect mixins front-to-back so the resulting MRO is
    # Count -> Time -> Size -> Base, the order the expirers expect
    mixins = []
    if count is not None:
        mixins.append(CountExpirer)
        kwargs["count"] = count
    if duration is not None:
        mixins.append(TimeExpirer)
        kwargs["duration"] = duration
    if size is not None:
        mixins.append(SizeExpirer)
        kwargs["size"] = size
    bases = tuple(mixins) + (DigitalRFRingbufferHandlerBase,)
    # now create the class with the desired mixins
    docstring = """Event handler for implementing a ringbuffer of Digital RF files.
    This class inherits from a base class (DigitalRFRingbufferHandlerBase)
    and some expirer mixins determined from the class factor arguments.
    The expirers determine when a file needs to be expired from the
    ringbuffer based on size, count, or duration constraints.
    """
    handler_cls = type("DigitalRFRingbufferHandler", bases, {"__doc__": docstring})
    return handler_cls(**kwargs)
class DigitalRFRingbuffer(object):
    """Monitor a directory and delete old Digital RF files when space is full.

    This class combines an event handler and a file system observer. It
    monitors a directory and its subdirectories for new Digital RF and Digital
    Metadata files. When the ringbuffer threshold in size, count, or duration
    is exceeded, the oldest files are deleted until the constraint is met.
    """

    def __init__(
        self,
        path,
        size=-200e6,
        count=None,
        duration=None,
        verbose=False,
        status_interval=10,
        dryrun=False,
        starttime=None,
        endtime=None,
        include_drf=True,
        include_dmd=True,
        force_polling=False,
    ):
        """Create Digital RF ringbuffer object. Use start/run method to begin.

        Parameters
        ----------
        path : str
            Directory in which the ringbuffer is enforced.
        size : float | int | None
            Size of the ringbuffer in bytes. Negative values are used to
            indicate all available space except the given amount. If None, no
            size constraint is used.
        count : int | None
            Maximum number of files *for each channel*. If None, no count
            constraint is used.
        duration : int | float | None
            Maximum time span *for each channel* in milliseconds. If None, no
            duration constraint is used.

        Other Parameters
        ----------------
        verbose : bool
            If True, print debugging info about the files that are created and
            deleted and how much space they consume.
        status_interval : None | int
            Interval in seconds between printing of status updates. If None,
            do not print status updates.
        dryrun : bool
            If True, do not actually delete files when expiring them from the
            ringbuffer. Use for testing only!
        starttime : datetime.datetime
            Data covering this time or after will be included. This has no
            effect on property files.
        endtime : datetime.datetime
            Data covering this time or earlier will be included. This has no
            effect on property files.
        include_drf : bool
            If True, include Digital RF files. If False, ignore Digital RF
            files.
        include_dmd : bool
            If True, include Digital Metadata files. If False, ignore Digital
            Metadata files.
        force_polling : bool
            If True, force the watchdog to use polling instead of the default
            observer.
        """
        self.path = os.path.abspath(path)
        self.size = size
        self.count = count
        self.duration = duration
        self.verbose = verbose
        self.status_interval = status_interval
        self.dryrun = dryrun
        self.starttime = starttime
        self.endtime = endtime
        self.include_drf = include_drf
        self.include_dmd = include_dmd
        self.force_polling = force_polling
        self._start_time = None
        # background threads started by start()/_restart(), tracked so a
        # restart can wait for them to finish first
        self._task_threads = []
        if self.size is None and self.count is None and self.duration is None:
            errstr = "One of `size`, `count`, or `duration` must not be None."
            raise ValueError(errstr)
        if not self.include_drf and not self.include_dmd:
            errstr = "One of `include_drf` or `include_dmd` must be True."
            raise ValueError(errstr)
        if self.status_interval is None:
            # infinity makes the modulo check in join() never fire
            self.status_interval = float("inf")
        if self.size is not None:
            if self.size < 0:
                # get available space and reduce it by the (negative) size
                # value to get the actual size to use
                root = self.path
                # walk up to the nearest existing directory for statvfs
                while not os.path.isdir(root):
                    root = os.path.dirname(root)
                statvfs = os.statvfs(root)
                bytes_available = statvfs.f_frsize * statvfs.f_bavail
                if os.path.isdir(self.path):
                    # space already consumed by existing ringbuffer files
                    # counts as available for sizing purposes
                    existing = list_drf.ilsdrf(
                        self.path,
                        starttime=self.starttime,
                        endtime=self.endtime,
                        include_drf=self.include_drf,
                        include_dmd=self.include_dmd,
                        include_drf_properties=False,
                        include_dmd_properties=False,
                    )
                    for p in existing:
                        try:
                            bytes_available += os.path.getsize(p)
                        except OSError:
                            # catch instances where file no longer exists
                            if self.verbose:
                                traceback.print_exc()
                            # FIX: was `return`, which silently aborted
                            # __init__ and left the object half-constructed
                            # (no event_handler/observer); skip the missing
                            # file and continue sizing instead
                            continue
                self.size = max(bytes_available + self.size, 0)
        self.event_handler = DigitalRFRingbufferHandler(
            size=self.size,
            count=self.count,
            duration=self.duration,
            verbose=self.verbose,
            dryrun=self.dryrun,
            starttime=self.starttime,
            endtime=self.endtime,
            include_drf=self.include_drf,
            include_dmd=self.include_dmd,
        )
        self._init_observer()

    def _init_observer(self):
        """Create the watchdog observer and schedule the event handler."""
        self.observer = watchdog_drf.DirWatcher(
            self.path, force_polling=self.force_polling
        )
        self.observer.schedule(self.event_handler, self.path, recursive=True)

    def _add_existing_files(self):
        """Add existing files on disk to ringbuffer."""
        # pause dispatching while we add existing files so files are added
        # to the ringbuffer in the correct order
        with self.observer.paused_dispatching():
            # add existing files to ringbuffer handler
            existing = list_drf.ilsdrf(
                self.path,
                starttime=self.starttime,
                endtime=self.endtime,
                include_drf=self.include_drf,
                include_dmd=self.include_dmd,
                include_drf_properties=False,
                include_dmd_properties=False,
            )
            # do not sort because existing will already be sorted and we
            # don't want to convert to a list
            self.event_handler.add_files(existing, sort=False)

    def start(self):
        """Start ringbuffer process."""
        self._start_time = datetime.datetime.utcnow().replace(microsecond=0)
        # start observer to add new files
        self.observer.start()
        if self.dryrun:
            print("DRY RUN (files will not be deleted):")
        now = datetime.datetime.utcnow().replace(microsecond=0)
        print("{0} | Starting {1}:".format(now, self))
        sys.stdout.flush()
        # add files that already existed before the observer started
        # (do it in another thread so we can get to join())
        thread = threading.Thread(target=self._add_existing_files)
        thread.daemon = True
        thread.start()
        self._task_threads.append(thread)

    def _verify_ringbuffer_files(self, inbuffer):
        """Verify ringbuffer's `inbuffer` set of files with files on disk."""
        # get set of all files that should be in the ringbuffer right away
        # so we duplicate as few files from new events as possible
        # events that happen while we build this file set can be duplicated
        # when we verify the ringbuffer state below, but that's ok
        ondisk = set(
            list_drf.ilsdrf(
                self.path,
                starttime=self.starttime,
                endtime=self.endtime,
                include_drf=self.include_drf,
                include_dmd=self.include_dmd,
                include_drf_properties=False,
                include_dmd_properties=False,
            )
        )
        # now any file in inbuffer that is not in ondisk is a missed or
        # duplicate deletion event, so remove those files
        deletions = inbuffer - ondisk
        self.event_handler.remove_files(deletions)
        # any file in ondisk that is not in inbuffer is a missed or duplicate
        # creation event, so add those files
        # FIX: was `ondisk - deletions`, which (since `deletions` and
        # `ondisk` are disjoint) equaled all of `ondisk` and re-added every
        # file on disk; subtracting `inbuffer` matches the comment's intent
        creations = ondisk - inbuffer
        self.event_handler.add_files(creations, sort=True)
        # any file in both ondisk and inbuffer could have a missed modify
        # event, so trigger a modify event for those files
        possibly_modified = inbuffer & ondisk
        self.event_handler.modify_files(possibly_modified, sort=True)

    def _restart(self):
        """Restart observer using existing event handlers."""
        # get list of files currently in ringbuffer before we modify it
        # so we can detect missed events from after crash until ondisk
        # file list is complete
        inbuffer = set(self.event_handler.records.keys())
        # make a new observer and start it ASAP
        self._init_observer()
        self.observer.start()
        # verify existing state of ringbuffer
        # (do it in another thread so we can get to join())
        thread = threading.Thread(
            target=self._verify_ringbuffer_files, kwargs=dict(inbuffer=inbuffer)
        )
        thread.daemon = True
        thread.start()
        self._task_threads.append(thread)

    def join(self):
        """Wait until a KeyboardInterrupt is received to stop ringbuffer."""
        try:
            while True:
                now = datetime.datetime.utcnow().replace(microsecond=0)
                interval = int((now - self._start_time).total_seconds())
                # print a status line every `status_interval` seconds
                if (interval % self.status_interval) == 0:
                    status = self.event_handler.status()
                    print("{0} | ({1})".format(now, status))
                    sys.stdout.flush()
                if not self.observer.all_alive():
                    # if not all threads of the observer are alive,
                    # reinitialize and restart
                    print("Found stopped thread, reinitializing and restarting.")
                    sys.stdout.flush()
                    # first make sure all task threads have stopped
                    for thread in self._task_threads:
                        while thread.is_alive():
                            print("Waiting for task thread to finish.")
                            sys.stdout.flush()
                            thread.join(1)
                    del self._task_threads[:]
                    self._restart()
                time.sleep(1)
        except KeyboardInterrupt:
            # catch keyboard interrupt and simply exit
            pass
        finally:
            self.stop()
            sys.stdout.write("\n")
            sys.stdout.flush()
        self.observer.join()

    def run(self):
        """Start ringbuffer and wait for a KeyboardInterrupt to stop."""
        self.start()
        self.join()

    def stop(self):
        """Stop ringbuffer process."""
        self.observer.stop()

    def __str__(self):
        """Return string describing ringbuffer."""
        amounts = []
        if self.size is not None:
            amounts.append("{0} bytes".format(self.size))
        if self.count is not None:
            amounts.append("{0} files".format(self.count))
        if self.duration is not None:
            amounts.append("{0} s".format(self.duration / 1e3))
        s = "DigitalRFRingbuffer of ({0}) in {1}".format(", ".join(amounts), self.path)
        return s
def _build_ringbuffer_parser(Parser, *args):
    """Build and return the argument parser for the ringbuffer CLI.

    Parameters
    ----------
    Parser : callable
        Parser constructor (e.g. argparse.ArgumentParser or a subparser
        factory), called as ``Parser(*args, description=...)``.

    Returns
    -------
    parser
        Parser with all ringbuffer arguments added and ``func`` defaulted
        to `_run_ringbuffer` for dispatch.
    """
    desc = (
        "Enforce ringbuffer of Digital RF and Digital Metadata files. When"
        " the space threshold is exceeded, the oldest files are deleted until"
        " the size constraint is met."
    )
    parser = Parser(*args, description=desc)
    parser.add_argument("path", help="Directory in which to enforce ringbuffer.")
    # size is kept as a string here; _run_ringbuffer parses units/suffixes
    parser.add_argument(
        "-z",
        "--size",
        default=None,
        help="""Size of ringbuffer, in bytes or using unit symbols (e.g 100GB).
        Negative values are used to indicate all available space except
        the given amount. (default: -200MB if no count or duration)""",
    )
    parser.add_argument(
        "-c",
        "--count",
        type=int,
        default=None,
        help="""Max file count for each channel. (default: %(default)s)""",
    )
    # duration is kept as a string; _run_ringbuffer evaluates and converts it
    parser.add_argument(
        "-l",
        "--duration",
        default=None,
        help="""Max duration for each channel in seconds.
        (default: %(default)s)""",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Print the name new/deleted files and the space consumed.",
    )
    parser.add_argument(
        "-p",
        "--status_interval",
        type=int,
        default=10,
        help="""Interval in seconds between printing of status updates.
        (default: %(default)s)""",
    )
    parser.add_argument(
        "-n",
        "--dryrun",
        action="store_true",
        help="Do not delete files when expiring them from the ringbuffer.",
    )
    # shared start/end time arguments
    parser = list_drf._add_time_group(parser)
    includegroup = parser.add_argument_group(title="include")
    includegroup.add_argument(
        "--nodrf",
        dest="include_drf",
        action="store_false",
        help="""Do not ringbuffer Digital RF HDF5 files.
        (default: False)""",
    )
    includegroup.add_argument(
        "--nodmd",
        dest="include_dmd",
        action="store_false",
        help="""Do not ringbuffer Digital Metadata HDF5 files.
        (default: False)""",
    )
    # shared watchdog arguments (e.g. polling options)
    parser = watchdog_drf._add_watchdog_group(parser)
    parser.set_defaults(func=_run_ringbuffer)
    return parser
def _run_ringbuffer(args):
    """Run a DigitalRFRingbuffer from parsed command-line arguments.

    Normalizes the string-valued `size` and `duration` arguments into
    numbers, parses time identifiers, installs a SIGTERM handler, and runs
    the ringbuffer until interrupted.
    """
    import signal

    # parse size string into number of bytes
    if args.size == "":
        args.size = None
    if args.size is not None:
        suffixes = OrderedDict(
            [
                ("B", 1),
                ("KB", 1000 ** 1),
                ("KiB", 1024 ** 1),
                ("MB", 1000 ** 2),
                ("MiB", 1024 ** 2),
                ("GB", 1000 ** 3),
                ("GiB", 1024 ** 3),
                ("TB", 1000 ** 4),
                ("TiB", 1024 ** 4),
                ("PB", 1000 ** 5),
                ("PiB", 1024 ** 5),
            ]
        )
        m = re.match(r"(?P<num>\-?\d+\.?\d*)(?P<suf>\D*)", args.size)
        if not m:
            raise ValueError(
                "Size string not recognized. " "Use number followed by suffix."
            )
        numstr = m.group("num")
        # FIX: the regex guarantees `numstr` is a plain numeric literal, so
        # parse it directly instead of calling eval() on CLI input (eval on
        # untrusted input is a code-execution risk); the int-vs-float result
        # type of the old eval() is preserved
        sizenum = float(numstr) if "." in numstr else int(numstr)
        suf = m.group("suf").strip()
        if not suf:
            args.size = sizenum
        elif suf in suffixes:
            args.size = sizenum * suffixes[suf]
        else:
            raise ValueError(
                "Size suffix not recognized. Use one of:\n"
                "{0}".format(list(suffixes.keys()))
            )
    elif args.count is None and args.duration is None:
        # no constraint given at all: default to all available space
        # except 200 MB
        args.size = -200e6
    # evaluate duration to float, from seconds to milliseconds
    if args.duration is not None:
        # SECURITY NOTE: eval() on a command-line value executes arbitrary
        # code; it is retained (flagged, not replaced) because it appears to
        # deliberately allow arithmetic expressions such as "60*60"
        args.duration = float(eval(args.duration)) * 1e3
    if args.starttime is not None:
        args.starttime = util.parse_identifier_to_time(args.starttime)
    if args.endtime is not None:
        args.endtime = util.parse_identifier_to_time(
            args.endtime, ref_datetime=args.starttime
        )
    kwargs = vars(args).copy()
    # `func` is the argparse dispatch target, not a ringbuffer argument
    del kwargs["func"]
    # handle SIGTERM (getting killed) gracefully by calling sys.exit
    def sigterm_handler(signal, frame):
        print("Killed")
        sys.stdout.flush()
        sys.exit(128 + signal)
    signal.signal(signal.SIGTERM, sigterm_handler)
    ringbuffer = DigitalRFRingbuffer(**kwargs)
    print("Type Ctrl-C to quit.")
    ringbuffer.run()
if __name__ == "__main__":
    # Run the ringbuffer as a standalone command-line tool.
    from argparse import ArgumentParser
    parser = _build_ringbuffer_parser(ArgumentParser)
    args = parser.parse_args()
    # dispatch to _run_ringbuffer (set via parser.set_defaults(func=...))
    args.func(args)
|
messaging.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""An implementation of the session and presentation layers as used in the Debug
Adapter Protocol (DAP): channels and their lifetime, JSON messages, requests,
responses, and events.
https://microsoft.github.io/debug-adapter-protocol/overview#base-protocol
"""
import collections
import contextlib
import functools
import itertools
import os
import socket
import sys
import threading
from debugpy.common import compat, fmt, json, log
from debugpy.common.compat import unicode
class JsonIOError(IOError):
    """Raised when a read or write operation on a JsonIOStream fails."""

    def __init__(self, *args, **kwargs):
        stream = kwargs.pop("stream")
        cause = kwargs.pop("cause", None)
        if cause is not None and not len(args):
            # No explicit message was given - use the underlying error's text.
            args = [str(cause)]
        super(JsonIOError, self).__init__(*args, **kwargs)

        # The stream that couldn't be read or written. Set by
        # JsonIOStream.read_json() and JsonIOStream.write_json();
        # JsonMessageChannel relies on it to decide whether a NoMoreMessages
        # instance that bubbles up to the message loop is related to that loop.
        self.stream = stream

        # The underlying exception, if any.
        self.cause = cause
class NoMoreMessages(JsonIOError, EOFError):
    """Signals that a stream has no more messages to read, or can no longer
    be written to.
    """

    def __init__(self, *args, **kwargs):
        if not args:
            # Default message when none is supplied.
            args = ["No more messages"]
        super(NoMoreMessages, self).__init__(*args, **kwargs)
class JsonIOStream(object):
    """Implements a JSON value stream over two byte streams (input and output).

    Each value is encoded as a DAP packet, with metadata headers and a JSON payload.
    """

    # Upper bound accepted for an incoming Content-Length (16 MiB - 1); guards
    # against malformed headers triggering huge allocations in read_json().
    MAX_BODY_SIZE = 0xFFFFFF

    json_decoder_factory = json.JsonDecoder
    """Used by read_json() when decoder is None."""

    json_encoder_factory = json.JsonEncoder
    """Used by write_json() when encoder is None."""

    @classmethod
    def from_stdio(cls, name="stdio"):
        """Creates a new instance that receives messages from sys.stdin, and sends
        them to sys.stdout.

        On Win32, this also sets stdin and stdout to binary mode, since the protocol
        requires that to work properly.
        """
        if sys.version_info >= (3,):
            # The DAP wire format is bytes, so use the underlying binary buffers.
            stdin = sys.stdin.buffer
            stdout = sys.stdout.buffer
        else:
            stdin = sys.stdin
            stdout = sys.stdout
        if sys.platform == "win32":
            import os, msvcrt

            msvcrt.setmode(stdin.fileno(), os.O_BINARY)
            msvcrt.setmode(stdout.fileno(), os.O_BINARY)
        return cls(stdin, stdout, name)

    @classmethod
    def from_process(cls, process, name="stdio"):
        """Creates a new instance that receives messages from process.stdout, and
        sends them to process.stdin.
        """
        # reader is the child's stdout, writer is the child's stdin.
        return cls(process.stdout, process.stdin, name)

    @classmethod
    def from_socket(cls, sock, name=None):
        """Creates a new instance that sends and receives messages over a socket.
        """
        sock.settimeout(None)  # make socket blocking
        if name is None:
            name = repr(sock)

        # TODO: investigate switching to buffered sockets; readline() on unbuffered
        # sockets is very slow! Although the implementation of readline() itself is
        # native code, it calls read(1) in a loop - and that then ultimately calls
        # SocketIO.readinto(), which is implemented in Python.
        socket_io = sock.makefile("rwb", 0)

        # SocketIO.close() doesn't close the underlying socket.
        def cleanup():
            try:
                sock.shutdown(socket.SHUT_RDWR)
            except Exception:
                pass
            sock.close()

        return cls(socket_io, socket_io, name, cleanup)

    def __init__(self, reader, writer, name=None, cleanup=lambda: None):
        """Creates a new JsonIOStream.

        reader must be a BytesIO-like object, from which incoming messages will be
        read by read_json().

        writer must be a BytesIO-like object, into which outgoing messages will be
        written by write_json().

        cleanup must be a callable; it will be invoked without arguments when the
        stream is closed.

        reader.readline() must treat "\n" as the line terminator, and must leave "\r"
        as is - it must not replace "\r\n" with "\n" automatically, as TextIO does.
        """
        if name is None:
            name = fmt("reader={0!r}, writer={1!r}", reader, writer)
        self.name = name
        self._reader = reader
        self._writer = writer
        self._cleanup = cleanup
        self._closed = False

    def close(self):
        """Closes the stream, the reader, and the writer.
        """
        if self._closed:
            return
        self._closed = True
        log.debug("Closing {0} message stream", self.name)
        try:
            try:
                # Close the writer first, so that the other end of the connection has
                # its message loop waiting on read() unblocked. If there is an exception
                # while closing the writer, we still want to try to close the reader -
                # only one exception can bubble up, so if both fail, it'll be the one
                # from reader.
                try:
                    self._writer.close()
                finally:
                    if self._reader is not self._writer:
                        self._reader.close()
            finally:
                # Always run the user-provided cleanup (e.g. socket shutdown).
                self._cleanup()
        except Exception:
            # On Python 2, close() will raise an exception if there is a concurrent
            # read() or write(), which is a common and expected occurrence with
            # JsonMessageChannel, so don't even bother logging it.
            if sys.version_info >= (3,):
                log.reraise_exception(
                    "Error while closing {0} message stream", self.name
                )

    def _log_message(self, dir, data, logger=log.debug):
        # Lists get compact (non-indented) JSON formatting in the log.
        format_string = "{0} {1} " + (
            "{2!j:indent=None}" if isinstance(data, list) else "{2!j}"
        )
        return logger(format_string, self.name, dir, data)

    def _read_line(self, reader):
        # Accumulates readline() output until a full CRLF-terminated line is
        # seen; returns it with the trailing "\r\n" stripped.
        line = b""
        while True:
            try:
                line += reader.readline()
            except Exception as exc:
                raise NoMoreMessages(str(exc), stream=self)
            if not line:
                # EOF before any data - the stream is done.
                raise NoMoreMessages(stream=self)
            if line.endswith(b"\r\n"):
                line = line[0:-2]
                return line

    def read_json(self, decoder=None):
        """Read a single JSON value from reader.

        Returns JSON value as parsed by decoder.decode(), or raises NoMoreMessages
        if there are no more values to be read.
        """

        decoder = decoder if decoder is not None else self.json_decoder_factory()
        reader = self._reader
        read_line = functools.partial(self._read_line, reader)

        # If any error occurs while reading and parsing the message, log the original
        # raw message data as is, so that it's possible to diagnose missing or invalid
        # headers, encoding issues, JSON syntax errors etc.
        def log_message_and_reraise_exception(format_string="", *args, **kwargs):
            if format_string:
                format_string += "\n\n"
            format_string += "{name} -->\n{raw_lines}"

            # raw_chunks is the closed-over list populated below.
            raw_lines = b"".join(raw_chunks).split(b"\n")
            raw_lines = "\n".join(repr(line) for line in raw_lines)

            log.reraise_exception(
                format_string, *args, name=self.name, raw_lines=raw_lines, **kwargs
            )

        raw_chunks = []
        headers = {}

        # Read "Key: value" header lines until the blank line separating headers
        # from the body.
        while True:
            try:
                line = read_line()
            except Exception:
                # Only log it if we have already read some headers, and are looking
                # for a blank line terminating them. If this is the very first read,
                # there's no message data to log in any case, and the caller might
                # be anticipating the error - e.g. NoMoreMessages on disconnect.
                if headers:
                    log_message_and_reraise_exception(
                        "Error while reading message headers:"
                    )
                else:
                    raise

            raw_chunks += [line, b"\n"]
            if line == b"":
                break

            key, _, value = line.partition(b":")
            headers[key] = value

        try:
            length = int(headers[b"Content-Length"])
            if not (0 <= length <= self.MAX_BODY_SIZE):
                raise ValueError
        except (KeyError, ValueError):
            # Raise-then-catch so that log_message_and_reraise_exception() has a
            # current exception to re-raise with context attached.
            try:
                raise IOError("Content-Length is missing or invalid:")
            except Exception:
                log_message_and_reraise_exception()

        body_start = len(raw_chunks)
        body_remaining = length
        while body_remaining > 0:
            try:
                chunk = reader.read(body_remaining)
                if not chunk:
                    raise EOFError
            except Exception as exc:
                # Not logged due to https://github.com/microsoft/ptvsd/issues/1699
                raise NoMoreMessages(str(exc), stream=self)

            raw_chunks.append(chunk)
            body_remaining -= len(chunk)
        assert body_remaining == 0

        body = b"".join(raw_chunks[body_start:])
        try:
            body = body.decode("utf-8")
        except Exception:
            log_message_and_reraise_exception()

        try:
            body = decoder.decode(body)
        except Exception:
            log_message_and_reraise_exception()

        # If parsed successfully, log as JSON for readability.
        self._log_message("-->", body)
        return body

    def write_json(self, value, encoder=None):
        """Write a single JSON value into writer.

        Value is written as encoded by encoder.encode().
        """

        if self._closed:
            # Don't log this - it's a common pattern to write to a stream while
            # anticipating EOFError from it in case it got closed concurrently.
            raise NoMoreMessages(stream=self)

        encoder = encoder if encoder is not None else self.json_encoder_factory()
        writer = self._writer

        # Format the value as a message, and try to log any failures using as much
        # information as we already have at the point of the failure. For example,
        # if it fails after it is serialized to JSON, log that JSON.

        try:
            body = encoder.encode(value)
        except Exception:
            # NOTE(review): this re-raises whatever the logger callable returns -
            # confirm that log.exception re-raises (or returns an exception)
            # rather than returning None here.
            raise self._log_message("<--", value, logger=log.exception)
        if not isinstance(body, bytes):
            body = body.encode("utf-8")

        header = fmt("Content-Length: {0}\r\n\r\n", len(body))
        header = header.encode("ascii")

        data = header + body
        data_written = 0
        try:
            while data_written < len(data):
                written = writer.write(data[data_written:])
                # On Python 2, socket.makefile().write() does not properly implement
                # BytesIO.write(), and always returns None instead of the number of
                # bytes written - but also guarantees that it is always a full write.
                if written is None:
                    break
                data_written += written
            writer.flush()
        except Exception as exc:
            self._log_message("<--", value, logger=log.exception)
            raise JsonIOError(stream=self, cause=exc)

        self._log_message("<--", value)

    def __repr__(self):
        return fmt("{0}({1!r})", type(self).__name__, self.name)
class MessageDict(collections.OrderedDict):
    """A specialized dict that is used for JSON message payloads - Request.arguments,
    Response.body, and Event.body.

    For all members that normally throw KeyError when a requested key is missing, this
    dict raises InvalidMessageError instead. Thus, a message handler can skip checks
    for missing properties, and just work directly with the payload on the assumption
    that it is valid according to the protocol specification; if anything is missing,
    it will be reported automatically in the proper manner.

    If the value for the requested key is itself a dict, it is returned as is, and not
    automatically converted to MessageDict. Thus, to enable convenient chaining - e.g.
    d["a"]["b"]["c"] - the dict must consistently use MessageDict instances rather than
    vanilla dicts for all its values, recursively. This is guaranteed for the payload
    of all freshly received messages (unless and until it is mutated), but there is no
    such guarantee for outgoing messages.
    """

    def __init__(self, message, items=None):
        assert message is None or isinstance(message, Message)
        if items is None:
            super(MessageDict, self).__init__()
        else:
            super(MessageDict, self).__init__(items)

        self.message = message
        """The Message object that owns this dict.

        For any instance exposed via a Message object corresponding to some incoming
        message, it is guaranteed to reference that Message object. There is no similar
        guarantee for outgoing messages.
        """

    def __repr__(self):
        return fmt("{0!j}", self)

    def __call__(self, key, validate, optional=False):
        """Like get(), but with validation.

        The item is first retrieved as if with self.get(key, default=()) - the default
        value is () rather than None, so that JSON nulls are distinguishable from
        missing properties.

        If optional=True, and the value is (), it's returned as is. Otherwise, the
        item is validated by invoking validate(item) on it.

        If validate=False, it's treated as if it were (lambda x: x) - i.e. any value
        is considered valid, and is returned unchanged. If validate is a type or a
        tuple, it's treated as json.of_type(validate). Otherwise, if validate is not
        callable(), it's treated as json.default(validate).

        If validate() returns successfully, the item is substituted with the value
        it returns - thus, the validator can e.g. replace () with a suitable default
        value for the property.

        If validate() raises TypeError or ValueError, raises InvalidMessageError with
        the same text that applies_to(self.message).

        See debugpy.common.json for reusable validators.
        """
        if not validate:
            validate = lambda x: x
        elif isinstance(validate, type) or isinstance(validate, tuple):
            validate = json.of_type(validate, optional=optional)
        elif not callable(validate):
            validate = json.default(validate)

        value = self.get(key, ())
        try:
            value = validate(value)
        except (TypeError, ValueError) as exc:
            # NOTE(review): when this dict has no owning message, the Message
            # *class* is used as the fallback - confirm that this unbound-call
            # path is intended.
            message = Message if self.message is None else self.message
            err = fmt("{0}", exc)
            # NOTE(review): presumably validator errors that start with "[" are
            # already bracketed; otherwise insert a separating space - confirm.
            if not err.startswith("["):
                err = " " + err
            raise message.isnt_valid("{0!j}{1}", key, err)
        return value

    # Class-body-only decorator: wraps a dict accessor so that KeyError is
    # reported as InvalidMessageError. Deleted below so it never becomes a
    # method of the class.
    def _invalid_if_no_key(func):
        def wrap(self, key, *args, **kwargs):
            try:
                return func(self, key, *args, **kwargs)
            except KeyError:
                message = Message if self.message is None else self.message
                raise message.isnt_valid("missing property {0!r}", key)

        return wrap

    __getitem__ = _invalid_if_no_key(collections.OrderedDict.__getitem__)
    __delitem__ = _invalid_if_no_key(collections.OrderedDict.__delitem__)
    pop = _invalid_if_no_key(collections.OrderedDict.pop)

    del _invalid_if_no_key
def _payload(value):
"""JSON validator for message payload.
If that value is missing or null, it is treated as if it were {}.
"""
if value is not None and value != ():
if isinstance(value, dict): # can be int, str, list...
assert isinstance(value, MessageDict)
return value
# Missing payload. Construct a dummy MessageDict, and make it look like it was
# deserialized. See JsonMessageChannel._parse_incoming_message for why it needs
# to have associate_with().
def associate_with(message):
value.message = message
value = MessageDict(None)
value.associate_with = associate_with
return value
class Message(object):
    """Represents a fully parsed incoming or outgoing message.

    https://microsoft.github.io/debug-adapter-protocol/specification#protocolmessage
    """

    def __init__(self, channel, seq, json=None):
        # The JsonMessageChannel that this message belongs to.
        self.channel = channel

        self.seq = seq
        """Sequence number of the message in its channel.

        This can be None for synthesized Responses.
        """

        self.json = json
        """For incoming messages, the MessageDict containing raw JSON from which
        this message was originally parsed.
        """

    def __str__(self):
        return fmt("{0!j}", self.json) if self.json is not None else repr(self)

    def describe(self):
        """A brief description of the message that is enough to identify it.

        Examples:
        '#1 request "launch" from IDE'
        '#2 response to #1 request "launch" from IDE'.
        """
        raise NotImplementedError

    @property
    def payload(self):
        """Payload of the message - self.body or self.arguments, depending on the
        message type.
        """
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        """Same as self.payload(...)."""
        return self.payload(*args, **kwargs)

    def __contains__(self, key):
        """Same as (key in self.payload)."""
        return key in self.payload

    def is_event(self, *event):
        """Returns True if this message is an Event of one of the specified types.
        """
        if not isinstance(self, Event):
            return False
        # With no arguments, matches any event.
        return event == () or self.event in event

    def is_request(self, *command):
        """Returns True if this message is a Request of one of the specified types.
        """
        if not isinstance(self, Request):
            return False
        # With no arguments, matches any request.
        return command == () or self.command in command

    def is_response(self, *command):
        """Returns True if this message is a Response to a request of one of the
        specified types.
        """
        if not isinstance(self, Response):
            return False
        # With no arguments, matches a response to any request.
        return command == () or self.request.command in command

    def error(self, exc_type, format_string, *args, **kwargs):
        """Returns a new exception of the specified type from the point at which it is
        invoked, with the specified formatted message as the reason.

        The resulting exception will have its cause set to the Message object on which
        error() was called. Additionally, if that message is a Request, a failure
        response is immediately sent.
        """
        assert issubclass(exc_type, MessageHandlingError)

        silent = kwargs.pop("silent", False)
        reason = fmt(format_string, *args, **kwargs)
        exc = exc_type(reason, self, silent)  # will log it
        if isinstance(self, Request):
            self.respond(exc)
        return exc

    def isnt_valid(self, *args, **kwargs):
        """Same as self.error(InvalidMessageError, ...).
        """
        return self.error(InvalidMessageError, *args, **kwargs)

    def cant_handle(self, *args, **kwargs):
        """Same as self.error(MessageHandlingError, ...).
        """
        return self.error(MessageHandlingError, *args, **kwargs)
class Event(Message):
    """Represents an incoming event.

    https://microsoft.github.io/debug-adapter-protocol/specification#event

    It is guaranteed that body is a MessageDict associated with this Event, and so
    are all the nested dicts in it. If "body" was missing or null in JSON, body is
    an empty dict.

    To handle the event, JsonMessageChannel tries to find a handler for this event in
    JsonMessageChannel.handlers. Given event="X", if handlers.X_event exists, then it
    is the specific handler for this event. Otherwise, handlers.event must exist, and
    it is the generic handler for this event. A missing handler is a fatal error.

    No further incoming messages are processed until the handler returns, except for
    responses to requests that have wait_for_response() invoked on them.

    To report failure to handle the event, the handler must raise an instance of
    MessageHandlingError that applies_to() the Event object it was handling. Any such
    failure is logged, after which the message loop moves on to the next message.

    Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
    the appropriate exception type that applies_to() the Event object.
    """

    def __init__(self, channel, seq, event, body, json=None):
        super(Event, self).__init__(channel, seq, json)

        # The event name, e.g. "stopped".
        self.event = event

        if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
            # Freshly parsed payload - bind it back to this Event (see _payload).
            body.associate_with(self)
        self.body = body

    def describe(self):
        return fmt("#{0} event {1!j} from {2}", self.seq, self.event, self.channel)

    @property
    def payload(self):
        return self.body

    @staticmethod
    def _parse(channel, message_dict):
        # Validate and extract the protocol-mandated properties, then hand the
        # parsed Event to the channel for handler dispatch.
        seq = message_dict("seq", int)
        event = message_dict("event", unicode)
        body = message_dict("body", _payload)
        message = Event(channel, seq, event, body, json=message_dict)
        channel._enqueue_handlers(message, message._handle)

    def _handle(self):
        channel = self.channel
        handler = channel._get_handler_for("event", self.event)
        try:
            try:
                result = handler(self)
                # Event handlers must not produce a response.
                assert result is None, fmt(
                    "Handler {0} tried to respond to {1}.",
                    compat.srcnameof(handler),
                    self.describe(),
                )
            except MessageHandlingError as exc:
                if not exc.applies_to(self):
                    # The error is about some other message - let the outer
                    # except treat it as an unexpected failure.
                    raise
                log.error(
                    "Handler {0}\ncouldn't handle {1}:\n{2}",
                    compat.srcnameof(handler),
                    self.describe(),
                    str(exc),
                )
        except Exception:
            log.reraise_exception(
                "Handler {0}\ncouldn't handle {1}:",
                compat.srcnameof(handler),
                self.describe(),
            )
NO_RESPONSE = object()
"""Can be returned from a request handler in lieu of the response body, to indicate
that no response is to be sent.
Request.respond() must be invoked explicitly at some later point to provide a response.
"""
class Request(Message):
    """Represents an incoming or an outgoing request.

    Incoming requests are represented directly by instances of this class.

    Outgoing requests are represented by instances of OutgoingRequest, which provides
    additional functionality to handle responses.

    For incoming requests, it is guaranteed that arguments is a MessageDict associated
    with this Request, and so are all the nested dicts in it. If "arguments" was missing
    or null in JSON, arguments is an empty dict.

    To handle the request, JsonMessageChannel tries to find a handler for this request
    in JsonMessageChannel.handlers. Given command="X", if handlers.X_request exists,
    then it is the specific handler for this request. Otherwise, handlers.request must
    exist, and it is the generic handler for this request. A missing handler is a fatal
    error.

    The handler is then invoked with the Request object as its sole argument.

    If the handler itself invokes respond() on the Request at any point, then it must
    not return any value.

    Otherwise, if the handler returns NO_RESPONSE, no response to the request is sent.
    It must be sent manually at some later point via respond().

    Otherwise, a response to the request is sent with the returned value as the body.

    To fail the request, the handler can return an instance of MessageHandlingError,
    or respond() with one, or raise one such that it applies_to() the Request object
    being handled.

    Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
    the appropriate exception type that applies_to() the Request object.
    """

    def __init__(self, channel, seq, command, arguments, json=None):
        super(Request, self).__init__(channel, seq, json)

        # The request command, e.g. "launch".
        self.command = command

        if isinstance(arguments, MessageDict) and hasattr(arguments, "associate_with"):
            # Freshly parsed payload - bind it back to this Request (see _payload).
            arguments.associate_with(self)
        self.arguments = arguments

        self.response = None
        """Response to this request.

        For incoming requests, it is set as soon as the request handler returns.

        For outgoing requests, it is set as soon as the response is received, and
        before self._handle_response is invoked.
        """

    def describe(self):
        return fmt("#{0} request {1!j} from {2}", self.seq, self.command, self.channel)

    @property
    def payload(self):
        return self.arguments

    def respond(self, body):
        # A request may only be responded to once.
        assert self.response is None
        d = {"type": "response", "request_seq": self.seq, "command": self.command}

        if isinstance(body, Exception):
            d["success"] = False
            err_text = str(body)
            try:
                err_text = compat.force_unicode(err_text, "utf-8")
            except Exception:
                # On Python 2, the error message might not be Unicode, and we don't
                # really know what encoding it is. So if treating it as UTF-8 failed,
                # use repr() as a fallback - it should escape all non-ASCII chars in
                # the string.
                err_text = compat.force_unicode(repr(body), "ascii", errors="replace")
            d["message"] = err_text
        else:
            d["success"] = True
            if body is not None and body != {}:
                d["body"] = body

        # The context manager yields the sequence number assigned to the outgoing
        # message; the body is intentionally empty - seq is used after the block.
        with self.channel._send_message(d) as seq:
            pass
        self.response = Response(self.channel, seq, self, body)

    @staticmethod
    def _parse(channel, message_dict):
        # Validate and extract the protocol-mandated properties, then hand the
        # parsed Request to the channel for handler dispatch.
        seq = message_dict("seq", int)
        command = message_dict("command", unicode)
        arguments = message_dict("arguments", _payload)
        message = Request(channel, seq, command, arguments, json=message_dict)
        channel._enqueue_handlers(message, message._handle)

    def _handle(self):
        channel = self.channel
        handler = channel._get_handler_for("request", self.command)
        try:
            try:
                result = handler(self)
            except MessageHandlingError as exc:
                if not exc.applies_to(self):
                    raise
                # A raised-and-applicable error becomes the failure response body.
                result = exc
                log.error(
                    "Handler {0}\ncouldn't handle {1}:\n{2}",
                    compat.srcnameof(handler),
                    self.describe(),
                    str(exc),
                )

            # Enforce the handler contract described in the class docstring.
            if result is NO_RESPONSE:
                assert self.response is None, fmt(
                    "Handler {0} for {1} must not return NO_RESPONSE if it has already "
                    "invoked request.respond().",
                    compat.srcnameof(handler),
                    self.describe(),
                )
            elif self.response is not None:
                assert result is None or result is self.response.body, fmt(
                    "Handler {0} for {1} must not return a response body if it has "
                    "already invoked request.respond().",
                    compat.srcnameof(handler),
                    self.describe(),
                )
            else:
                assert result is not None, fmt(
                    "Handler {0} for {1} must either call request.respond() before it "
                    "returns, or return the response body, or return NO_RESPONSE.",
                    compat.srcnameof(handler),
                    self.describe(),
                )
                try:
                    self.respond(result)
                except NoMoreMessages:
                    log.warning(
                        "Channel was closed before the response from handler {0} to {1} could be sent",
                        compat.srcnameof(handler),
                        self.describe(),
                    )

        except Exception:
            log.reraise_exception(
                "Handler {0}\ncouldn't handle {1}:",
                compat.srcnameof(handler),
                self.describe(),
            )
class OutgoingRequest(Request):
    """Represents an outgoing request, for which it is possible to wait for a
    response to be received, and register a response handler.
    """

    # Outgoing requests are never parsed or handled as incoming messages;
    # disable the inherited machinery.
    _parse = _handle = None

    def __init__(self, channel, seq, command, arguments):
        super(OutgoingRequest, self).__init__(channel, seq, command, arguments)
        # Callables registered via on_response() that have not been enqueued yet.
        self._response_handlers = []

    def describe(self):
        return fmt("#{0} request {1!j} to {2}", self.seq, self.command, self.channel)

    def wait_for_response(self, raise_if_failed=True):
        """Waits until a response is received for this request, records the Response
        object for it in self.response, and returns response.body.

        If no response was received from the other party before the channel closed,
        self.response is a synthesized Response with body=NoMoreMessages().

        If raise_if_failed=True and response.success is False, raises response.body
        instead of returning.
        """
        with self.channel:
            while self.response is None:
                # NOTE(review): presumably _handlers_enqueued is a condition
                # variable tied to the channel lock, signaled when the response
                # arrives - confirm in JsonMessageChannel.
                self.channel._handlers_enqueued.wait()

        if raise_if_failed and not self.response.success:
            raise self.response.body
        return self.response.body

    def on_response(self, response_handler):
        """Registers a handler to invoke when a response is received for this request.
        The handler is invoked with Response as its sole argument.

        If response has already been received, invokes the handler immediately.

        It is guaranteed that self.response is set before the handler is invoked.
        If no response was received from the other party before the channel closed,
        self.response is a dummy Response with body=NoMoreMessages().

        The handler is always invoked asynchronously on an unspecified background
        thread - thus, the caller of on_response() can never be blocked or deadlocked
        by the handler.

        No further incoming messages are processed until the handler returns, except for
        responses to requests that have wait_for_response() invoked on them.
        """
        with self.channel:
            self._response_handlers.append(response_handler)
            self._enqueue_response_handlers()

    def _enqueue_response_handlers(self):
        response = self.response
        if response is None:
            # Response._parse() will submit the handlers when response is received.
            return

        def run_handlers():
            for handler in handlers:
                try:
                    try:
                        handler(response)
                    except MessageHandlingError as exc:
                        if not exc.applies_to(response):
                            raise
                        log.error(
                            "Handler {0}\ncouldn't handle {1}:\n{2}",
                            compat.srcnameof(handler),
                            response.describe(),
                            str(exc),
                        )
                except Exception:
                    log.reraise_exception(
                        "Handler {0}\ncouldn't handle {1}:",
                        compat.srcnameof(handler),
                        response.describe(),
                    )

        # Snapshot the registered handlers for the closure above, then clear the
        # list so that each handler is enqueued at most once.
        handlers = self._response_handlers[:]
        self.channel._enqueue_handlers(response, run_handlers)
        del self._response_handlers[:]
class Response(Message):
    """Represents an incoming or an outgoing response to a Request.

    https://microsoft.github.io/debug-adapter-protocol/specification#response

    error_message corresponds to "message" in JSON, and is renamed for clarity.

    If success is False, body is None. Otherwise, it is a MessageDict associated
    with this Response, and so are all the nested dicts in it. If "body" was missing
    or null in JSON, body is an empty dict.

    If this is a response to an outgoing request, it will be handled by the handler
    registered via self.request.on_response(), if any.

    Regardless of whether there is such a handler, OutgoingRequest.wait_for_response()
    can also be used to retrieve and handle the response. If there is a handler, it is
    executed before wait_for_response() returns.

    No further incoming messages are processed until the handler returns, except for
    responses to requests that have wait_for_response() invoked on them.

    To report failure to handle the event, the handler must raise an instance of
    MessageHandlingError that applies_to() the Response object it was handling. Any
    such failure is logged, after which the message loop moves on to the next message.

    Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
    the appropriate exception type that applies_to() the Response object.
    """

    def __init__(self, channel, seq, request, body, json=None):
        super(Response, self).__init__(channel, seq, json)

        self.request = request
        """The request to which this is the response."""

        if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
            # Freshly parsed payload - bind it back to this Response (see _payload).
            body.associate_with(self)

        self.body = body
        """Body of the response if the request was successful, or an instance
        of some class derived from Exception if it was not.

        If a response was received from the other side, but request failed, it is an
        instance of MessageHandlingError containing the received error message. If the
        error message starts with InvalidMessageError.PREFIX, then it's an instance of
        the InvalidMessageError specifically, and that prefix is stripped.

        If no response was received from the other party before the channel closed,
        it is an instance of NoMoreMessages.
        """

    def describe(self):
        return fmt("#{0} response to {1}", self.seq, self.request.describe())

    @property
    def payload(self):
        return self.body

    @property
    def success(self):
        """Whether the request succeeded or not.
        """
        return not isinstance(self.body, Exception)

    @property
    def result(self):
        """Result of the request. Returns the value of response.body, unless it
        is an exception, in which case it is raised instead.
        """
        if self.success:
            return self.body
        else:
            raise self.body

    @staticmethod
    def _parse(channel, message_dict, body=None):
        # A pre-supplied body indicates a synthesized response (e.g. on channel
        # close), which has no sequence number of its own.
        seq = message_dict("seq", int) if (body is None) else None
        request_seq = message_dict("request_seq", int)
        command = message_dict("command", unicode)
        success = message_dict("success", bool)
        if body is None:
            if success:
                body = message_dict("body", _payload)
            else:
                # Failed response: reconstruct the error object from "message",
                # recognizing InvalidMessageError by its well-known prefix.
                error_message = message_dict("message", unicode)
                exc_type = MessageHandlingError
                if error_message.startswith(InvalidMessageError.PREFIX):
                    error_message = error_message[len(InvalidMessageError.PREFIX) :]
                    exc_type = InvalidMessageError
                body = exc_type(error_message, silent=True)

        try:
            with channel:
                request = channel._sent_requests.pop(request_seq)
                known_request = True
        except KeyError:
            # Synthetic Request that only has seq and command as specified in response
            # JSON, for error reporting purposes.
            request = OutgoingRequest(channel, request_seq, command, "<unknown>")
            known_request = False

        if not success:
            body.cause = request

        response = Response(channel, seq, request, body, json=message_dict)

        with channel:
            request.response = response
            request._enqueue_response_handlers()

        if known_request:
            return response
        else:
            raise response.isnt_valid(
                "request_seq={0} does not match any known request", request_seq
            )
class Disconnect(Message):
    """Synthetic message that marks disconnection; it is always the final
    message received from any channel.
    """

    def __init__(self, channel):
        # Disconnect messages have no sequence number of their own.
        super(Disconnect, self).__init__(channel, None)

    def describe(self):
        return fmt("disconnect from {0}", self.channel)
class MessageHandlingError(Exception):
"""Indicates that a message couldn't be handled for some reason.
If the reason is a contract violation - i.e. the message that was handled did not
conform to the protocol specification - InvalidMessageError, which is a subclass,
should be used instead.
If any message handler raises an exception not derived from this class, it will
escape the message loop unhandled, and terminate the process.
If any message handler raises this exception, but applies_to(message) is False, it
is treated as if it was a generic exception, as desribed above. Thus, if a request
handler issues another request of its own, and that one fails, the failure is not
silently propagated. However, a request that is delegated via Request.delegate()
will also propagate failures back automatically. For manual propagation, catch the
exception, and call exc.propagate().
If any event handler raises this exception, and applies_to(event) is True, the
exception is silently swallowed by the message loop.
If any request handler raises this exception, and applies_to(request) is True, the
exception is silently swallowed by the message loop, and a failure response is sent
with "message" set to str(reason).
Note that, while errors are not logged when they're swallowed by the message loop,
by that time they have already been logged by their __init__ (when instantiated).
"""
def __init__(self, reason, cause=None, silent=False):
"""Creates a new instance of this class, and immediately logs the exception.
Message handling errors are logged immediately unless silent=True, so that the
precise context in which they occured can be determined from the surrounding
log entries.
"""
self.reason = reason
"""Why it couldn't be handled. This can be any object, but usually it's either
str or Exception.
"""
assert cause is None or isinstance(cause, Message)
self.cause = cause
"""The Message object for the message that couldn't be handled. For responses
to unknown requests, this is a synthetic Request.
"""
if not silent:
try:
raise self
except MessageHandlingError:
log.swallow_exception()
def __hash__(self):
return hash((self.reason, id(self.cause)))
def __eq__(self, other):
    """Equal when the exact types and reasons match, and - if both errors carry
    a cause - the causes refer to messages with the same sequence number.
    """
    # Defer to the other operand for foreign or subclass-mismatched types.
    if not isinstance(other, MessageHandlingError) or type(self) is not type(other):
        return NotImplemented
    if self.reason != other.reason:
        return False
    both_have_cause = self.cause is not None and other.cause is not None
    if both_have_cause and self.cause.seq != other.cause.seq:
        return False
    return True
def __ne__(self, other):
    """Inverse of __eq__ (needed for Python 2 compatibility)."""
    equal = self == other
    return not equal
def __str__(self):
    """The error's string form is simply its reason."""
    reason = self.reason
    return str(reason)
def __repr__(self):
    """Debug representation; includes the cause's channel and seq when present."""
    cls_name = type(self).__name__
    if self.cause is None:
        details = fmt("(reason={0!r})", self.reason)
    else:
        details = fmt(
            "(channel={0!r}, cause={1!r}, reason={2!r})",
            self.cause.channel.name,
            self.cause.seq,
            self.reason,
        )
    return cls_name + details
def applies_to(self, message):
    """Whether this MessageHandlingError can be treated as a reason why the
    handling of message failed.

    Always true when there is no specific cause; otherwise true only when the
    cause is the very same message object.
    """
    if self.cause is None:
        return True
    return self.cause is message
def propagate(self, new_cause):
    """Re-raises this error as a fresh instance of the same class with the same
    reason, but attributed to new_cause. The new instance is silent, so the
    error is not logged a second time.
    """
    cls = type(self)
    raise cls(self.reason, new_cause, silent=True)
class InvalidMessageError(MessageHandlingError):
    """Indicates that an incoming message did not follow the protocol
    specification - e.g. a required property was missing, or the message
    itself is not allowed in the current state.

    Raised by MessageDict in lieu of KeyError for missing keys.
    """

    PREFIX = "Invalid message: "
    # Automatically prepended to the "message" property in JSON responses when a
    # handler raises InvalidMessageError. Conversely, if a failed response has a
    # "message" property starting with this prefix, it is reported as
    # InvalidMessageError rather than MessageHandlingError.

    def __str__(self):
        text = str(self.reason)
        return InvalidMessageError.PREFIX + text
class JsonMessageChannel(object):
    """Implements a JSON message channel on top of a raw JSON message stream, with
    support for DAP requests, responses, and events.

    The channel can be locked for exclusive use via the with-statement::

        with channel:
            channel.send_request(...)
            # No interleaving messages can be sent here from other threads.
            channel.send_event(...)
    """

    def __init__(self, stream, handlers=None, name=None):
        """Wraps `stream`, dispatching parsed messages to `handlers`.

        handlers: object whose attributes are resolved per message type and name
            (see _get_handler_for); may be None for a send-only channel.
        name: label used in logs and repr; defaults to the stream's name.
        """
        self.stream = stream
        self.handlers = handlers
        self.name = name if name is not None else stream.name
        self.started = False
        self._lock = threading.RLock()
        self._closed = False
        # Monotonic sequence numbers for outgoing messages, starting at 1.
        self._seq_iter = itertools.count(1)
        self._sent_requests = {}  # {seq: Request}
        self._handler_queue = []  # [(what, handler)]
        # Shares _lock, so queue mutation and wait/notify are atomic together.
        self._handlers_enqueued = threading.Condition(self._lock)
        self._handler_thread = None
        self._parser_thread = None

    def __str__(self):
        return self.name

    def __repr__(self):
        return fmt("{0}({1!r})", type(self).__name__, self.name)

    def __enter__(self):
        # Locking the channel guarantees no interleaved sends from other threads.
        self._lock.acquire()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._lock.release()

    def close(self):
        """Closes the underlying stream.

        This does not immediately terminate any handlers that are already executing,
        but they will be unable to respond. No new request or event handlers will
        execute after this method is called, even for messages that have already been
        received. However, response handlers will continue to execute for any request
        that is still pending, as will any handlers registered via on_response().
        """
        with self:
            if not self._closed:
                # Flip the flag first so a concurrent close() is a no-op.
                self._closed = True
                self.stream.close()

    def start(self):
        """Starts a message loop which parses incoming messages and invokes handlers
        for them on a background thread, until the channel is closed.

        Incoming messages, including responses to requests, will not be processed at
        all until this is invoked.
        """
        assert not self.started
        self.started = True
        self._parser_thread = threading.Thread(
            target=self._parse_incoming_messages, name=fmt("{0} message parser", self)
        )
        # Flags recognized by pydevd: don't trace / treat as an internal thread.
        self._parser_thread.pydev_do_not_trace = True
        self._parser_thread.is_pydev_daemon_thread = True
        self._parser_thread.daemon = True
        self._parser_thread.start()

    def wait(self):
        """Waits for the message loop to terminate, and for all enqueued Response
        message handlers to finish executing.
        """
        parser_thread = self._parser_thread
        try:
            if parser_thread is not None:
                parser_thread.join()
        except AssertionError:
            # NOTE(review): Thread.join() normally raises RuntimeError, not
            # AssertionError; presumably this guards against monkey-patched
            # threading (pydevd) - confirm.
            log.debug("Handled error joining parser thread.")
        try:
            handler_thread = self._handler_thread
            if handler_thread is not None:
                handler_thread.join()
        except AssertionError:
            log.debug("Handled error joining handler thread.")

    # Order of keys for _prettify() - follows the order of properties in
    # https://microsoft.github.io/debug-adapter-protocol/specification
    _prettify_order = (
        "seq",
        "type",
        "request_seq",
        "success",
        "command",
        "event",
        "message",
        "arguments",
        "body",
        "error",
    )

    def _prettify(self, message_dict):
        """Reorders items in a MessageDict such that it is more readable.
        """
        # Dicts preserve insertion order, so deleting and re-inserting each known
        # key normalizes their relative order to _prettify_order.
        for key in self._prettify_order:
            if key not in message_dict:
                continue
            value = message_dict[key]
            del message_dict[key]
            message_dict[key] = value

    @contextlib.contextmanager
    def _send_message(self, message):
        """Sends a new message to the other party.

        Generates a new sequence number for the message, and provides it to the
        caller before the message is sent, using the context manager protocol::

            with send_message(...) as seq:
                # The message hasn't been sent yet.
                ...
            # Now the message has been sent.

        Safe to call concurrently for the same channel from different threads.
        """
        assert "seq" not in message
        with self:
            # Allocate the sequence number under the lock, so that numbering
            # stays monotonic across threads.
            seq = next(self._seq_iter)
        message = MessageDict(None, message)
        message["seq"] = seq
        self._prettify(message)
        with self:
            # The caller's block runs while the channel is locked, so the write
            # below happens before any other thread can interleave a message.
            yield seq
            self.stream.write_json(message)

    def send_request(self, command, arguments=None, on_before_send=None):
        """Sends a new request, and returns the OutgoingRequest object for it.

        If arguments is None or {}, "arguments" will be omitted in JSON.

        If on_before_send is not None, invokes on_before_send() with the request
        object as the sole argument, before the request actually gets sent.

        Does not wait for response - use OutgoingRequest.wait_for_response().

        Safe to call concurrently for the same channel from different threads.
        """
        d = {"type": "request", "command": command}
        if arguments is not None and arguments != {}:
            d["arguments"] = arguments
        with self._send_message(d) as seq:
            request = OutgoingRequest(self, seq, command, arguments)
            if on_before_send is not None:
                on_before_send(request)
            # Registered before the message is written, so the response cannot
            # arrive before the request is tracked.
            self._sent_requests[seq] = request
        return request

    def send_event(self, event, body=None):
        """Sends a new event.

        If body is None or {}, "body" will be omitted in JSON.

        Safe to call concurrently for the same channel from different threads.
        """
        d = {"type": "event", "event": event}
        if body is not None and body != {}:
            d["body"] = body
        with self._send_message(d):
            pass

    def request(self, *args, **kwargs):
        """Same as send_request(...).wait_for_response()
        """
        return self.send_request(*args, **kwargs).wait_for_response()

    def propagate(self, message):
        """Sends a new message with the same type and payload.

        If it was a request, returns the new OutgoingRequest object for it.
        """
        assert message.is_request() or message.is_event()
        if message.is_request():
            return self.send_request(message.command, message.arguments)
        else:
            self.send_event(message.event, message.body)

    def delegate(self, message):
        """Like propagate(message).wait_for_response(), but will also propagate
        any resulting MessageHandlingError back.
        """
        try:
            result = self.propagate(message)
            if result.is_request():
                result = result.wait_for_response()
            return result
        except MessageHandlingError as exc:
            # Re-raise with the original message as the cause, so the failure
            # is reported against the delegated message.
            exc.propagate(message)

    def _parse_incoming_messages(self):
        # Body of the parser thread: parse until the stream runs dry, then fail
        # all outstanding requests and schedule the disconnect handler.
        log.debug("Starting message loop for channel {0}", self)
        try:
            while True:
                self._parse_incoming_message()
        except NoMoreMessages as exc:
            log.debug("Exiting message loop for channel {0}: {1}", self, exc)
            with self:
                # Generate dummy responses for all outstanding requests.
                err_message = compat.force_unicode(str(exc), "utf-8", errors="replace")

                # Response._parse() will remove items from _sent_requests, so
                # make a snapshot before iterating.
                sent_requests = list(self._sent_requests.values())

                for request in sent_requests:
                    response_json = MessageDict(
                        None,
                        {
                            "seq": -1,  # synthetic response - not a real wire seq
                            "request_seq": request.seq,
                            "command": request.command,
                            "success": False,
                            "message": err_message,
                        },
                    )
                    Response._parse(self, response_json, body=exc)
                assert not len(self._sent_requests)

                self._enqueue_handlers(Disconnect(self), self._handle_disconnect)
                self.close()

    # Maps the JSON "type" property to the parser for that message kind.
    _message_parsers = {
        "event": Event._parse,
        "request": Request._parse,
        "response": Response._parse,
    }

    def _parse_incoming_message(self):
        """Reads incoming messages, parses them, and puts handlers into the queue
        for _run_handlers() to invoke, until the channel is closed.
        """

        # Set up a dedicated decoder for this message, to create MessageDict instances
        # for all JSON objects, and track them so that they can be later wired up to
        # the Message they belong to, once it is instantiated.
        def object_hook(d):
            d = MessageDict(None, d)
            if "seq" in d:
                self._prettify(d)
            d.associate_with = associate_with
            message_dicts.append(d)
            return d

        # A hack to work around circular dependency between messages, and instances of
        # MessageDict in their payload. We need to set message for all of them, but it
        # cannot be done until the actual Message is created - which happens after the
        # dicts are created during deserialization.
        #
        # So, upon deserialization, every dict in the message payload gets a method
        # that can be called to set MessageDict.message for *all* dicts belonging to
        # that message. This method can then be invoked on the top-level dict by the
        # parser, after it has parsed enough of the dict to create the appropriate
        # instance of Event, Request, or Response for this message.
        def associate_with(message):
            for d in message_dicts:
                d.message = message
                del d.associate_with

        message_dicts = []
        decoder = self.stream.json_decoder_factory(object_hook=object_hook)
        message_dict = self.stream.read_json(decoder)
        assert isinstance(message_dict, MessageDict)  # make sure stream used decoder

        msg_type = message_dict("type", json.enum("event", "request", "response"))
        parser = self._message_parsers[msg_type]
        try:
            parser(self, message_dict)
        except InvalidMessageError as exc:
            # Protocol violations are logged and dropped; the loop keeps going.
            log.error(
                "Failed to parse message in channel {0}: {1} in:\n{2!j}",
                self,
                str(exc),
                message_dict,
            )
        except Exception as exc:
            # Let the end-of-stream signal for our own stream unwind the loop.
            if isinstance(exc, NoMoreMessages) and exc.stream is self.stream:
                raise
            # Anything else here means the channel state is unknown - fail fast.
            log.swallow_exception(
                "Fatal error in channel {0} while parsing:\n{1!j}", self, message_dict
            )
            os._exit(1)

    def _enqueue_handlers(self, what, *handlers):
        """Enqueues handlers for _run_handlers() to run.

        `what` is the Message being handled, and is used for logging purposes.

        If the background thread with _run_handlers() isn't running yet, starts it.
        """
        with self:
            self._handler_queue.extend((what, handler) for handler in handlers)
            self._handlers_enqueued.notify_all()

            # If there is anything to handle, but there's no handler thread yet,
            # spin it up. This will normally happen only once, on the first call
            # to _enqueue_handlers(), and that thread will run all the handlers
            # for parsed messages. However, this can also happen if somebody calls
            # Request.on_response() - possibly concurrently from multiple threads -
            # after the channel has already been closed, and the initial handler
            # thread has exited. In this case, we spin up a new thread just to run
            # the enqueued response handlers, and it will exit as soon as it's out
            # of handlers to run.
            if len(self._handler_queue) and self._handler_thread is None:
                self._handler_thread = threading.Thread(
                    target=self._run_handlers, name=fmt("{0} message handler", self)
                )
                self._handler_thread.pydev_do_not_trace = True
                self._handler_thread.is_pydev_daemon_thread = True
                self._handler_thread.start()

    def _run_handlers(self):
        """Runs enqueued handlers until the channel is closed, or until the handler
        queue is empty once the channel is closed.
        """
        while True:
            with self:
                closed = self._closed
            if closed:
                # Wait for the parser thread to wrap up and enqueue any remaining
                # handlers, if it is still running.
                self._parser_thread.join()
                # From this point on, _enqueue_handlers() can only get called
                # from Request.on_response().

            with self:
                if not closed and not len(self._handler_queue):
                    # Wait for something to process.
                    self._handlers_enqueued.wait()

                # Make a snapshot before releasing the lock.
                handlers = self._handler_queue[:]
                del self._handler_queue[:]

                if closed and not len(handlers):
                    # Nothing to process, channel is closed, and parser thread is
                    # not running anymore - time to quit! If Request.on_response()
                    # needs to call _enqueue_handlers() later, it will spin up
                    # a new handler thread.
                    self._handler_thread = None
                    return

            for what, handler in handlers:
                # If the channel is closed, we don't want to process any more events
                # or requests - only responses and the final disconnect handler. This
                # is to guarantee that if a handler calls close() on its own channel,
                # the corresponding request or event is the last thing to be processed.
                if closed and handler in (Event._handle, Request._handle):
                    continue

                with log.prefixed("/handling {0}/\n", what.describe()):
                    try:
                        handler()
                    except Exception:
                        # It's already logged by the handler, so just fail fast.
                        self.close()
                        os._exit(1)

    def _get_handler_for(self, type, name):
        """Returns the handler for a message of a given type.

        Tries the specific "<name>_<type>" attribute first, then the generic
        "<type>" attribute, on self.handlers.
        """
        # NOTE: `type` shadows the builtin here; kept as-is for compatibility.
        with self:
            handlers = self.handlers
        for handler_name in (name + "_" + type, type):
            try:
                return getattr(handlers, handler_name)
            except AttributeError:
                continue
        raise AttributeError(
            fmt(
                "handler object {0} for channel {1} has no handler for {2} {3!r}",
                compat.srcnameof(handlers),
                self,
                type,
                name,
            )
        )

    def _handle_disconnect(self):
        # Runs on the handler thread after the message loop exits; invokes the
        # optional "disconnect" handler, defaulting to a no-op.
        handler = getattr(self.handlers, "disconnect", lambda: None)
        try:
            handler()
        except Exception:
            log.reraise_exception(
                "Handler {0}\ncouldn't handle disconnect from {1}:",
                compat.srcnameof(handler),
                self,
            )
class MessageHandlers(object):
    """A simple delegating message handlers object for use with JsonMessageChannel.

    Every keyword argument becomes an attribute of the same name on the instance.
    """

    def __init__(self, **kwargs):
        # Bulk-assign all provided handlers as instance attributes.
        self.__dict__.update(kwargs)
|
assembler.py | import importlib
import inspect
import multiprocessing as mp
import os
import sys
from copy import copy
import baseTasks
import muparse
import progress
from errors import *
class Assembler(object):
    """Discovers task classes in the task folder and assembles them into flows."""

    taskFolder = 'tasks/'
    # NOTE(review): these registries are class-level dicts, so they are shared by
    # all Assembler instances - confirm whether per-instance isolation is intended.
    tasks_serial = {}
    tasks_reducer = {}
    tasks_parallel = {}

    def __init__(self, altPath=None):
        """Locates the task folder (default: <repo root>/tasks) and imports tasks.

        altPath: optional explicit path to the folder containing task modules.
        """
        if altPath is None:
            thisDir = os.path.dirname(os.path.abspath(__file__))
            rootDir, _ = os.path.split(thisDir)
            self.taskFolder = os.path.join(rootDir, 'tasks')
        else:
            self.taskFolder = altPath
        self.parser = muparse.Parser()
        self.__importTasks(self.taskFolder)

    def __importTasks(self, path):
        """Imports every .py module in `path` and registers all valid task classes
        (subclasses of baseTasks.BaseProcessor) by kind: parallel/reducer/serial.
        """
        if not path.endswith('/'): path += '/'
        #scan the folder for modules
        module_names = []
        for module in os.listdir(path):
            # NOTE(review): split('.')[0] truncates module names containing dots.
            if os.path.isfile(path + module) and module.endswith('.py'):
                module_names.append( module.split('.')[0] )
        #import each and extract all the non-base classes that derive from BaseProcessor
        sys.path.append(path)
        for module in module_names:
            tmod = importlib.import_module(module)
            for name, obj in inspect.getmembers(tmod, inspect.isclass):
                if (issubclass(obj, baseTasks.BaseProcessor) and not obj.isBase):
                    try:
                        # Validate the class's parameter spec once, unless already done.
                        if not obj.isValid:
                            obj.validateParams()
                    except baseTasks.BadParamException as e:
                        # NOTE(review): `e.message` is not a standard attribute on
                        # Python 3 exceptions - confirm BadParamException defines it.
                        print(e.message + ' - skipping import')
                    else:
                        #add a string attribute identifying which module did the task come from
                        obj.module = module
                        #any task that reached this point is good to go, regardless of what kind it was
                        #but now it matters whether it's parallel or serial
                        if issubclass(obj, baseTasks.BaseParallel):
                            self.tasks_parallel[obj.name] = obj
                        elif issubclass(obj, baseTasks.BaseReducer):
                            self.tasks_reducer[obj.name] = obj
                        else:
                            self.tasks_serial[obj.name] = obj

    def printTaskDetails(self, task):
        """Pretty-prints a task's info string, parameters, inputs and outputs."""
        # Extracts the bare name from a repr like "<class 'foo'>" -> "foo".
        s = lambda x: str(x).split("'")[1]
        print('\t{}'.format(task.info))
        print('\tParams: {}'.format(len(task.params)))
        if len(task.params) > 0:
            for param in task.params:
                # param is (name, type_or_enum[, default]).
                if type(param[1]) is not baseTasks.MuEnum:
                    print('\t\t{:10}{}{}'.format(
                        param[0],
                        s(param[1]),
                        '' if len(param) < 3 else ', default = {}'.format(param[2])
                    ))
                else:
                    print('\t\t{:10}ENUM: "{}"'.format(param[0], '", "'.join(param[1].keys())))
                    if len(param) == 3:
                        print('\t\t{:10}{}'.format('', 'default = "{}"'.format(param[2])))
        print('\tInputs: {}, default: {}'.format(len(task.inputs), ', '.join(task.inputs)))
        print('\tOutputs: {}, default: {}'.format(len(task.outputs), ', '.join(task.outputs)))

    def printInfo(self, task_=None):
        """Prints details for a single task by name, or lists all known tasks."""
        if task_ is not None:
            if task_ in self.tasks_serial:
                task = self.tasks_serial[task_]
                print('{} [{}.py] (serial)'.format(task.name, task.module))
                self.printTaskDetails(task)
            elif task_ in self.tasks_reducer:
                task = self.tasks_reducer[task_]
                print('{} [{}.py] (reducer)'.format(task.name, task.module))
                self.printTaskDetails(task)
            elif task_ in self.tasks_parallel:
                task = self.tasks_parallel[task_]
                print('{} [{}.py] (parallel)'.format(task.name, task.module))
                self.printTaskDetails(task)
            else:
                print('There is no task "{}".'.format(task_))
        else:
            # Closure over maxlen, which is assigned below before first use.
            def __info(t, m):
                print('\t{:{pad}} [{mod}.py]'.format(t, pad=maxlen, mod=m))
            sTasks = sorted(self.tasks_serial.keys())
            rTasks = sorted(self.tasks_reducer.keys())
            pTasks = sorted(self.tasks_parallel.keys())
            maxlen = max([len(task) for task in sTasks+rTasks+pTasks])
            print('List of available serial tasks:')
            for task in sTasks:
                if task == '': continue
                __info(task, self.tasks_serial[task].module)
            print('List of available reducer tasks:')
            for task in rTasks:
                if task == '': continue
                __info(task, self.tasks_reducer[task].module)
            print('List of available parallel tasks:')
            for task in pTasks:
                if task == '': continue
                __info(task, self.tasks_parallel[task].module)

    def preventVT100(self):
        """Disables VT100 escape sequences in progress output."""
        progress.useVT100(False)

    def preventLogging(self):
        """Disables progress printing entirely."""
        progress.usePrint(False)

    def assembleFromText(self, lines, num_proc=0, debug=False):
        """Builds a MacroFlow from a list of text lines, one task per line.

        lines: iterable of task specifications understood by muparse.Parser.
        num_proc: worker count for parallel sections (0 = autodetect).
        debug: passed through to the flow; forces single-process execution.
        Raises ConstructException for unknown task names.
        """
        #parses the given text line by line, constructing tasks from them
        flow = MacroFlow(num_proc=num_proc, debug=debug)
        for n, line in enumerate(lines):
            #parse the line into a dict of construction information
            #(task name, input/output arguments, parameters)
            taskData = self.parser.parseText(line)
            taskName = taskData['name']
            #allow empty (or commented) lines
            if taskName == '':
                continue
            #try to find the task in the task lists, starting from parallel
            if taskName in self.tasks_parallel.keys():
                taskClass = self.tasks_parallel[taskName]
                taskObject = taskClass(**taskData)
                flow.appendParallel(taskObject)
            elif taskName in self.tasks_serial.keys():
                taskClass = self.tasks_serial[taskName]
                taskObject = taskClass(**taskData)
                flow.appendSerial(taskObject)
            elif taskName in self.tasks_reducer.keys():
                taskClass = self.tasks_reducer[taskName]
                taskObject = taskClass(**taskData)
                flow.appendReducer(taskObject)
            else:
                raise ConstructException('line {}: '.format(n), 'no such task!')
        flow.completeParallel() #ensure the parallel tasks are assembled
        return flow
class MacroFlow(object):
    """Top-level execution pipeline: an ordered list of serial tasks, parallel
    sections (MicroFlow), and reducers, sharing one result scope.
    """

    def __init__(self, num_proc=0, debug=False):
        """num_proc: worker count for parallel sections (0 = autodetect);
        debug: forces single-process parallel sections.
        """
        self.debug = debug
        self.tasks = [] #tasks are executed in order
        self.scope = {} #place where the intermediate results are contained
        self.micros = [] #potential outputs of MicroFlows; list of tuples
        self.parallel = None #MicroFlow object during construction
        self.num_proc = num_proc
        self.deferred = [] #serial reducers to be added after the MicroFlow

    def checkItem(self, item):
        """Returns True if `item` is (or can be made) available in the scope.

        Side effect: if a parallel section can produce the item, that section is
        instructed to gather it and a placeholder is added to the scope.
        """
        # if it already is in scope - we're good
        if item in self.scope.keys():
            return True
        # otherwise, look through the parallel tasks - maybe one can produce that item
        elif len(self.micros) == 0:
            return False
        else:
            for microID, microItems in self.micros:
                if item in microItems:
                    #kindly ask that task to output it and append it to scope
                    self.tasks[microID].gather(item)
                    self.scope[item] = None
                    return True
            # if the item has not been found to this point - it does not exist
            return False

    def appendSerial(self, task, isMicro=False):
        """Appends a serial task (or a completed MicroFlow when isMicro=True),
        verifying that all of its inputs can be satisfied from the scope.
        """
        #append a serial task to the execution list
        #if a parallel task was added before, its MicroFlow has to be completed
        if self.parallel is not None:
            self.completeParallel()
        #check whether the task's input requirements can be satisfied
        for request in task.getInputs():
            if not self.checkItem(request):
                raise ConstructException(task.name, 'requests "' + request + '" which is not in scope yet')
        #if it *is* a parallel task being added, mind that it doesn't output anything by default
        #instead, ask it what could it possibly output and keep track of that
        #if a serial task comes later and requests something from that potential scope,
        #the parallel task will be instructed to actually produce that output
        if isMicro:
            self.micros.append( (len(self.tasks), task.micro_scope) )
        else:
            for output in task.getOutputs():
                self.scope[output] = None
        self.tasks.append(task)

    def appendParallel(self, task, isReducer=False):
        """Adds a parallel task, opening a new MicroFlow section if necessary."""
        if self.parallel is None:
            #we're only starting to construct a parallelized task set
            self.parallel = MicroFlow(parent=self,
                num_proc=self.num_proc,
                debug=self.debug
            )
        self.parallel.append(task, isReducer)

    def appendReducer(self, task):
        """Adds a reducer task as a parallel + deferred serial pair."""
        #Reducer is added as both a parallel and a serial task. A parallel task is
        #added to the MicroFlow to reduce the sub-lists into sub-results, which then
        #get reduced to the final result using a serial task that will be appended
        #right after that MicroFlow. The serial variant has to be modified though
        #to iterate over its input.
        #Create a serial copy of the original
        serial = copy(task)
        #Add the parallel variant, but be sure to change its output so that the
        #serial variant receives it!
        task.outputs = task.inputs
        self.appendParallel(task, isReducer=True)
        #Prepare the serial one but defer its addition until MicroFlow is completed
        serial.action = serial.action_final
        self.deferred.append(serial)

    def completeParallel(self):
        """Closes the currently open MicroFlow (if any) and appends it, followed
        by any deferred serial reducers.
        """
        #when the series of parallel tasks ends, append it to the task list
        if self.parallel is not None:
            task = self.parallel
            self.parallel = None
            self.appendSerial(task, isMicro=True) #treat it *almost* as a serial task
            #add the deferred reducers, if any
            for serial in self.deferred:
                self.appendSerial(serial)
            self.deferred = []

    def execute(self):
        """Sets up and runs all tasks in order; returns the final scope dict."""
        #measure time and print reports using external object
        reporter = progress.SerialReporter()
        #start with setting up each task
        reporter.start('Setting up...')
        try:
            for task in self.tasks:
                task.setup()
        except baseTasks.muException as e:
            e.die()
        reporter.stop()
        #execute the task list in order
        for task in self.tasks:
            reporter.start('Task: ' + task.name)
            #query each task for its required inputs and retrieve their values from the scope
            inputs = [self.scope[i] for i in task.getInputs()]
            #feed them to the task and run it
            results = task.action(*inputs)
            #pack the output back to the scope
            #mind that if a task returns multiple items, they are automatically packed into
            #a tuple, while a single item is packed as is and should not be iterated over
            outputs = task.getOutputs()
            for result, key in zip(results if len(outputs) > 1 else (results,), outputs):
                self.scope[key] = result
            reporter.stop()
        reporter.total('Done!')
        return self.scope
class MicroFlow(object):
    """A parallel section of a MacroFlow: a sequence of tasks mapped over slices
    of list inputs across multiple processes, with optional per-process reducers.
    """

    def __init__(self, parent, num_proc=0, debug=False):
        """parent: the owning MacroFlow; num_proc: worker processes
        (0 = mp.cpu_count()); debug: forces a single process.
        """
        self.name = 'MicroFlow'
        self.debug = debug
        self.tasks = []
        self.macro = parent
        self.reducers = []
        self.gathered = []
        self.micro_scope = set() #just for building phase
        self.map_requests = [] #those items aren't produced by either of the local tasks
        self.num_proc = num_proc if num_proc > 0 else mp.cpu_count()
        if self.debug:
            self.num_proc = 1

    def __makeName(self):
        #builds a name string from tasks on the list - best call that at setup
        #shortens the list longer than 5 tasks
        if len(self.tasks) > 5:
            _name = self.tasks[0].name + ', ' + self.tasks[1].name + ', ... , ' + self.tasks[-1].name
        else:
            _name = ', '.join([task.name for task in self.tasks])
        self.name += ' (' + _name + ')'

    def append(self, task, isReducer=False):
        """Adds a task to the section, resolving its inputs from the local
        micro-scope or (as map requests) from the parent MacroFlow.
        """
        requests = task.getInputs()
        #check if the input requirements are satisfied
        for request in requests:
            #best case - another parallel task in this MicroFlow produces it
            if request in self.micro_scope:
                continue
            #otherwise - ask the parent MacroFlow
            if self.macro.checkItem(request):
                self.map_requests.append(request)
                continue
            #if not found - we have a problem
            raise ConstructException(task.name, 'requests "' + request + '" which is not in scope yet')
        for output in task.getOutputs():
            self.micro_scope.add(output)
        self.tasks.append(task)
        #if the task is also a reducer, we need to know about it
        if isReducer:
            self.reducers.append(task)
            for output in task.getOutputs():
                self.gather(output)

    def getInputs(self):
        # Inputs that must be supplied by the parent scope and sliced per process.
        return self.map_requests

    def getOutputs(self):
        # Only explicitly gathered items are returned to the parent scope.
        return self.gathered

    def gather(self, item):
        """Marks a locally-produced item to be collected and returned upstream."""
        #set to actually produce an item by given name
        if item not in self.micro_scope:
            raise ConstructException('MicroFlow', 'item "' + item + '" is not in the local scope!')
        #ignore if we're already gathering this item
        if item not in self.gathered:
            self.gathered.append(item)

    #quack like a BaseProcessor
    def setup(self, **kwargs):
        """Creates worker processes and pipes, and sets up the inner tasks."""
        #task list is complete so we can construct the name string
        self.__makeName()
        self.reporter = progress.ParallelReporter('Task: ' + self.name)
        self.pipes = []
        self.pool = []
        for i in range(self.num_proc):
            a, b = mp.Pipe(True) #duplex pipe - we send data and receive results
            self.pipes.append(a)
            # Only the first worker (i == 0) gets the reporter, so progress is
            # reported by a single process.
            self.pool.append(
                mp.Process(target=self.sequence, args=(b,) if i > 0 else (b,self.reporter))
            )
        #setup the parallel tasks
        for task in self.tasks:
            task.setup()

    def action(self, *args):
        """Slices the list args across workers, runs them, and merges results.

        Returns a list per gathered item (multiple items), a single list (one
        item), or None (nothing gathered).
        """
        #args are lists (assumed of equal size) which need to be cut into slices
        #for each of the processes and sent to them as soon as they are started
        batch_size = (len(args[0]) // self.num_proc + 1) if not self.debug else 1
        for i in range(self.num_proc):
            n_beg = i * batch_size
            n_end = n_beg + batch_size
            batch = [arg[n_beg:n_end] for arg in args]
            self.pool[i].start()
            self.pipes[i].send(batch)
        #collect the outputs and wait until the processes are done
        outputs = [pipe.recv() for pipe in self.pipes]
        for process in self.pool:
            process.join()
        #merge the results
        results = {item: [] for item in self.gathered}
        for item in self.gathered:
            for output in outputs:
                results[item] += output[item]
        #output
        if len(self.gathered) > 1:
            return [results[key] for key in self.gathered]
        elif len(self.gathered) == 1:
            return results[self.gathered[0]]
        else:
            return None

    def sequence(self, pipe, progress=None):
        """Worker-process body: receives a batch, runs the task chain per element,
        and sends gathered results back over the pipe.

        NOTE: the `progress` parameter shadows the module-level `progress` import
        within this function.
        """
        #this function is executed by each process separately
        #start by receiving data (list of args, each being a list)
        input_data = pipe.recv()
        #prepare to gather the outputs
        collect = {item: [] for item in self.gathered}
        #iterate over lists that make up the input data
        for data in zip(*input_data):
            #construct a local scope
            scope = dict(zip(self.map_requests, data))
            #iterate over the sequence of tasks
            for task in self.tasks:
                inputs = [scope[req] for req in task.getInputs()]
                results = task.action(*inputs)
                #pack outputs back but into the micro scope
                outputs = task.getOutputs()
                for result, key in zip(results if len(outputs) > 1 else (results,), outputs):
                    scope[key] = result
            #if anything from the local scope was marked as gathered - do so
            for item in self.gathered:
                collect[item].append( scope[item] )
        #report progress, if so requested (disabled by default)
        if progress is not None: progress(len(input_data[0]))
        #collect the outputs of any reduction tasks
        for task in self.reducers:
            for item in task.getOutputs():
                collect[item] = task.output()
        #send the outputs over the pipe
        pipe.send(collect)
|
learner.py | # Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMPALA learner class."""
import functools
import itertools
import threading
from typing import Dict, Tuple, Text
import dm_env
import haiku as hk
from haiku.examples.impala import agent as agent_lib
from haiku.examples.impala import util
import jax
from jax.experimental import optimizers
from jax.experimental import optix
import jax.numpy as jnp
import numpy as np
import rlax
from six.moves import queue
# The IMPALA paper sums losses, rather than taking the mean.
# We wrap rlax to do so as well.
def policy_gradient_loss(logits, *args):
    """rlax.policy_gradient_loss, but with sum(loss) and [T, B, ...] inputs.

    rlax returns a mean over time per batch element; multiplying by the number
    of timesteps recovers each element's total, and summing over the batch
    yields the summed loss used by the IMPALA paper.
    """
    num_timesteps = logits.shape[0]
    batched_loss = jax.vmap(rlax.policy_gradient_loss, in_axes=1)
    per_example_mean = batched_loss(logits, *args)
    return jnp.sum(per_example_mean * num_timesteps)
def entropy_loss(logits, *args):
    """rlax.entropy_loss, but with sum(loss) and [T, B, ...] inputs.

    Converts rlax's per-batch-element time-mean into a total by scaling with
    the timestep count, then sums across the batch.
    """
    num_timesteps = logits.shape[0]
    batched_loss = jax.vmap(rlax.entropy_loss, in_axes=1)
    per_example_mean = batched_loss(logits, *args)
    return jnp.sum(per_example_mean * num_timesteps)
class Learner:
"""Manages state and performs updates for IMPALA learner."""
def __init__(
    self,
    agent: agent_lib.Agent,
    rng_key,
    opt: optix.InitUpdate,
    batch_size: int,
    discount_factor: float,
    frames_per_iter: int,
    max_abs_reward: float = 0,
    logger=None,
):
    """Initializes learner state, data queues, and the initial actor params.

    agent: the IMPALA agent providing initial_params() and unroll().
    rng_key: JAX PRNG key used to initialize the agent parameters.
    opt: optimizer (init/update pair).
    batch_size: number of trajectories per update.
    discount_factor: per-step reward discount.
    frames_per_iter: environment frames consumed per learner update.
    max_abs_reward: if > 0, rewards are clipped to [-max, max].
    logger: optional logger; defaults to util.NullLogger().
    """
    # This learner assumes a single local device.
    assert jax.device_count() == 1
    self._agent = agent
    self._opt = opt
    self._batch_size = batch_size
    self._discount_factor = discount_factor
    self._frames_per_iter = frames_per_iter
    self._max_abs_reward = max_abs_reward

    # Data pipeline objects.
    self._done = False
    # Actors feed trajectories here; host_to_device_worker() batches them.
    self._host_q = queue.Queue(maxsize=self._batch_size)
    # Holds at most one device-resident batch, ready for update().
    self._device_q = queue.Queue(maxsize=1)

    # Prepare the parameters to be served to actors.
    params = agent.initial_params(rng_key)
    # (frame count so far, host copy of params) - see params_for_actor().
    self._params_for_actor = (0, jax.device_get(params))

    # Set up logging.
    if logger is None:
        logger = util.NullLogger()
    self._logger = logger
def _loss(
    self,
    theta: hk.Params,
    trajectories: util.Transition,
) -> Tuple[jnp.ndarray, Dict[Text, jnp.ndarray]]:
    """Compute vtrace-based actor-critic loss.

    Returns (total_loss, logs) where logs maps names to scalar diagnostics.
    Trajectories are time-major [T, B, ...] and include a bootstrap timestep.
    """
    # Unroll the learner policy from the first recurrent state.
    initial_state = jax.tree_map(lambda t: t[0], trajectories.agent_state)
    learner_outputs = self._agent.unroll(theta, trajectories.timestep,
                                         initial_state)
    v_t = learner_outputs.values[1:]

    # Remove bootstrap timestep from non-timesteps.
    _, actor_out, _ = jax.tree_map(lambda t: t[:-1], trajectories)
    learner_outputs = jax.tree_map(lambda t: t[:-1], learner_outputs)
    v_tm1 = learner_outputs.values

    # Get the discount, reward, step_type from the *next* timestep.
    timestep = jax.tree_map(lambda t: t[1:], trajectories.timestep)
    discounts = timestep.discount * self._discount_factor
    rewards = timestep.reward
    if self._max_abs_reward > 0:
        rewards = jnp.clip(rewards, -self._max_abs_reward, self._max_abs_reward)

    # The step is uninteresting if we transitioned LAST -> FIRST.
    # timestep corresponds to the *next* time step, so we filter for FIRST.
    mask = jnp.not_equal(timestep.step_type, int(dm_env.StepType.FIRST))
    mask = mask.astype(jnp.float32)

    # Importance weights between learner and actor policies.
    rhos = rlax.categorical_importance_sampling_ratios(
        learner_outputs.policy_logits, actor_out.policy_logits,
        actor_out.action)

    # vmap vtrace_td_error_and_advantage to take/return [T, B, ...].
    vtrace_td_error_and_advantage = jax.vmap(
        rlax.vtrace_td_error_and_advantage, in_axes=1, out_axes=1)

    vtrace_returns = vtrace_td_error_and_advantage(
        v_tm1, v_t, rewards, discounts, rhos)
    pg_advs = vtrace_returns.pg_advantage
    pg_loss = policy_gradient_loss(learner_outputs.policy_logits,
                                   actor_out.action, pg_advs, mask)

    baseline_loss = 0.5 * jnp.sum(jnp.square(vtrace_returns.errors) * mask)
    ent_loss = entropy_loss(learner_outputs.policy_logits, mask)

    # Fixed loss weights: 0.5 for the baseline, 0.01 for the entropy bonus.
    total_loss = pg_loss
    total_loss += 0.5 * baseline_loss
    total_loss += 0.01 * ent_loss

    logs = {}
    logs['PG_loss'] = pg_loss
    logs['baseline_loss'] = baseline_loss
    logs['entropy_loss'] = ent_loss
    logs['total_loss'] = total_loss
    return total_loss, logs
@functools.partial(jax.jit, static_argnums=0)
def update(self, params, opt_state, batch: util.Transition):
    """Compute gradients on `batch` and apply a single optimizer step.

    Returns the new params, the new optimizer state, and a logs dict
    augmented with gradient/weight norms.
    """
    grad_fn = jax.value_and_grad(self._loss, has_aux=True)
    (_, logs), grads = grad_fn(params, batch)
    unclipped_grad_norm = optimizers.l2_norm(grads)
    updates, new_opt_state = self._opt.update(grads, opt_state)
    new_params = optix.apply_updates(params, updates)
    logs.update({
        'grad_norm_unclipped': unclipped_grad_norm,
        'weight_norm': optimizers.l2_norm(new_params),
    })
    return new_params, new_opt_state, logs
def enqueue_traj(self, traj: util.Transition):
    """Enqueue trajectory.

    Called by actors; the trajectory is later consumed (and batched)
    by `host_to_device_worker`.
    """
    self._host_q.put(traj)
def params_for_actor(self) -> Tuple[int, hk.Params]:
    """Return the latest (num_frames, params) snapshot published by `run`."""
    return self._params_for_actor
def host_to_device_worker(self):
    """Elementary data pipeline.

    Runs in a background thread: collects `_batch_size` trajectories from
    the host queue, stacks them along axis 1 (the batch axis), and puts
    the device-resident batch onto `_device_q` for the training loop.
    Exits when `self._done` is set by `run`.
    """
    batch = []
    while not self._done:
        # Try to get a batch. Skip the iteration if we couldn't.
        try:
            for _ in range(len(batch), self._batch_size):
                # As long as possible while keeping learner_test time reasonable.
                batch.append(self._host_q.get(timeout=10))
        except queue.Empty:
            # A partially-filled `batch` is kept; the next iteration resumes
            # filling from where it left off (range starts at len(batch)),
            # after re-checking the `_done` flag.
            continue
        assert len(batch) == self._batch_size
        # Prepare for consumption, then put batch onto device.
        stacked_batch = jax.tree_multimap(lambda *xs: np.stack(xs, axis=1),
                                          *batch)
        self._device_q.put(jax.device_put(stacked_batch))
        # Clean out the built-up batch.
        batch = []
def run(self, max_iterations: int = -1):
    """Runs the learner for max_iterations updates.

    A value of -1 (the default) means iterate indefinitely.
    """
    # Start host-to-device transfer worker.
    transfer_thread = threading.Thread(target=self.host_to_device_worker)
    transfer_thread.start()
    (num_frames, params) = self._params_for_actor
    opt_state = self._opt.init(params)
    steps = range(max_iterations) if max_iterations != -1 else itertools.count()
    for _ in steps:
        # Blocks until the transfer worker has a full batch on-device.
        batch = self._device_q.get()
        params, opt_state, logs = self.update(params, opt_state, batch)
        num_frames += self._frames_per_iter
        # Collect parameters to distribute to downstream actors.
        self._params_for_actor = (num_frames, jax.device_get(params))
        # Collect and write logs out.
        logs = jax.device_get(logs)
        logs.update({
            'num_frames': num_frames,
        })
        self._logger.write(logs)
    # Shut down: signal the transfer worker, then wait for it to finish.
    self._done = True
    self._logger.close()
    transfer_thread.join()
|
picro.py | #!/bin/python
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Gio, GLib, GdkPixbuf, GObject
import os
import sys
import subprocess
import signal
from threading import Thread
# Needed to remove stdout
FNULL = open(os.devnull, 'w')
# Blacklist some file types
BLACKLIST = ['vnd.fpx']
# Exit properly when Ctrl-c is hit
def signal_handler(sig, frame):
    """SIGINT handler: exit cleanly instead of printing a traceback."""
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Core
class MainWindow(Gtk.Window):
    """Main application window.

    Two modes:
      * 'picro'  -- group pictures with the numeric keypad, name the
        groups, then write the names as exiftool keywords.
      * 'viewer' -- browse pictures sorted by keyword, with a search bar.
    """

    def __init__(self, win_type='picro'):
        Gtk.Window.__init__(self, title="Pic Organizer")
        self._bootstrap()
        self.type = win_type
        # keypad keyval -> list of (FlowBoxChild, filename) in that group;
        # 65466 (KP_0) is the implicit "untagged" bucket
        self.img_groups = {}
        # filename (picro) or FlowBoxChild (viewer) -> keyword string
        self.imgs_with_keywords = {}
        self.img_paths = []
        # var used to signal operation end (None/False = running, True = done)
        self.operation_done = None
        # GUI
        self.scrolled_win = self._scrolled_win()
        self.grid = self._grid()
        self.progress_bar = self._progress_bar()
        self.group_names_input = self._group_names_input()
        self.search_bar = self._search_bar()
        self.done_bar = self._done_bar()
        self.scrolled_win.add(self.grid)
        if self.type == 'picro':
            self.vbox = self.picro_window()
        else:
            # self.type == 'viewer'
            self.vbox = self.viewer_window()
        self.add(self.vbox)
        self.show_all()
        # hide stuff
        self.search_bar.hide()
        # separate thread to fetch images; created lazily by start()
        self.img_fetch_thread = None

    def picro_window(self):
        """Window initialized on picro mode"""
        vbox = Gtk.Box.new(Gtk.Orientation(1), 0)
        vbox.pack_start(self.progress_bar, False, False, 10)
        vbox.pack_start(self.scrolled_win, True, True, 0)
        vbox.pack_start(self.group_names_input, False, False, 0)
        vbox.pack_start(self.done_bar, False, False, 0)
        return vbox

    def viewer_window(self):
        """Window initialized on viewer mode"""
        vbox = Gtk.Box.new(Gtk.Orientation(1), 0)
        vbox.pack_start(self.progress_bar, False, False, 10)
        vbox.pack_start(self.search_bar, False, False, 10)
        vbox.pack_start(self.scrolled_win, True, True, 0)
        return vbox

    def _bootstrap(self):
        """Initialize the main window: signals and monitor-sized geometry."""
        self.connect("destroy", Gtk.main_quit)
        self.connect("key_press_event", self._core_func)
        display = Gdk.Display.get_default()
        monitor = display.get_primary_monitor()
        if not monitor:
            # Arbitrarily decide that the first monitor is the primary one
            # as a fallback
            monitor = display.get_monitor(0)
        geometry = monitor.get_geometry()
        scale_factor = monitor.get_scale_factor()
        width = scale_factor * geometry.width
        height = scale_factor * geometry.height
        self.resize(width, height)

    def _scrolled_win(self):
        return Gtk.ScrolledWindow.new()

    def _grid(self):
        grid = Gtk.FlowBox.new()
        grid.set_sort_func(self._picro_sort)
        return grid

    def _search_bar(self):
        search_bar = Gtk.Entry.new()
        search_bar.set_placeholder_text('Search for images by keywords')
        search_bar.connect('changed', self._filter_search)
        icon_search = Gio.ThemedIcon(name="edit-find-symbolic")
        search_bar.set_icon_from_gicon(
            Gtk.EntryIconPosition.SECONDARY, icon_search)
        return search_bar

    def _progress_bar(self):
        label = Gtk.Label.new()
        label.props.margin_left = 10
        progress = Gtk.ProgressBar.new()
        hbox = Gtk.Box.new(Gtk.Orientation(0), 0)
        hbox.pack_start(label, False, False, 10)
        hbox.pack_start(progress, True, True, 0)
        return hbox

    def add_progress(self, progress):
        """Set the progress bar fraction (0.0 - 1.0)."""
        self.progress_bar.get_children()[1].set_fraction(progress)

    def progress_label(self, label):
        """Reset the progress bar, add a new label to it and show it"""
        self.add_progress(0)
        self.progress_bar.get_children()[0].set_label(label)
        self.progress_bar.show_all()

    def progress_pulse(self):
        """Pulse the progress bar every second until operation_done is set."""
        def pulse():
            if not self.operation_done:
                self.progress_bar.get_children()[1].pulse()
                return True  # keep the timeout installed
            return False     # stop pulsing
        self.operation_done = False
        GLib.timeout_add(1000, pulse)

    def _group_names_input(self):
        """Build the row of 9 labelled entries used to name groups 1-9."""
        group_names = Gtk.Box.new(Gtk.Orientation(0), 0)
        for i in range(1, 10):
            label = Gtk.Label.new(str(i))
            entry = Gtk.Entry.new()
            hbox = Gtk.Box.new(Gtk.Orientation(0), 0)
            hbox.pack_start(label, True, True, 0)
            hbox.pack_start(entry, True, True, 0)
            group_names.pack_start(hbox, True, True, 0)
        grp_scrolled_win = Gtk.ScrolledWindow.new()
        grp_scrolled_win.add(group_names)
        return grp_scrolled_win

    def _get_entered_groups_names(self):
        """Get inputted group names and corresponding label"""
        names_list = []
        names_boxes = self.group_names_input.get_child().get_child()
        for box in names_boxes:
            names_list.append(
                (box.get_children()[0].get_text(),
                 box.get_children()[1].get_text()))
        return names_list

    # Hack? to avoid modifying pictures when writing the group name
    def _groups_names_input_is_focused(self):
        """Returns True if at least one of the input menus is focused"""
        return any(child for child in self.group_names_input.get_child(
        ).get_child().get_children() if child.get_children()[1].has_focus())

    def _done_bar(self):
        label = Gtk.Label.new(
            "Name your groups than click 'Done' when you're finished")
        button = Gtk.Button.new_with_label("Done")
        button.connect('clicked', self._on_done_pressed)
        hbox = Gtk.Box.new(Gtk.Orientation(0), 0)
        hbox.pack_start(label, True, True, 0)
        hbox.pack_start(button, True, True, 0)
        return hbox

    def _on_done_pressed(self, _widget):
        """Callback -> Add keywords to images"""
        def translate(i):
            # 65457 corresponds to KEY_PAD_1
            return i - 65456

        def add_keywords():
            current_progress = 0
            for key, image_list in self.img_groups.items():
                index = translate(key)
                # Defensive default: keep '' if no label matches so we skip
                # tagging instead of reusing the previous group's name.
                group_name = ''
                for label_index, inputted_name in grp_names_list:
                    if index == int(label_index):
                        group_name = inputted_name
                for _, image_file in image_list:
                    if group_name:
                        keyword = '-keywords=%s' % group_name
                        subprocess.call(
                            ['exiftool', '-overwrite_original', image_file,
                             keyword], stdout=FNULL)
                    # max() guards against ZeroDivisionError with one image
                    progress = current_progress / max(img_count - 1, 1)
                    GLib.idle_add(self.add_progress, progress)
                    current_progress += 1
            FNULL.close()
            # Tagging is the final step of picro mode: terminate the app.
            os._exit(0)

        grp_names_list = self._get_entered_groups_names()
        # delete untagged pics from the dict; the 65466 bucket may be absent
        # if 'Done' was clicked before any keypad key was pressed
        self.img_groups.pop(65466, None)
        img_count = sum(len(v) for v in self.img_groups.values())
        self.progress_label('Adding keywords to images')
        Thread(target=add_keywords).start()

    def _add_icons(self):
        """Create icons from current dir images"""
        files = os.listdir(os.path.curdir)
        img_list = []
        tmp_list = []

        def discover_images():
            # Parse exiftool output to find images and their keywords.
            self.progress_label('Discovering images')
            self.progress_pulse()
            exiftool_cmd = ['exiftool', '-fast2'] + files
            # passing errors= makes subprocess.run return text, not bytes
            out = subprocess.run(exiftool_cmd, errors='ignore',
                                 check=False, stdout=subprocess.PIPE).stdout
            tmp_file_name = ''
            for line in out.splitlines():
                if 'Error' in line:
                    # file type can't be read
                    pass
                elif '========' in line:
                    # section header: a new file starts here
                    tmp_file_name = line.split()[1]
                elif 'MIME Type' in line:
                    if any(filetype for filetype in BLACKLIST
                           if filetype in line) or 'image' not in line:
                        # not an image or file type is blacklisted
                        tmp_file_name = ''
                    else:
                        # 'image' in line:
                        img_list.append(tmp_file_name)
                elif 'Keywords' in line:
                    if tmp_file_name:
                        keywords = line.split(':')[1].strip()
                        self.imgs_with_keywords[tmp_file_name] = keywords
                    tmp_file_name = ''
            self.operation_done = True

        def sort_by_keywords():
            # Keyword-tagged images first (sorted by keyword), then the rest.
            out = []
            for img in self.imgs_with_keywords:
                # list.index()/remove() raise ValueError (they never return
                # -1): tolerate a keyword entry whose file was filtered out
                try:
                    img_list.remove(img)
                except ValueError:
                    pass
            sorted_list = sorted(
                self.imgs_with_keywords.items(), key=lambda x: x[1])
            for v, _ in sorted_list:
                out.append(v)
            out += img_list
            return out

        def create_icons(image_files):
            def add_to_grid(image):
                self.grid.add(image[0])

            def image_to_flowbox_child(image):
                # viewer mode re-keys imgs_with_keywords by FlowBoxChild so
                # the filter func can look entries up by grid child
                if image[1] in self.imgs_with_keywords:
                    self.imgs_with_keywords[image[0]] = \
                        self.imgs_with_keywords.pop(image[1])

            self.progress_label("Creating icons")
            img_num = len(image_files)
            for idx, img_file in enumerate(image_files):
                img = self._create_images(img_file)
                if img:
                    GLib.idle_add(add_to_grid, img)
                    if self.type == 'viewer':
                        # pass `img` explicitly: the idle callback runs later,
                        # after the loop variable has moved on
                        GLib.idle_add(image_to_flowbox_child, img)
                    # max() guards against ZeroDivisionError with one file
                    progress = idx / max(img_num - 1, 1)
                    GLib.idle_add(self.add_progress, progress)
                    tmp_list.append(img[1])

        if self.type == 'viewer':
            # Discover images step
            discover_images()
            # Sort images by keywords step
            img_list = sort_by_keywords()
        else:
            # self.type = 'picro':
            img_list = files
        # Create icons step
        create_icons(img_list)
        # workaround race condition
        self.img_paths = tmp_list
        # GUI stuff
        self.progress_bar.hide()
        self.search_bar.show()

    def _create_images(self, f):
        """Return (FlowBoxChild, filename) for `f`, or None if not an image."""
        try:
            icn = GdkPixbuf.Pixbuf.new_from_file_at_size(f, 420, 420)
        except GLib.Error:
            # Not an image (or unreadable file)
            return None
        img = Gtk.Image.new_from_pixbuf(icn)
        flow_box_child = Gtk.FlowBoxChild.new()
        flow_box_child.add(img)
        flow_box_child.show_all()
        return (flow_box_child, f)

    def _core_func(self, _, key):
        """Callback to key press event -> Sort pictures"""
        if self.type == 'viewer':
            return
        if self._groups_names_input_is_focused():
            self.grid.unselect_all()
            return
        # ignore key presses before/while the icon thread is running
        if self.img_fetch_thread is None or self.img_fetch_thread.is_alive():
            return
        # fill self.img_groups in main thread to avoid race condition
        if self.img_paths and not self.img_groups:
            self.img_groups[65466] = list(
                zip(self.grid.get_children(), self.img_paths))
        # KEY_PAD_1: 65457, KEY_PAD_9: 65465
        key_val = key.get_keyval()[1]
        if key_val not in range(65457, 65466):
            return
        if not self.grid.get_selected_children():
            return
        active_img = self.grid.get_selected_children()[0]
        active_img = self._search_dict_for_img(active_img)
        if active_img is None:
            # selected child not tracked in any group; nothing to move
            return
        self._add_img_to_grp(key_val, active_img)
        # signal sort fn
        active_img[0].changed()

    def _search_dict_for_img(self, active_img):
        """Find the (FlowBoxChild, filename) tuple containing `active_img`."""
        for img_list in self.img_groups.values():
            if img_list:
                active_img_list = [
                    img for img in img_list if active_img in img]
                if active_img_list:
                    return active_img_list[0]
        return None

    def _add_img_to_grp(self, key_val, active_img):
        """Move `active_img` out of its current group into group `key_val`."""
        for list_img in self.img_groups.values():
            if active_img in list_img:
                list_img.remove(active_img)
                break
        self.img_groups.setdefault(key_val, []).append(active_img)

    def _filter_search(self, entry):
        self.grid.set_filter_func(self._sort, entry)

    def _sort(self, child, entry):
        """FlowBox filter func: show `child` if its keywords match the query."""
        if not entry.get_text():
            return True
        if child not in self.imgs_with_keywords and entry.get_text():
            return False
        key = self.imgs_with_keywords[child]
        if entry.get_text() in key:
            return True
        return False

    def _picro_sort(self, child1, child2):
        """FlowBox sort func: order children by the keypad group they are in."""
        def search_keys():
            key1 = None
            key2 = None
            for key, value in self.img_groups.items():
                for img, _ in value:
                    if child1 == img:
                        key1 = key
                    if child2 == img:
                        key2 = key
            if key1 and key2:
                return key1, key2

        keys = search_keys()
        if keys:
            key1, key2 = keys
            if key1 > key2:
                return 1
            elif key2 > key1:
                return -1
            else:
                return 0
        else:
            return 0

    def start(self):
        """Cooler name for self._add_icons + different thread"""
        self.img_fetch_thread = Thread(target=self._add_icons)
        self.img_fetch_thread.start()
# Entry point: no arguments -> picro (sorting) mode; -v/--viewer -> viewer.
# Gtk.init() runs before any widget is created; unknown arguments exit with
# a usage message instead of crashing later with an undefined `win`.
Gtk.init()
if len(sys.argv) == 1:
    win = MainWindow('picro')
elif sys.argv[1] in ('-v', '--viewer'):
    win = MainWindow('viewer')
else:
    sys.exit("usage: picro [-v | --viewer]")
win.start()
Gtk.main()
|
managers.py | #
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import array
import queue
from time import time as _time
from traceback import format_exc
from . import connection
from . import context
from . import pool
from . import process
from . import reduction
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
    """Pickle `array.array` objects as (typecode, raw bytes)."""
    return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)

view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list:       # only needed in Py3.0
    def rebuild_as_list(obj):
        # dict views cannot be pickled directly; send them as plain lists
        return list, (list(obj),)
    for view_type in view_types:
        reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # __slots__ classes have no __dict__, so pickling needs explicit state
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        self.typeid, self.address, self.id = state

    def __repr__(self):
        return 'Token(typeid=%r, address=%r, id=%r)' % \
               (self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    request = (id, methodname, args, kwds)
    c.send(request)
    kind, result = c.recv()
    if kind != '#RETURN':
        # anything but a normal return becomes an exception for the caller
        raise convert_to_error(kind, result)
    return result
def convert_to_error(kind, result):
    '''Map a (kind, result) reply from the server to an exception instance.'''
    if kind == '#ERROR':
        # the server shipped the exception object itself
        return result
    if kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
        assert type(result) is str
        if kind == '#UNSERIALIZABLE':
            return RemoteError('Unserializable message: %s\n' % result)
        return RemoteError(result)
    return ValueError('Unrecognized message type')
class RemoteError(Exception):
    """Wraps a traceback string received from a manager's server process."""
    def __str__(self):
        divider = '-' * 75
        return '\n{0}\n{1}{0}'.format(divider, str(self.args[0]))
#
# Functions for finding the method names of an object
#
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    return [name for name in dir(obj) if callable(getattr(obj, name))]

def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    return [name for name in all_methods(obj) if not name.startswith('_')]
#
# Server which is run in a process controlled by a manager
#
class Server(object):
    '''
    Server class which runs in a process controlled by a manager object
    '''
    # Methods that remote clients may invoke on the server itself (as
    # opposed to methods of the shared objects it hosts).
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']

    def __init__(self, registry, address, authkey, serializer):
        assert isinstance(authkey, bytes)
        self.registry = registry
        self.authkey = process.AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]

        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address

        # ident -> (obj, exposed, method_to_typeid); '0' is a dummy entry
        # used for server-level dispatch
        self.id_to_obj = {'0': (None, ())}
        self.id_to_refcount = {}
        self.mutex = threading.RLock()

    def serve_forever(self):
        '''
        Run the server forever
        '''
        self.stop_event = threading.Event()
        process.current_process()._manager_server = self
        try:
            # accept connections on a daemon thread; this thread just waits
            # for the stop event (set by shutdown())
            accepter = threading.Thread(target=self.accepter)
            accepter.daemon = True
            accepter.start()
            try:
                while not self.stop_event.is_set():
                    self.stop_event.wait(1)
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            if sys.stdout != sys.__stdout__:
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
            sys.exit(0)

    def accepter(self):
        # Each accepted connection is served on its own daemon thread so a
        # slow client cannot block the listener.
        while True:
            try:
                c = self.listener.accept()
            except OSError:
                continue
            t = threading.Thread(target=self.handle_request, args=(c,))
            t.daemon = True
            t.start()

    def handle_request(self, c):
        '''
        Handle a new connection
        '''
        funcname = result = request = None
        try:
            # mutual challenge/response authentication with the shared key
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)
        try:
            c.send(msg)
        except Exception as e:
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)
        c.close()

    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)
        # hoist hot lookups out of the request loop
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop_event.is_set():
            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    # if the method is registered to return a proxyable type,
                    # create a shared object for the result and return a
                    # (#PROXY, token) message instead of the value itself
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # unexposed method: try the special fallbacks
                    # (__str__, __repr__, #GETVALUE) before giving up
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception as e:
                    # the result could not be pickled; report why
                    send(('#UNSERIALIZABLE', repr(msg)))
            except Exception as e:
                util.info('exception in thread serving %r',
                          threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    def fallback_getvalue(self, conn, ident, obj):
        return obj

    def fallback_str(self, conn, ident, obj):
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        return repr(obj)

    # methods allowed on any shared object, even when not in `exposed`
    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }

    def dummy(self, c):
        # no-op used by clients to verify the connection/authkey
        pass

    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        self.mutex.acquire()
        try:
            result = []
            keys = list(self.id_to_obj.keys())
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append(' %s: refcount=%s\n %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)
        finally:
            self.mutex.release()

    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        return len(self.id_to_obj) - 1      # don't count ident='0'

    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            util.debug('manager received shutdown message')
            c.send(('#RETURN', None))
        except:
            import traceback
            traceback.print_exc()
        finally:
            self.stop_event.set()

    def create(self, c, typeid, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        self.mutex.acquire()
        try:
            callable, exposed, method_to_typeid, proxytype = \
                      self.registry[typeid]

            if callable is None:
                # typeid registered without a factory: caller supplies the
                # ready-made object itself
                assert len(args) == 1 and not kwds
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                assert type(method_to_typeid) is dict
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                    # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0
            # increment the reference count immediately, to avoid
            # this object being garbage collected before a Proxy
            # object for it can be created. The caller of create()
            # is responsible for doing a decref once the Proxy object
            # has been created.
            self.incref(c, ident)
            return ident, tuple(exposed)
        finally:
            self.mutex.release()

    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])

    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        self.serve_client(c)

    def incref(self, c, ident):
        self.mutex.acquire()
        try:
            self.id_to_refcount[ident] += 1
        finally:
            self.mutex.release()

    def decref(self, c, ident):
        self.mutex.acquire()
        try:
            assert self.id_to_refcount[ident] >= 1
            self.id_to_refcount[ident] -= 1
            if self.id_to_refcount[ident] == 0:
                # last reference gone: drop the object so it can be collected
                del self.id_to_obj[ident], self.id_to_refcount[ident]
                util.debug('disposing of obj with id %r', ident)
        finally:
            self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
    """Tracks a manager's lifecycle stage (INITIAL -> STARTED -> SHUTDOWN)."""
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
# Maps a serializer name to its (Listener, Client) connection factory pair.
listener_client = {
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }
#
# Definition of BaseManager
#
class BaseManager(object):
    '''
    Base class for managers
    '''
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle',
                 ctx=None):
        if authkey is None:
            authkey = process.current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = process.AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
        self._ctx = ctx or get_context()

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        assert self._state.value == State.INITIAL
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' round-trip verifies the server is reachable and the
        # authkey is correct before marking ourselves STARTED
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        assert self._state.value == State.INITIAL

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = self._ctx.Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        if self._process is not None:
            self._process.join(timeout)
            if not self._process.is_alive():
                self._process = None

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        # lazily start the server process on first context entry
        if self._state.value == State.INITIAL:
            self.start()
        assert self._state.value == State.STARTED
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            # polite shutdown request first, then escalate to terminate()
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            process.join(timeout=1.0)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    address = property(lambda self: self._address)

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # give each subclass its own registry so registrations do not leak
        # into the base class
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()):
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            def temp(self, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                # the server holds an extra reference from create(); release
                # it now that the proxy owns its own reference
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
    """A set that is emptied after a fork and pickles as a fresh empty set,
    so its contents remain local to the process that added them."""
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referrent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
    """Pickle support: rebuild via RebuildProxy on the other side.

    The authkey travels along only while spawning a child process;
    auto-generated proxies are rebuilt through the AutoProxy factory.
    """
    kwds = {}
    if context.get_spawning_popen() is not None:
        kwds['authkey'] = self._authkey

    if getattr(self, '_isauto', False):
        kwds['exposed'] = self._exposed_
        factory = AutoProxy
    else:
        factory = type(self)
    return (RebuildProxy,
            (factory, self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
    # Deep-copying a proxy yields a local copy of the referent itself,
    # not another proxy.
    return self._getvalue()
def __repr__(self):
    # Repr of the proxy object itself (the referent's repr is what
    # __str__ fetches remotely).
    return '<%s object, typeid %r at %s>' % \
           (type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
    '''
    Return representation of the referent (or a fall-back if that fails)
    '''
    try:
        return self._callmethod('__repr__')
    except Exception:
        # Splice a failure note into our own repr (dropping the final '>').
        return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.

    If possible the shared object is returned, or otherwise a proxy for it.
    '''
    server = getattr(process.current_process(), '_manager_server', None)
    if server and server.address == token.address:
        # Unpickling inside the owning manager server: hand back the
        # referent itself instead of building a proxy to it.
        return server.id_to_obj[token.id][0]

    # Skip the server-side incref when this proxy was inherited at fork
    # time (the parent's reference already covers it).
    incref = (
        kwds.pop('incref', True) and
        not getattr(process.current_process(), '_inheriting', False)
    )
    return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
    '''
    Return an proxy type whose methods are given by `exposed`
    '''
    # `_cache` is a deliberate mutable default: it memoizes generated
    # types across calls so identical (name, exposed) pairs share a class.
    exposed = tuple(exposed)
    key = (name, exposed)
    if key in _cache:
        return _cache[key]

    # Generate one forwarding method per exposed name.
    namespace = {}
    for meth in exposed:
        exec('''def %s(self, *args, **kwds):
    return self._callmethod(%r, args, kwds)''' % (meth, meth), namespace)

    proxy_cls = type(name, (BaseProxy,), namespace)
    proxy_cls._exposed_ = exposed
    _cache[key] = proxy_cls
    return proxy_cls
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True):
    '''
    Return an auto-proxy for `token`
    '''
    _Client = listener_client[serializer][1]

    if exposed is None:
        # Ask the server which public methods the referent exposes.
        conn = _Client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    # Fall back to the manager's key, then to this process's key.
    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = process.current_process().authkey

    proxy_type = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    proxy = proxy_type(token, serializer, manager=manager,
                       authkey=authkey, incref=incref)
    proxy._isauto = True
    return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
    """Simple attribute bag used as a shared referent by SyncManager."""

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        # Underscored attributes are considered private and hidden from
        # the repr; the rest are shown sorted by name.
        public = sorted(
            '%s=%r' % (key, val)
            for key, val in self.__dict__.items()
            if not key.startswith('_')
        )
        return 'Namespace(%s)' % str.join(', ', public)
class Value(object):
    # Plain mutable cell used as the referent for SyncManager.Value.
    # `lock` is accepted for interface compatibility but ignored here
    # (the parameter is never stored or read).
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode   # kept only for __repr__
        self._value = value
    def get(self):
        return self._value
    def set(self, value):
        self._value = value
    def __repr__(self):
        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
    value = property(get, set)
def Array(typecode, sequence, lock=True):
    # Factory registered with SyncManager: the shared "array" lives in the
    # server process as a plain array.array.  `lock` is accepted for
    # interface compatibility but unused.
    return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
    # Proxy for iterator/generator referents held by the manager server;
    # every exposed call is forwarded over the connection.
    _exposed_ = ('__next__', 'send', 'throw', 'close')
    def __iter__(self):
        # The proxy itself is the iterator; only __next__ is remote.
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
    # Proxy for lock-like referents (Lock, RLock, Semaphore, ...).
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True, timeout=None):
        # Only forward `timeout` when given, so referents whose acquire()
        # does not take a timeout argument keep working.
        args = (blocking,) if timeout is None else (blocking, timeout)
        return self._callmethod('acquire', args)
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        # Context-manager entry always blocks with no timeout.
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    """Proxy for a condition variable held by the manager server."""

    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')

    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))

    def notify(self):
        return self._callmethod('notify')

    def notify_all(self):
        return self._callmethod('notify_all')

    def wait_for(self, predicate, timeout=None):
        """Wait until predicate() is truthy or the timeout elapses.

        Mirrors threading.Condition.wait_for: returns the last value of
        predicate(), which is falsy if the wait timed out.
        """
        result = predicate()
        if result:
            return result
        deadline = None if timeout is None else _time() + timeout
        while not result:
            if deadline is None:
                self.wait(None)
            else:
                remaining = deadline - _time()
                if remaining <= 0:
                    break
                self.wait(remaining)
            result = predicate()
        return result
class EventProxy(BaseProxy):
    # Proxy for a server-side threading.Event; pure call forwarding.
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
    # Proxy for threading.Barrier.  '__getattribute__' is exposed so the
    # read-only properties below can be fetched from the referent.
    _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def abort(self):
        return self._callmethod('abort')
    def reset(self):
        return self._callmethod('reset')
    @property
    def parties(self):
        return self._callmethod('__getattribute__', ('parties',))
    @property
    def n_waiting(self):
        return self._callmethod('__getattribute__', ('n_waiting',))
    @property
    def broken(self):
        return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
    # Proxy for a Namespace referent: attribute access on the proxy is
    # translated into remote attribute access on the referent.
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
    def __getattr__(self, key):
        # Underscored names are proxy-internal state; everything else is
        # fetched from the referent in the server process.  The explicit
        # object.__getattribute__ calls avoid recursing through this hook.
        if key[0] == '_':
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))
    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))
    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
    # Proxy exposing a get()/set()/.value interface, matching the API of
    # multiprocessing.Value.
    _exposed_ = ('get', 'set')
    def get(self):
        return self._callmethod('get')
    def set(self, value):
        return self._callmethod('set', (value,))
    value = property(get, set)
# Generated proxy type exposing the list interface.  The in-place
# operators are specialised by the ListProxy subclass so that augmented
# assignment rebinds to the proxy rather than a remote return value.
BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
    '__mul__', '__reversed__', '__rmul__', '__setitem__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
    'reverse', 'sort', '__imul__'
    ))
class ListProxy(BaseListProxy):
    # In-place operators forward the mutation to the referent and then
    # return self, so `lst += ...` / `lst *= ...` keeps `lst` bound to
    # the proxy instead of whatever the remote call returns.
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self
    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self
# Generated proxy types for dict, array and Pool referents.
DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
    ))

ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__'
    ))

BasePoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
    ))
# Results of these Pool methods are returned to the client as proxies of
# the named registered typeids rather than as pickled copies.
BasePoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'starmap_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
    }
class PoolProxy(BasePoolProxy):
    # Adds context-manager support to the generated pool proxy; exiting
    # the `with` block terminates the remote pool.
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocessing.Manager()` function creates started instances of
    this class.
    '''
# Register the shared types with SyncManager.  Where no proxy type is
# supplied, an AutoProxy is generated from the referent's public methods.
SyncManager.register('Queue', queue.Queue)
# Same referent type: queue.Queue already provides task_done()/join().
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)

# types returned by methods of PoolProxy
# (create_method=False: no Manager.Iterator()/AsyncResult() constructors,
# these typeids exist only for wrapping method results.)
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
|
simple_xchain_transfer_test.py | import sys
import time
from multiprocessing import Process, Value
from typing import Dict
from xrpl.models import XRP, IssuedCurrency, Payment, TrustSet
from xrpl.utils import xrp_to_drops
from slk.chain.chain import Chain
from slk.chain.mainchain import Mainchain
from slk.chain.xchain_transfer import main_to_side_transfer, side_to_main_transfer
from slk.sidechain_interaction import (
_chains_with_callback,
_convert_log_files_to_json,
close_mainchain_ledgers,
)
from slk.sidechain_params import SidechainParams
from slk.utils.eprint import disable_eprint, eprint
from tests.utils import (
generate_mainchain_account,
set_test_context_verbose_logging,
tst_context,
wait_for_balance_change,
)
def simple_xrp_test(mc_chain: Chain, sc_chain: Chain, params: SidechainParams):
    """Round-trip XRP between the main chain and the side chain.

    Funds the sidechain account first, then alternates even-valued
    main->side transfers with odd-valued side->main transfers, checking
    the receiver's balance after each transfer.
    """
    alice = mc_chain.account_from_alias("alice")
    adam = sc_chain.account_from_alias("adam")

    # main to side
    # First txn funds the side chain account
    with tst_context(mc_chain, sc_chain):
        funding = xrp_to_drops(200)
        balance_before = sc_chain.get_balance(adam, XRP())
        main_to_side_transfer(mc_chain, sc_chain, alice, adam, funding, params)
        wait_for_balance_change(sc_chain, adam, balance_before, funding)

    for _ in range(2):
        # even amounts for main to side
        for amount_xrp in range(10, 20, 2):
            with tst_context(mc_chain, sc_chain):
                drops = xrp_to_drops(amount_xrp)
                balance_before = sc_chain.get_balance(adam, XRP())
                main_to_side_transfer(
                    mc_chain, sc_chain, alice, adam, drops, params
                )
                wait_for_balance_change(sc_chain, adam, balance_before, drops)

        # side to main
        # odd amounts for side to main
        for amount_xrp in range(9, 19, 2):
            with tst_context(mc_chain, sc_chain):
                drops = xrp_to_drops(amount_xrp)
                balance_before = mc_chain.get_balance(alice, XRP())
                side_to_main_transfer(
                    mc_chain, sc_chain, adam, alice, drops, params
                )
                wait_for_balance_change(mc_chain, alice, balance_before, drops)
def simple_iou_test(mc_chain: Chain, sc_chain: Chain, params: SidechainParams):
    """Round-trip an issued currency (USD) across the two chains.

    Sets up trust lines and an initial USD balance for alice on the main
    chain, then alternates even-valued main->side transfers with
    odd-valued side->main transfers, checking balances after each one.
    """
    alice = mc_chain.account_from_alias("alice")
    adam = sc_chain.account_from_alias("adam")

    # In standalone mode the root account doubles as the issuer.
    iou_issuer = "root" if params.main_standalone else "issuer"
    mc_asset = IssuedCurrency(
        currency="USD", issuer=mc_chain.account_from_alias(iou_issuer).account_id
    )
    # On the sidechain the door account issues the mirrored currency.
    sc_asset = IssuedCurrency(
        currency="USD", issuer=sc_chain.account_from_alias("door").account_id
    )
    mc_chain.add_asset_alias(mc_asset, "mcd")  # main chain dollar
    sc_chain.add_asset_alias(sc_asset, "scd")  # side chain dollar

    # make sure adam account on the side chain exists and set the trust line
    with tst_context(mc_chain, sc_chain):
        main_to_side_transfer(
            mc_chain, sc_chain, alice, adam, xrp_to_drops(200), params
        )

    # create a trust line to alice and pay her USD.root/issuer
    mc_chain.send_signed(
        TrustSet(account=alice.account_id, limit_amount=mc_asset.to_amount(1_000_000))
    )
    mc_chain.maybe_ledger_accept()
    mc_chain.send_signed(
        Payment(
            account=mc_chain.account_from_alias(iou_issuer).account_id,
            destination=alice.account_id,
            amount=mc_asset.to_amount(10_000),
        )
    )
    mc_chain.maybe_ledger_accept()

    # create a trust line for adam
    # NOTE(review): unlike the mainchain transactions above, there is no
    # sc_chain.maybe_ledger_accept() after this TrustSet -- confirm the
    # sidechain closes ledgers on its own here.
    sc_chain.send_signed(
        TrustSet(account=adam.account_id, limit_amount=sc_asset.to_amount(1_000_000))
    )

    for i in range(2):
        # even amounts for main to side
        for value in range(10, 20, 2):
            with tst_context(mc_chain, sc_chain):
                # IOU amounts are expressed as decimal strings.
                value = str(value)
                to_send_asset = mc_asset.to_amount(value)
                rcv_asset = sc_asset.to_amount(value)
                pre_bal = sc_asset.to_amount(sc_chain.get_balance(adam, rcv_asset))
                main_to_side_transfer(
                    mc_chain, sc_chain, alice, adam, to_send_asset, params
                )
                wait_for_balance_change(sc_chain, adam, pre_bal, rcv_asset)

        # side to main
        # odd amounts for side to main
        for value in range(9, 19, 2):
            with tst_context(mc_chain, sc_chain):
                value = str(value)
                to_send_asset = sc_asset.to_amount(value)
                rcv_asset = mc_asset.to_amount(value)
                pre_bal = mc_asset.to_amount(mc_chain.get_balance(alice, rcv_asset))
                side_to_main_transfer(
                    mc_chain, sc_chain, adam, alice, to_send_asset, params
                )
                wait_for_balance_change(mc_chain, alice, pre_bal, rcv_asset)
def run(mc_chain: Chain, sc_chain: Chain, params: SidechainParams):
    """Run the XRP and IOU transfer tests, managing the ledger-closer.

    In standalone mode a helper process keeps closing mainchain ledgers
    while the tests run; it is stopped and joined on the way out, and the
    chains' log files are converted to JSON regardless of test outcome.
    """
    # process will run while stop token is non-zero
    stop_token = Value("i", 1)
    ledger_closer = None
    if mc_chain.standalone:
        ledger_closer = Process(
            target=close_mainchain_ledgers, args=(stop_token, params)
        )
        ledger_closer.start()
    try:
        # TODO: Tests fail without this sleep. Fix this bug.
        time.sleep(10)
        setup_accounts(mc_chain, sc_chain, params)
        simple_xrp_test(mc_chain, sc_chain, params)
        simple_iou_test(mc_chain, sc_chain, params)
    finally:
        if ledger_closer:
            stop_token.value = 0
            ledger_closer.join()
        configs = sc_chain.get_configs()
        if mc_chain.standalone:
            configs = mc_chain.get_configs() + configs
        _convert_log_files_to_json(
            configs,
            "final.json",
            params.verbose,
        )
def setup_accounts(mc_chain: Chain, sc_chain: Chain, params: SidechainParams):
    """Create the test accounts on both chains and fund alice.

    Mainchain aliases use female names (only alice is funded); sidechain
    aliases use male names and start unfunded.
    """
    # Typical female names are addresses on the mainchain.
    # The first account is funded.
    alice = mc_chain.create_account("alice")
    for name in ("beth", "carol", "deb", "ella"):
        mc_chain.create_account(name)

    if isinstance(mc_chain, Mainchain):
        # Standalone mainchain: fund alice from the genesis account.
        mc_chain.send_signed(
            Payment(
                account=params.genesis_account.account_id,
                destination=alice.account_id,
                amount=xrp_to_drops(1_000),
            )
        )
    else:
        generate_mainchain_account(mc_chain.node.websocket_uri, alice.wallet)
    mc_chain.maybe_ledger_accept()

    # Typical male names are addresses on the sidechain.
    # All accounts are initially unfunded
    for name in ("adam", "bob", "charlie", "dan", "ed"):
        sc_chain.create_account(name)
def run_sidechain_test(params: SidechainParams):
    """Spin up the chains and run the transfer tests against them."""

    def _callback(mc_chain: Chain, sc_chain: Chain):
        run(mc_chain, sc_chain, params)

    # User accounts are created by setup_accounts() inside run().
    _chains_with_callback(params, _callback, setup_user_accounts=False)
def test_simple_xchain(configs_dirs_dict: Dict[int, str]):
    """Pytest entry point: build params from the configs dir and run."""
    try:
        params = SidechainParams(configs_dir=configs_dirs_dict[1])
    except Exception as exc:
        eprint(str(exc))
        sys.exit(1)

    if params.verbose:
        print("eprint enabled")
    else:
        disable_eprint()

    # Set to true to help debug tests
    # NOTE(review): this debug flag is hard-coded on; confirm it is meant
    # to stay enabled outside local debugging.
    set_test_context_verbose_logging(True)

    run_sidechain_test(params)
# TODO: set this up so it runs tests on standalone mainchain and not-standalone
# mainchain
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.