Thread.py
|
import time
from threading import Thread
def sleeper(i):
print "thread %d sleeps for 5 seconds" % i
time.sleep(5)
print "thread %d woke up" % i
for i in range(10):
t = Thread(target=sleeper, args=(i,))
t.start()
|
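As an editorial aside, here is a Python 3 sketch of the same pattern that additionally join()s the workers so the main thread waits for every sleeper to finish; it is a variant for illustration, not part of the original script.

import time
from threading import Thread

def sleeper(i):
    print("thread %d sleeps for 5 seconds" % i)
    time.sleep(5)
    print("thread %d woke up" % i)

threads = [Thread(target=sleeper, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # wait for every worker before the main thread exits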
test_wallet.py
|
import rsa
from tcoin_base.wallet import Wallet
import time
import threading
import sys
IP = "127.0.0.1"
PORT = int(input('Port: '))
# w = input('wallet: ')
# w = Wallet.load_wallet_pem(node_addr=(IP, PORT), file_path='./' + w)
# w = Wallet(node_addr=(IP, PORT))
w = Wallet.optioned_create_wallet((IP, PORT))
# w2 = Wallet.load_wallet_pem(file_path='./2')
cur_recv = None
def new_job(target, args = (), daemon = False):
t = threading.Thread(target=target, args = args)
t.daemon = daemon
t.start()
def cmd():
global w
global cur_recv
while True:
cmd = input('command:')
if cmd == 'quit':
sys.exit()
if cmd == 'tx':
# new_job(target=w.send_tx,args=(w2.pu_ser,50,0),daemon=True)
pu = cur_recv.pu_ser
w.send_tx(pu,50,7)
if cmd == 'calc':
print(w.calculate_coins())
if cmd == 'my_tx':
for block in w.chain:
for tx in block.transactions:
if tx['receiver'] == w.pu_ser:
print(tx['input'],tx['output'])
if cmd == 'reconn':
w.connect_blockchain()
if cmd == 'pu':
print(w.pu_ser)
if cmd == 'save':
p = input('Save to: ')
w.save_wallet_pem(file_path='./' + p)
if cmd == 'load_recv':
p = input('load from: ')
cur_recv = Wallet.load_wallet_pem(file_path='./' + p)
new_job(cmd)
|
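The cmd() loop above dispatches on plain string comparisons inside one long while loop. Purely as an illustrative sketch (the handler names below are hypothetical and the real Wallet calls are omitted), the same idea can be expressed as a dictionary of handlers driven from a thread started by a new_job-style helper:

import threading
import time

def new_job(target, args=(), daemon=False):
    # same pattern as the new_job() helper above, but returning the thread
    t = threading.Thread(target=target, args=args, daemon=daemon)
    t.start()
    return t

def repl(handlers):
    # handlers maps a command string to a zero-argument callable
    while True:
        cmd = input('command: ')
        if cmd == 'quit':
            break
        action = handlers.get(cmd)
        if action is None:
            print('unknown command:', cmd)
        else:
            action()

if __name__ == '__main__':
    # hypothetical handlers standing in for the wallet commands ('tx', 'calc', ...)
    handlers = {
        'ping': lambda: print('pong'),
        'now': lambda: print(time.asctime()),
    }
    new_job(repl, args=(handlers,)).join()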
flow_runner.py
|
import os
import signal
import threading
from time import sleep as time_sleep
from typing import Any, Callable, Dict, Iterable, Optional, Iterator
from contextlib import contextmanager
import pendulum
import prefect
from prefect.client import Client
from prefect.core import Flow, Task
from prefect.engine.cloud import CloudTaskRunner
from prefect.engine.flow_runner import FlowRunner, FlowRunnerInitializeResult
from prefect.engine.runner import ENDRUN
from prefect.engine.state import Failed, Queued, State, Cancelling, Cancelled
from prefect.utilities.exceptions import VersionLockError
from prefect.utilities.graphql import with_args
class CloudFlowRunner(FlowRunner):
"""
FlowRunners handle the execution of Flows and determine the State of a Flow
before, during and after the Flow is run.
In particular, through the FlowRunner you can specify which tasks should be
the first tasks to run, which tasks should be returned after the Flow is finished,
and what states each task should be initialized with.
Args:
- flow (Flow): the `Flow` to be run
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the flow changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the flow runner instance, the old (prior) state, and the new
(current) state, with the following signature:
```
state_handler(
flow_runner: FlowRunner,
old_state: State,
new_state: State) -> State
```
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
Note: new FlowRunners are initialized within the call to `Flow.run()` and in general,
this is the endpoint through which FlowRunners will be interacted with most frequently.
Example:
```python
@task
def say_hello():
print('hello')
with Flow("My Flow") as f:
say_hello()
fr = FlowRunner(flow=f)
flow_state = fr.run()
```
"""
def __init__(self, flow: Flow, state_handlers: Iterable[Callable] = None) -> None:
self.client = Client()
super().__init__(
flow=flow, task_runner_cls=CloudTaskRunner, state_handlers=state_handlers
)
def _heartbeat(self) -> bool:
try:
# use empty string for testing purposes
flow_run_id = prefect.context.get("flow_run_id", "") # type: str
self.client.update_flow_run_heartbeat(flow_run_id)
self.heartbeat_cmd = ["prefect", "heartbeat", "flow-run", "-i", flow_run_id]
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"flow": {"settings": True},
}
}
}
flow_run = self.client.graphql(query).data.flow_run_by_pk
if not flow_run.flow.settings.get("heartbeat_enabled", True):
return False
return True
except Exception:
self.logger.exception(
"Heartbeat failed for Flow '{}'".format(self.flow.name)
)
return False
def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
"""
A special state handler that the FlowRunner uses to call its flow's state handlers.
This method is called as part of the base Runner's `handle_state_change()` method.
Args:
- old_state (State): the old (previous) state
- new_state (State): the new (current) state
Returns:
- State: the new state
"""
raise_on_exception = prefect.context.get("raise_on_exception", False)
try:
new_state = super().call_runner_target_handlers(
old_state=old_state, new_state=new_state
)
except Exception as exc:
msg = "Exception raised while calling state handlers: {}".format(repr(exc))
self.logger.exception(msg)
if raise_on_exception:
raise exc
new_state = Failed(msg, result=exc)
flow_run_id = prefect.context.get("flow_run_id", None)
version = prefect.context.get("flow_run_version")
try:
cloud_state = new_state
state = self.client.set_flow_run_state(
flow_run_id=flow_run_id,
version=version if cloud_state.is_running() else None,
state=cloud_state,
)
except VersionLockError:
state = self.client.get_flow_run_state(flow_run_id=flow_run_id)
if state.is_running():
self.logger.debug(
"Version lock encountered and flow is already in a running state."
)
raise ENDRUN(state=state)
self.logger.debug(
"Version lock encountered, proceeding with state {}...".format(
type(state).__name__
)
)
new_state = state
except Exception as exc:
self.logger.exception(
"Failed to set flow state with error: {}".format(repr(exc))
)
raise ENDRUN(state=new_state)
if state.is_queued():
state.state = old_state # type: ignore
raise ENDRUN(state=state)
prefect.context.update(flow_run_version=(version or 0) + 1)
return new_state
@contextmanager
def check_for_cancellation(self) -> Iterator:
"""Contextmanager used to wrap a cancellable section of a flow run."""
cancelling = False
done = threading.Event()
flow_run_version = None
context = prefect.context.to_dict()
def interrupt_if_cancelling() -> None:
# We need to copy the context into this thread, since context is a
# thread local.
with prefect.context(context):
flow_run_id = prefect.context["flow_run_id"]
while True:
exiting_context = done.wait(
prefect.config.cloud.check_cancellation_interval
)
try:
self.logger.debug("Checking flow run state...")
flow_run_info = self.client.get_flow_run_info(flow_run_id)
except Exception:
self.logger.warning(
"Error getting flow run info", exc_info=True
)
continue
if not flow_run_info.state.is_running():
self.logger.warning(
"Flow run is no longer in a running state; the current state is: %r",
flow_run_info.state,
)
if isinstance(flow_run_info.state, Cancelling):
self.logger.info(
"Flow run has been cancelled, cancelling active tasks"
)
nonlocal cancelling
nonlocal flow_run_version
cancelling = True
flow_run_version = flow_run_info.version
# If not already leaving context, raise KeyboardInterrupt in the main thread
if not exiting_context:
if hasattr(signal, "raise_signal"):
# New in python 3.8
signal.raise_signal(signal.SIGINT) # type: ignore
else:
if os.name == "nt":
# This doesn't actually send a signal, so it will only
# interrupt the next Python bytecode instruction - if the
# main thread is blocked in a c extension the interrupt
# won't be seen until that returns.
from _thread import interrupt_main
interrupt_main()
else:
signal.pthread_kill(
threading.main_thread().ident, signal.SIGINT # type: ignore
)
break
elif exiting_context:
break
thread = threading.Thread(target=interrupt_if_cancelling, daemon=True)
thread.start()
try:
yield
except KeyboardInterrupt:
if not cancelling:
raise
finally:
done.set()
thread.join()
if cancelling:
prefect.context.update(flow_run_version=flow_run_version)
raise ENDRUN(state=Cancelled("Flow run is cancelled"))
def run(
self,
state: State = None,
task_states: Dict[Task, State] = None,
return_tasks: Iterable[Task] = None,
parameters: Dict[str, Any] = None,
task_runner_state_handlers: Iterable[Callable] = None,
executor: "prefect.engine.executors.Executor" = None,
context: Dict[str, Any] = None,
task_contexts: Dict[Task, Dict[str, Any]] = None,
) -> State:
"""
The main endpoint for FlowRunners. Calling this method will perform all
computations contained within the Flow and return the final state of the Flow.
Args:
- state (State, optional): starting state for the Flow. Defaults to
`Pending`
- task_states (dict, optional): dictionary of task states to begin
computation with, with keys being Tasks and values their corresponding state
- return_tasks ([Task], optional): list of Tasks to include in the
final returned Flow state. Defaults to `None`
- parameters (dict, optional): dictionary of any needed Parameter
values, with keys being strings representing Parameter names and values being
their corresponding values
- task_runner_state_handlers (Iterable[Callable], optional): A list of state change
handlers that will be provided to the task_runner, and called whenever a task
changes state.
- executor (Executor, optional): executor to use when performing
computation; defaults to the executor specified in your prefect configuration
- context (Dict[str, Any], optional): prefect.Context to use for the
execution of each Task run
- task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
provided to each task
Returns:
- State: `State` representing the final post-run state of the `Flow`.
"""
context = context or {}
end_state = super().run(
state=state,
task_states=task_states,
return_tasks=return_tasks,
parameters=parameters,
task_runner_state_handlers=task_runner_state_handlers,
executor=executor,
context=context,
task_contexts=task_contexts,
)
# If the run stays Queued with a start time more than 10 minutes out,
# exit with the Queued state so Lazarus can pick it up and reschedule it.
while end_state.is_queued() and (
end_state.start_time <= pendulum.now("utc").add(minutes=10) # type: ignore
):
assert isinstance(end_state, Queued)
time_remaining = max(
(end_state.start_time - pendulum.now("utc")).total_seconds(), 0
)
self.logger.debug(
(
f"Flow run is in a Queued state. Sleeping for at most {time_remaining:.2f} "
f"seconds and attempting to run again."
)
)
# Sleep until not in a queued state, then attempt to re-run
while time_remaining > 0:
delay = min(
prefect.config.cloud.check_cancellation_interval, time_remaining
)
time_remaining -= delay
# Imported `time.sleep` as `time_sleep` to allow monkeypatching in tests
time_sleep(delay)
flow_run_info = self.client.get_flow_run_info(
flow_run_id=prefect.context.get("flow_run_id")
)
context.update(flow_run_version=flow_run_info.version)
if not isinstance(flow_run_info.state, Queued):
break
# Once concurrency slots become free, this will eventually produce a
# non-Queued state; until then we are essentially waiting for the
# orchestration layer to say we are clear to go. Purposefully
# not passing `state` so we can refresh the info from Cloud,
# allowing us to bail out early from flow runs that have already
# reached a finished state via another process.
end_state = super().run(
task_states=task_states,
return_tasks=return_tasks,
parameters=parameters,
task_runner_state_handlers=task_runner_state_handlers,
executor=executor,
context=context,
task_contexts=task_contexts,
)
return end_state
def initialize_run( # type: ignore
self,
state: Optional[State],
task_states: Dict[Task, State],
context: Dict[str, Any],
task_contexts: Dict[Task, Dict[str, Any]],
parameters: Dict[str, Any],
) -> FlowRunnerInitializeResult:
"""
Initializes the Task run by initializing state and context appropriately.
If the provided state is a Submitted state, the state it wraps is extracted.
Args:
- state (Optional[State]): the initial state of the run
- task_states (Dict[Task, State]): a dictionary of any initial task states
- context (Dict[str, Any], optional): prefect.Context to use for the
execution of each Task run
- task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
provided to each task
- parameters(dict): the parameter values for the run
Returns:
- NamedTuple: a tuple of initialized objects:
`(state, task_states, context, task_contexts)`
"""
# load id from context
flow_run_id = prefect.context.get("flow_run_id")
try:
flow_run_info = self.client.get_flow_run_info(flow_run_id)
except Exception as exc:
self.logger.debug(
"Failed to retrieve flow state with error: {}".format(repr(exc))
)
if state is None:
state = Failed(
message="Could not retrieve state from Prefect Cloud", result=exc
)
raise ENDRUN(state=state)
updated_context = context or {}
updated_context.update(flow_run_info.context or {})
updated_context.update(
flow_id=flow_run_info.flow_id,
flow_run_id=flow_run_info.id,
flow_run_version=flow_run_info.version,
flow_run_name=flow_run_info.name,
scheduled_start_time=flow_run_info.scheduled_start_time,
)
tasks = {slug: t for t, slug in self.flow.slugs.items()}
# update task states and contexts
for task_run in flow_run_info.task_runs:
try:
task = tasks[task_run.task_slug]
except KeyError:
msg = (
f"Task slug {task_run.task_slug} not found in the current Flow; "
f"this is usually caused by changing the Flow without reregistering "
f"it with the Prefect API."
)
raise KeyError(msg)
task_states.setdefault(task, task_run.state)
task_contexts.setdefault(task, {}).update(
task_id=task_run.task_id,
task_run_id=task_run.id,
task_run_version=task_run.version,
)
# if state is set, keep it; otherwise load from Cloud
state = state or flow_run_info.state # type: ignore
# update parameters, prioritizing kwarg-provided params
updated_parameters = flow_run_info.parameters or {} # type: ignore
updated_parameters.update(parameters)
return super().initialize_run(
state=state,
task_states=task_states,
context=updated_context,
task_contexts=task_contexts,
parameters=updated_parameters,
)
|
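check_for_cancellation() above combines a daemon watcher thread, a threading.Event, and a SIGINT delivered to the main thread so a blocking flow run can be interrupted. Below is a stripped-down, Prefect-free sketch of that pattern, assuming Python 3.8+ for signal.raise_signal; the should_cancel() callable is a hypothetical stand-in for the Cloud state poll:

import signal
import threading
import time
from contextlib import contextmanager

@contextmanager
def cancellable(should_cancel, poll_interval=1.0):
    """Interrupt the main thread if should_cancel() ever returns True."""
    cancelled = False
    done = threading.Event()

    def watch():
        nonlocal cancelled
        while not done.wait(poll_interval):
            if should_cancel():
                cancelled = True
                # Python 3.8+: deliver SIGINT, which the main thread sees as a
                # KeyboardInterrupt (same mechanism as check_for_cancellation above)
                signal.raise_signal(signal.SIGINT)
                return

    watcher = threading.Thread(target=watch, daemon=True)
    watcher.start()
    try:
        yield
    except KeyboardInterrupt:
        if not cancelled:
            raise  # a real Ctrl-C, not a cancellation request
    finally:
        done.set()
        watcher.join()

if __name__ == "__main__":
    start = time.time()
    with cancellable(lambda: time.time() - start > 2, poll_interval=0.5):
        time.sleep(10)  # stand-in for the flow run body; interrupted after ~2s
    print("left the cancellable section after cancellation")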
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 38316
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
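The inner loop of Miner.work() implements the standard Bitcoin proof-of-work check: double SHA-256 over the 80-byte header (76 static bytes plus a 4-byte little-endian nonce), with the digest interpreted as a 256-bit little-endian integer and compared against the target. A Python 3 sketch of just that check, using a dummy all-zero header and a deliberately easy target for illustration:

import hashlib
import struct

def pow_check(header76: bytes, nonce: int, target: int) -> bool:
    """Return True if double-SHA256(header76 + nonce) is below target."""
    blob = header76 + struct.pack("<I", nonce)          # 80-byte block header
    digest = hashlib.sha256(hashlib.sha256(blob).digest()).digest()
    # the hash is compared as a little-endian 256-bit integer
    return int.from_bytes(digest, "little") < target

if __name__ == "__main__":
    header = bytes(76)                 # dummy header, all zeros (illustration only)
    target = 1 << 240                  # deliberately easy target so a hit comes quickly
    for nonce in range(1_000_000):
        if pow_check(header, nonce, target):
            print("found nonce", nonce)
            break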
politics_economics_process.py
|
import multiprocessing
import signal
import os
import time
import random
# an event that uses signals to communicate to parent process
class ExternalEvent:
def __init__(self, name, probability, lifespan, sig, handler):
self.name = name # event name
self.probability = probability # probability of happening each tick
self.lifespan = lifespan # how many ticks an event lasts
self.ttl = -1 # time to live, i.e. how many ticks remain before the event ends
self.signal = sig # associated signal
self.handler = handler # handler function
# returns true if the event occurred
def happens(self):
return random.random() < self.probability
# signals the parent (market) that the event is on, if it was previously inactive;
# just resets its ttl if it is already alive and signaled.
def up(self):
if self.ttl < 0:
self.alert()
self.ttl = self.lifespan
# signals death to parent (market)
def down(self):
self.alert()
self.ttl = -1
# sends signal to parent (market)
def alert(self):
os.kill(os.getppid(), self.signal)
# a source of events (ExternalEvent)
class ExternalEventSource:
def __init__(self, name, events, interval, daemon=False):
self.name = name # name of the source (used to identify source in terminal)
self.events = events # list of events to fire randomly
self.interval = interval # tick duration
self.process = None # stored process object instance
self.daemon = daemon # if process should be daemon or not
# deploys a process, assigns handlers and returns the process.
# should be run in the parent for the handlers to work properly
def deploy(self):
for event in self.events:
signal.signal(event.signal, event.handler)
event.handler = None
self.process = multiprocessing.Process(target=self.run)
self.process.daemon = self.daemon
self.process.name = f"{self.name}-process"
return self.process
# function executed by the process
def run(self):
print(f"[{self.process.name}] started. daemon={self.process.daemon}. pid={os.getpid()}. ppid={os.getppid()}]")
print(f"[{self.process.name}] Events : ")
for event in self.events:
print(f"[{self.process.name}] - {event.name} : p={event.probability} -> {event.signal}")
while True:
for event in self.events:
if event.happens():
event.up()
print(f"[{self.process.name}]", "Event", event.name, "!")
elif event.ttl == 0: # last tick of life. kill event and signal parent
event.down()
elif event.ttl > 0: # still alive, decrease ttl
event.ttl -= 1
try:
time.sleep(self.interval)
except KeyboardInterrupt:
exit(0)
|
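The classes above rest on one POSIX mechanism: a child process signalling its parent with os.kill(os.getppid(), sig) while the parent has registered a handler before deploying the child. A minimal, self-contained sketch of that round trip (POSIX-only, since SIGUSR1 is unavailable on Windows; the names here are illustrative):

import multiprocessing
import os
import signal
import time

def on_event(signum, frame):
    # runs in the parent whenever the child raises the signal
    print(f"parent {os.getpid()} received signal {signum}")

def child():
    # notify the parent a few times, one second apart
    for _ in range(3):
        time.sleep(1)
        os.kill(os.getppid(), signal.SIGUSR1)

if __name__ == "__main__":
    signal.signal(signal.SIGUSR1, on_event)   # register before deploying, as deploy() does
    p = multiprocessing.Process(target=child, daemon=True)
    p.start()
    p.join()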
test_ssl.py
|
# -*- coding: utf-8 -*-
# Test the support for SSL and sockets
import sys
import unittest
from test import test_support as support
from test.script_helper import assert_python_ok
import asyncore
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib2
import traceback
import weakref
import platform
import functools
from contextlib import closing
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = CERTFILE.encode(sys.getfilesystemencoding())
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = ONLYCERT.encode(sys.getfilesystemencoding())
BYTES_ONLYKEY = ONLYKEY.encode(sys.getfilesystemencoding())
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = CAPATH.encode(sys.getfilesystemencoding())
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
class BasicTests(unittest.TestCase):
def test_sslwrap_simple(self):
# A crude test for the legacy API
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET))
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, (int, long))
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# socket.error raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertRaises(socket.error, ss.recv, 1)
self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
self.assertRaises(socket.error, ss.recvfrom, 1)
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with closing(ssl.wrap_socket(s)) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegexp(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with closing(ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)) as s:
self.assertRaisesRegexp(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = u'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in the first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = u'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, u'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, u'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(socket.socket()) as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s, server_side=True, certfile=CERTFILE)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegexp(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegexp(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0):
default |= ssl.OP_NO_COMPRESSION
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(IOError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegexp(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegexp(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegexp(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
ctx.load_verify_locations(cafile=BYTES_CERTFILE.decode('utf-8'))
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(IOError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError):
ctx.load_verify_locations(u'')
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read().decode("ascii")
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read().decode("ascii")
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegexp(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata=u"broken")
with self.assertRaisesRegexp(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(IOError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
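# (A note for maintainers, not asserted by this test: the locations that
# set_default_verify_paths() consults can usually be inspected with
# ssl.get_default_verify_paths(), assuming that helper is available in the
# ssl build under test.)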
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read().decode("ascii")
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test__https_verify_certificates(self):
# Unit test to check the context factory mapping
# The factories themselves are tested above
# This test will fail by design if run under PYTHONHTTPSVERIFY=0
# (as will various test_httplib tests)
# Uses a fresh SSL module to avoid affecting the real one
local_ssl = support.import_fresh_module("ssl")
# Certificate verification is enabled by default
self.assertIs(local_ssl._create_default_https_context,
local_ssl.create_default_context)
# Turn default verification off
local_ssl._https_verify_certificates(enable=False)
self.assertIs(local_ssl._create_default_https_context,
local_ssl._create_unverified_context)
# And back on
local_ssl._https_verify_certificates(enable=True)
self.assertIs(local_ssl._create_default_https_context,
local_ssl.create_default_context)
# The default behaviour is to enable
local_ssl._https_verify_certificates(enable=False)
local_ssl._https_verify_certificates()
self.assertIs(local_ssl._create_default_https_context,
local_ssl.create_default_context)
def test__https_verify_envvar(self):
# Unit test to check the PYTHONHTTPSVERIFY handling
# Need to use a subprocess so it can still be run under -E
https_is_verified = """import ssl, sys; \
status = "Error: _create_default_https_context does not verify certs" \
if ssl._create_default_https_context is \
ssl._create_unverified_context \
else None; \
sys.exit(status)"""
https_is_not_verified = """import ssl, sys; \
status = "Error: _create_default_https_context verifies certs" \
if ssl._create_default_https_context is \
ssl.create_default_context \
else None; \
sys.exit(status)"""
extra_env = {}
# Omitting it leaves verification on
assert_python_ok("-c", https_is_verified, **extra_env)
# Setting it to zero turns verification off
extra_env[ssl._https_verify_envvar] = "0"
assert_python_ok("-c", https_is_not_verified, **extra_env)
# Any other value should also leave it on
for setting in ("", "1", "enabled", "foo"):
extra_env[ssl._https_verify_envvar] = setting
assert_python_ok("-c", https_is_verified, **extra_env)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with closing(socket.socket()) as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with closing(ctx.wrap_socket(c, False, do_handshake_on_connect=False)) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
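# (Illustrative only, not executed by this test: such a capath directory is
# typically prepared with the OpenSSL "c_rehash" script, or by linking each
# certificate under both hash filenames, e.g. the values printed by
# "openssl x509 -subject_hash -noout -in cert.pem" and
# "openssl x509 -subject_hash_old -noout -in cert.pem" on OpenSSL >= 1.0.0.)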
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read().decode('ascii')
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
s.connect(remote)
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")) as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
with closing(socket.socket(socket.AF_INET)) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with closing(ctx1.wrap_socket(s)) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
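# In-band commands understood by run() below: an empty read closes the
# handler, b'over' closes the connection, b'STARTTLS'/b'ENDTLS' wrap or
# unwrap the socket (when starttls_server is enabled), b'CB tls-unique'
# echoes the server's channel-binding data, and anything else is echoed
# back lower-cased.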
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except socket.error as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
raise
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ssl.SSLError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accept(self):
sock_obj, addr = self.accept()
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with closing(client_context.wrap_socket(socket.socket(),
server_hostname=sni_name)) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds;
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except socket.error as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaisesRegexp(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="localhost")) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="invalid")) as s:
with self.assertRaisesRegexp(ssl.CertificateError,
"hostname 'invalid' doesn't match u?'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(socket.socket()) as s:
with self.assertRaisesRegexp(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
closing(socket.socket()) as sock, \
closing(ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except socket.error as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with closing(socket.socket()) as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except socket.error:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except socket.error as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib2.urlopen(url, context=context)
try:
dlen = f.info().getheader("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
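# The address-based methods (sendto, recvfrom, recvfrom_into) are marked
# expect_success=False: this test expects them to raise a ValueError whose
# message starts with the method name, which the loops below verify.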
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = u"PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = ssl.wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = [None]
peer = [None]
def serve():
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote[0], peer[0] = server.accept()
remote[0].recv(1)
t = threading.Thread(target=serve)
t.start()
# The client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote[0].close()
server.close()
# Sanity checks.
self.assertIsInstance(remote[0], ssl.SSLSocket)
self.assertEqual(peer[0], client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaises(ssl.SSLError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
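# (For TLS 1.0-1.2 the tls-unique channel binding is the 12-byte verify_data
# of the first Finished message, which is why the length check above expects 12.)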
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(new_cb_data)
self.assertEqual(len(new_cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if expected is None and IS_OPENSSL_1_1:
# OpenSSL 1.1.0 raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# SIGNED_CERTFILE2 (common name 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1.0/0.0  # deliberately raise ZeroDivisionError
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
ipsec_perf_tool.py
|
#!/usr/bin/env python3
"""
**********************************************************************
Copyright(c) 2021, Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************
"""
import threading
import queue
import os
import sys
import subprocess
import platform
import time
import argparse
import textwrap
# number of variants to run
TOTAL_VARIANTS = 0
# dictionary to store env vars
ENVS = None
# queues to store todo and completed variants
TODO_Q = None
DONE_Q = None
# don't output info to stderr if set
QUIET = False
# perf application name
PERF_APP = ''
# exit on error flag
EXIT_ERROR = False
class Variant:
"""Class to setup and run test case variant"""
def __init__(self, idx=None, arch=None, direction='encrypt', cipher_alg=None,
hash_alg=None, aead_alg=None, sizes=None, offset=None,
cold_cache=False, shani_off=False, gcm_job_api=False,
unhalted_cycles=False, quick_test=False, smoke_test=False,
imix=None, aad_size=None, job_iter=None):
"""Build perf app command line"""
global PERF_APP
self.idx = idx
self.arch = arch
self.direction = direction
self.cipher_alg = cipher_alg
self.hash_alg = hash_alg
self.aead_alg = aead_alg
self.sizes = sizes
self.offset = offset
self.cmd = '{} --no-progress-bar '.format(PERF_APP)
self.cmd_output = ''
self.out = []
self.core = None
self.cold_cache = cold_cache
self.shani_off = shani_off
self.gcm_job_api = gcm_job_api
self.unhalted_cycles = unhalted_cycles
self.quick_test = quick_test
self.smoke_test = smoke_test
self.imix = imix
self.aad_size = aad_size
self.job_iter = job_iter
if self.arch is not None:
self.cmd += ' --arch {}'.format(self.arch)
if self.offset is not None:
self.cmd += ' -o {}'.format(self.offset)
if self.aead_alg is not None:
if self.cipher_alg is not None or \
self.hash_alg is not None:
print("Invalid combination: aead + cipher / hash", \
file=sys.stderr)
sys.exit(1)
self.cmd += ' --aead-algo {}'.format(self.aead_alg)
if self.cipher_alg is not None:
if self.aead_alg is not None:
print("Invalid combination: aead + cipher", file=sys.stderr)
sys.exit(1)
self.cmd += ' --cipher-algo {}'.format(self.cipher_alg)
if self.hash_alg is not None:
if self.aead_alg is not None:
print("Invalid combination: aead + hash", file=sys.stderr)
sys.exit(1)
self.cmd += ' --hash-algo {}'.format(self.hash_alg)
if self.cipher_alg is not None or \
self.aead_alg is not None:
self.cmd += ' --cipher-dir {}'.format(self.direction)
if self.sizes is not None:
self.cmd += ' --job-size {}'.format(self.sizes)
if self.cold_cache is True:
self.cmd += ' -c'
if self.shani_off is True:
self.cmd += ' --shani-off'
if self.gcm_job_api is True:
self.cmd += ' --gcm-job-api'
if self.unhalted_cycles is True:
self.cmd += ' --unhalted-cycles'
if self.quick_test is True:
self.cmd += ' --quick'
if self.smoke_test is True:
self.cmd += ' --smoke'
if self.imix is not None:
self.cmd += ' --imix {}'.format(self.imix)
if self.aad_size is not None:
self.cmd += ' --aad-size {}'.format(self.aad_size)
if self.job_iter is not None:
self.cmd += ' --job-iter {}'.format(self.job_iter)
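# Illustrative example (not executed): assuming the perf app reports a cipher
# algorithm named 'aes-cbc-128', a call such as
#   Variant(idx=0, arch='AVX512', direction='encrypt',
#           cipher_alg='aes-cbc-128', sizes='64,1024', offset=24)
# builds roughly:
#   ipsec_perf --no-progress-bar --arch AVX512 -o 24 \
#              --cipher-algo aes-cbc-128 --cipher-dir encrypt --job-size 64,1024
# Real algorithm names should be taken from `ipsec_perf --print-info`.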
def run(self):
"""Run perf app and store output"""
try:
self.cmd_output = \
subprocess.run(self.cmd.split(), \
stdout=subprocess.PIPE, \
stderr=subprocess.PIPE, \
env=ENVS, check=True).stdout.decode('utf-8')
return True
except subprocess.CalledProcessError:
# on error - re-run and store stderr output
self.cmd_output = \
subprocess.run(self.cmd.split(), \
stderr=subprocess.PIPE, \
env=ENVS).stderr.decode('utf-8')
return False
def set_core(self, core):
"""Set core to run perf app on"""
self.core = core
mask = 1 << core
self.cmd += ' --cores {}'.format(str(hex(mask)))
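# e.g. set_core(0) appends '--cores 0x1' and set_core(3) appends '--cores 0x8'
# (a single-bit affinity mask per worker thread).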
def get_output(self):
"""Get output from run"""
return self.cmd_output
def get_cmd(self):
"""Get variant command line"""
return self.cmd
def get_idx(self):
"""Get assigned index"""
return self.idx
def get_info(self):
"""Get variant details"""
if self.idx is None:
idx = ''
else:
idx = self.idx
if self.cipher_alg is None:
cipher_alg = ''
else:
cipher_alg = self.cipher_alg
if self.hash_alg is None:
hash_alg = ''
elif cipher_alg == '':
hash_alg = self.hash_alg
else:
hash_alg = ' + ' + self.hash_alg
if self.aead_alg is None:
aead_alg = ''
else:
aead_alg = self.aead_alg
if self.core is None:
core = ''
else:
core = self.core
if self.direction is None:
direction = 'n/a'
else:
direction = self.direction
alg = '{}{}{}'.format(cipher_alg, hash_alg, aead_alg)
info = '{0:<5} {1:<4} {2:<6} {3:<7} {4:<40}'\
.format(idx, core, self.arch, direction, alg)
return info
def init_global_vars():
"""Initialize global variables"""
global TOTAL_VARIANTS
global ENVS
global TODO_Q
global DONE_Q
global QUIET
global PERF_APP
# init vars
TOTAL_VARIANTS = 0
QUIET = False
# include perf directory in PATH
path = '{}{}{}'.format(os.getenv('PATH'), os.pathsep, os.getcwd())
# set LD_LIBRARY_PATH if not already set
lib_path = os.getenv('LD_LIBRARY_PATH')
if lib_path is None:
lib_path = '../lib'
# create env vars dictionary to pass to subprocess module
ENVS = {'PATH' : path, 'LD_LIBRARY_PATH' : lib_path}
# init queues to store todo and completed variants
TODO_Q = queue.Queue()
DONE_Q = queue.Queue()
# detect OS and select app name
if platform.system() == 'Windows':
PERF_APP = 'ipsec_perf.exe'
else:
PERF_APP = 'ipsec_perf'
def get_info():
"""get system and app info from perf app output"""
global PERF_APP
archs = None
best_arch = None
cipher_algos = None
hash_algos = None
aead_algos = None
cmd = [PERF_APP, '--print-info']
output = subprocess.run(cmd, stdout=subprocess.PIPE, \
stderr=subprocess.PIPE, \
env=ENVS, check=True).stdout.decode('utf-8')
lines = output.rstrip().split('\n')
try:
for line in lines:
info = line.split(':')
if info[0] == 'Supported architectures':
archs = info[1].split()
if info[0] == 'Best architecture':
best_arch = info[1].split()
if info[0] == 'Supported cipher algorithms':
cipher_algos = info[1].split()
if info[0] == 'Supported hash algorithms':
hash_algos = info[1].split()
if info[0] == 'Supported aead algorithms':
aead_algos = info[1].split()
except (IndexError, ValueError):
print("Error parsing --print-info output:\n" \
"{}".format(output), file=sys.stderr)
if archs is None or best_arch is None or cipher_algos is None \
or hash_algos is None or aead_algos is None:
print("Error parsing system and app information", file=sys.stderr)
sys.exit(1)
return archs, best_arch, cipher_algos, hash_algos, aead_algos
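# The parsing above assumes `--print-info` output shaped roughly like the
# following (labels taken from the checks above, values purely illustrative):
#   Supported architectures: SSE AVX AVX2 AVX512
#   Best architecture: AVX512
#   Supported cipher algorithms: <list of cipher names>
#   Supported hash algorithms: <list of hash names>
#   Supported aead algorithms: <list of AEAD names>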
def parse_cores(core_str):
"""Parse core list passed through command line"""
num_cores = os.cpu_count()
cores = []
# remove spaces
core_str = core_str.replace(" ", "")  # str.replace returns a new string
# check if not a range
if '-' not in core_str:
cores = list(map(int, core_str.strip().split(',')))
else:
# parse range e.g. 2-8
core_str = core_str.strip().split('-')
for i in range(int(core_str[0]), int(core_str[1]) + 1):
cores.append(i)
# ensure valid cores specified
for core in cores:
if core < 0 or core >= num_cores:
print("Core {} out of range!".format(core), file=sys.stderr)
raise Exception()
return cores
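# Minimal usage sketch for parse_cores() (illustrative only, never called by
# the tool itself); it assumes the machine has at least two CPUs.
def _parse_cores_examples():
    """Show the two accepted core-list formats."""
    assert parse_cores('0,1') == [0, 1]   # comma-separated list
    assert parse_cores('0-1') == [0, 1]   # inclusive range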
def parse_results(variants):
"""Parse output of perf app for variant"""
out = []
# set header
lines = variants[0].get_output().split('\n')
for line in lines[:-1]:
out.append(line.split('\t')[0])
# append output for all variants to single list
for var in variants:
lines = var.get_output().split('\n')
for i in range(0, len(lines) - 1):
out[i] += '\t{}'.format(lines[i].split()[1])
return out
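# Shape of the merge performed above (illustrative values): if variant 0 prints
# lines like '64\t210' and variant 1 prints '64\t198', the combined table row
# becomes '64\t210\t198' -- first column from variant 0, one extra column per
# variant.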
def parse_args():
"""Parse command line arguments"""
global QUIET
cores = None
directions = ['encrypt', 'decrypt']
offset = 24
alg_types = ['cipher-only', 'hash-only', 'aead-only', 'cipher-hash-all']
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description="Wrapper script for the ipsec-mb " \
"performance application enabling extended functionality")
# parse and validate args
parser.add_argument("-a", "--arch", choices=['SSE', 'AVX', 'AVX2', 'AVX512'],
default=None, action='append',
help="set architecture to test (default tests all supported archs)")
parser.add_argument("-c", "--cores", default=cores,
help="list/range of cores e.g. 2-8 or 3,4,5")
parser.add_argument("-d", "--direction", default=None,
choices=directions, help="Cipher direction")
parser.add_argument("-o", "--offset", default=offset, type=int,
help="offset for the SHA size increment, default is 24")
parser.add_argument("-t", "--alg-type", default=None, action='append', choices=alg_types,
help="algorithm types to test")
parser.add_argument("-s", "--job-size", default=None,
help=textwrap.dedent('''\
size of the cipher & hash job in bytes.
It can be:
- single value: test single size
- list: test multiple sizes separated by commas
- range: test multiple sizes with following format
min:step:max (e.g. 16:16:256)\n'''))
parser.add_argument("-q", "--quiet", default=False, action='store_true',
help="disable verbose output")
parser.add_argument("--cold-cache", default=False, action='store_true',
help="use cold cache, it uses warm as default")
parser.add_argument("--arch-best", action='store_true',
help="detect available architectures and run only on the best one")
parser.add_argument("--shani-off", action='store_true', help="don't use SHA extensions")
parser.add_argument("--gcm-job-api", action='store_true',
help="use JOB API for GCM perf tests (raw GCM API is default)")
parser.add_argument("--unhalted-cycles", action='store_true',
help=textwrap.dedent('''\
measure using unhalted cycles (requires root).
Note: RDTSC is used by default'''))
parser.add_argument("--quick", action='store_true',
help=textwrap.dedent('''\
reduces number of test iterations by x10
(less precise but quicker)'''))
parser.add_argument("--smoke", action='store_true',
help=textwrap.dedent('''\
very quick, imprecise and without print out
(for validation only)'''))
parser.add_argument("--imix", default=None,
help=textwrap.dedent('''\
set numbers that establish occurrence proportions between packet sizes.
It requires a list of sizes through --job-size.
(e.g. --imix 4,6 --job-size 64,128 will generate
a series of job sizes where on average 4 out of 10
packets will be 64B long and 6 out of 10 packets
will be 128B long)'''))
parser.add_argument("--aad-size", default=None, type=int,
help="size of AAD for AEAD algorithms")
parser.add_argument("--job-iter", default=None, type=int,
help="number of tests iterations for each job size")
args = parser.parse_args()
# validate and convert values where necessary
if args.arch is not None and args.arch_best is True:
print("{}: error: argument -a/--arch cannot be used with " \
"--arch-best".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
if args.cores is not None:
try:
cores = parse_cores(args.cores)
except Exception:
print("{}: error: argument -c/--cores: invalid value " \
"{}".format(sys.argv[0], args.cores), file=sys.stderr)
sys.exit(1)
if args.imix is not None and args.job_size is None:
print("{}: error: argument --imix must be used with " \
"--job-size".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
if args.alg_type is not None:
alg_types = args.alg_type
else:
# exclude the full cipher+hash sweep ('cipher-hash-all') from the default run
alg_types = alg_types[:-1]
if args.direction is not None:
directions = [args.direction]
if args.quiet is True:
QUIET = True
return args.arch, cores, directions, args.offset, \
alg_types, args.job_size, args.cold_cache, args.arch_best, \
args.shani_off, args.gcm_job_api, args.unhalted_cycles, \
args.quick, args.smoke, args.imix, \
args.aad_size, args.job_iter
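# Example invocations (illustrative; available flag values depend on the local
# build and CPU):
#   ./ipsec_perf_tool.py -a AVX2 -t cipher-only -s 64,1024 -c 2-4
#   ./ipsec_perf_tool.py --arch-best --quick --imix 4,6 --job-size 64,128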
def run_test(core=None):
"""
Main processing thread function
1. Dequeue variants from todo queue until empty
2. Run performance test for variant
3. Place completed variants in completed (done) queue
"""
global QUIET
global TODO_Q
global DONE_Q
global EXIT_ERROR
while TODO_Q.empty() is False:
variant = TODO_Q.get()
# skip if error encountered
if EXIT_ERROR is True:
if QUIET is False:
print('{} {}'.format(variant.get_info(), '...skipped'), file=sys.stderr)
TODO_Q.task_done()
continue
# set core if specified
if core is not None:
variant.set_core(core)
# print variant information
if QUIET is False:
print(variant.get_info(), file=sys.stderr)
# run variant
if variant.run() is False:
print('Error encountered running: {}\nOutput:\n{}'\
.format(variant.get_cmd(),
variant.get_output()),
file=sys.stderr)
EXIT_ERROR = True
DONE_Q.put(variant)
TODO_Q.task_done()
def main():
"""
Main function to:
- parse command line args
- generate and enqueue list of variants to run
- schedule variants across selected cores
- post process results and print to stdout
"""
global TOTAL_VARIANTS
global QUIET
global TODO_Q
global DONE_Q
global EXIT_ERROR
header = '\n{0:<5} {1:<4} {2:<6} {3:<7} {4:<40}'\
.format('NO', 'CORE', 'ARCH', 'DIR', 'ALG')
result = [] # list to store parsed results
# init global vars
init_global_vars()
supported_archs, best_arch, cipher_algos, hash_algos, aead_algos = get_info()
# parse command line args
archs, cores, directions, offset, alg_types, sizes, cold_cache, arch_best, \
shani_off, gcm_job_api, unhalted_cycles, quick_test, smoke_test, \
imix, aad_size, job_iter = parse_args()
# validate requested archs are supported
if arch_best is True:
archs = best_arch
elif archs is None:
archs = supported_archs
else:
for arch in archs:
if arch not in supported_archs:
print('Error: {} arch not supported!'.format(arch), file=sys.stderr)
sys.exit(1)
# print args
if QUIET is False:
print('Testing:', file=sys.stderr)
print(' Architectures: {}'.format(archs), file=sys.stderr)
print(' Algorithms: {}'.format(alg_types), file=sys.stderr)
print(' Directions: {}'.format(directions), file=sys.stderr)
if offset is not None:
print(' Offset: {}'.format(offset), file=sys.stderr)
if aad_size is not None:
print(' AAD size: {}'.format(aad_size), file=sys.stderr)
if sizes is not None:
print(' Sizes: {}'.format(sizes), file=sys.stderr)
if imix is not None:
print(' IMIX: {}'.format(imix), file=sys.stderr)
if cores is not None:
print(' Cores: {}'.format(cores), file=sys.stderr)
print(' Cache: {}'.format("cold" if cold_cache else "warm"), file=sys.stderr)
print(' SHANI: {}'.format("off" if shani_off else "on"), file=sys.stderr)
print(' GCM API: {}'.format("job" if gcm_job_api else "direct"), file=sys.stderr)
print(' Measuring using {}'.format("unhalted cycles" if unhalted_cycles \
else "rdtsc"), file=sys.stderr)
if quick_test is True or smoke_test is True:
print(' Test type: {}'.format("smoke" if smoke_test else "quick"), file=sys.stderr)
if job_iter is not None:
print(' Job iterations: {}'.format(job_iter), file=sys.stderr)
print(header, file=sys.stderr)
# fill todo queue with variants to test
for arch in archs:
if 'cipher-only' in alg_types:
for direction in directions:
for cipher_alg in cipher_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
offset=offset, sizes=sizes, cipher_alg=cipher_alg,
cold_cache=cold_cache, shani_off=shani_off,
gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
quick_test=quick_test, smoke_test=smoke_test, imix=imix,
aad_size=aad_size, job_iter=job_iter))
TOTAL_VARIANTS += 1
if 'hash-only' in alg_types:
# skip direction for hash only algs
for hash_alg in hash_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=None,
offset=offset, sizes=sizes, hash_alg=hash_alg,
cold_cache=cold_cache, shani_off=shani_off,
gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
quick_test=quick_test, smoke_test=smoke_test, imix=imix,
aad_size=aad_size, job_iter=job_iter))
TOTAL_VARIANTS += 1
if 'aead-only' in alg_types:
for direction in directions:
for aead_alg in aead_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
offset=offset, sizes=sizes, aead_alg=aead_alg,
cold_cache=cold_cache, shani_off=shani_off,
gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
quick_test=quick_test, smoke_test=smoke_test, imix=imix,
aad_size=aad_size, job_iter=job_iter))
TOTAL_VARIANTS += 1
if 'cipher-hash-all' in alg_types:
for direction in directions:
# all cipher + hash combinations
for cipher_alg in cipher_algos:
for hash_alg in hash_algos:
TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
offset=offset, sizes=sizes, cipher_alg=cipher_alg,
hash_alg=hash_alg, cold_cache=cold_cache,
shani_off=shani_off, gcm_job_api=gcm_job_api,
unhalted_cycles=unhalted_cycles, quick_test=quick_test,
smoke_test=smoke_test, imix=imix, aad_size=aad_size,
job_iter=job_iter))
TOTAL_VARIANTS += 1
# take starting timestamp
start_ts = time.time()
# If cores selected start a new thread on each core
# otherwise start single thread without specifying a core
#
# Each thread takes a variant from the todo queue
# and places it in the done queue when complete
if cores is None:
threading.Thread(target=run_test).start()
else:
for core in cores:
threading.Thread(target=run_test, args=(core,)).start()
# wait for all threads to complete
TODO_Q.join()
# take end timestamp
end_ts = time.time()
# exit if error encountered
if EXIT_ERROR is True:
print('Error encountered while running tests!', file=sys.stderr)
sys.exit(1)
# output time taken to complete
runtime = end_ts - start_ts
if QUIET is False:
print("Time to complete: {:.3f} seconds" \
.format(runtime), file=sys.stderr)
# transfer completed runs from the
# done queue to the results list
while DONE_Q.empty() is False:
variant = DONE_Q.get()
result.append(variant)
# sort by idx
result.sort(key=lambda x: x.get_idx())
# parse results and print to stdout
output = parse_results(result)
for line in output:
print(line)
if __name__ == "__main__":
main()
|
curses_ui_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import curses
import os
import tempfile
import threading
import numpy as np
from six.moves import queue
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import cli_test_utils
from tensorflow.python.debug.cli import curses_ui
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
def string_to_codes(cmd):
return [ord(c) for c in cmd]
def codes_to_string(cmd_code):
# Omit non-ASCII key codes.
return "".join([chr(code) for code in cmd_code if code < 256])
class MockCursesUI(curses_ui.CursesUI):
"""Mock subclass of CursesUI that bypasses actual terminal manipulations."""
def __init__(self,
height,
width,
command_sequence=None):
self._height = height
self._width = width
self._command_sequence = command_sequence
self._command_counter = 0
# The mock class has no actual textbox. So use this variable to keep
# track of what's entered in the textbox on creation.
self._curr_existing_command = ""
# Observers for test.
# Observers of screen output.
self.unwrapped_outputs = []
self.wrapped_outputs = []
self.scroll_messages = []
self.output_array_pointer_indices = []
self.output_pad_rows = []
# Observers of command textbox.
self.existing_commands = []
# Observer for tab-completion candidates.
self.candidates_lists = []
# Observer for the main menu.
self.main_menu_list = []
# Observer for toast messages.
self.toasts = []
curses_ui.CursesUI.__init__(
self,
config=cli_config.CLIConfig(
config_file_path=os.path.join(tempfile.mkdtemp(), ".tfdbg_config")))
# Override the default path to the command history file to avoid test
# concurrency issues.
fd, history_file_path = tempfile.mkstemp()
os.close(fd)
self._command_history_store = debugger_cli_common.CommandHistory(
history_file_path=history_file_path)
# Below, override the _screen_ prefixed member methods that interact with the
# actual terminal, so that the mock can run in a terminal-less environment.
# TODO(cais): Search for a way to have a mock terminal object that behaves
# like the actual terminal, so that we can test the terminal interaction
# parts of the CursesUI class.
def _screen_init(self):
pass
def _screen_refresh_size(self):
self._max_y = self._height
self._max_x = self._width
def _screen_launch(self, enable_mouse_on_start):
self._mouse_enabled = enable_mouse_on_start
def _screen_terminate(self):
pass
def _screen_refresh(self):
pass
def _screen_create_command_window(self):
pass
def _screen_create_command_textbox(self, existing_command=None):
"""Override to insert observer of existing commands.
Used in testing of history navigation and tab completion.
Args:
existing_command: Command string entered to the textbox at textbox
creation time. Note that the textbox does not actually exist in this
mock subclass. This method only keeps track of and records the state.
"""
self.existing_commands.append(existing_command)
self._curr_existing_command = existing_command
def _screen_new_output_pad(self, rows, cols):
return "mock_pad"
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
pass
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
pass
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pass
def _screen_get_user_command(self):
command = self._command_sequence[self._command_counter]
self._command_key_counter = 0
for c in command:
if c == curses.KEY_RESIZE:
# Special case for simulating a terminal resize event in curses.
self._height = command[1]
self._width = command[2]
self._on_textbox_keypress(c)
self._command_counter += 1
return ""
elif c == curses.KEY_MOUSE:
mouse_x = command[1]
mouse_y = command[2]
self._command_counter += 1
self._textbox_curr_terminator = c
return self._fetch_hyperlink_command(mouse_x, mouse_y)
else:
y = self._on_textbox_keypress(c)
self._command_key_counter += 1
if y == curses_ui.CursesUI.CLI_TERMINATOR_KEY:
break
self._command_counter += 1
# Take into account pre-existing string automatically entered on textbox
# creation.
return self._curr_existing_command + codes_to_string(command)
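# Each element of command_sequence is a list of key codes. Plain commands come
# from string_to_codes("...\n"); special events are encoded in-band, e.g.
# [curses.KEY_RESIZE, 100, 85] simulates a resize to height 100 / width 85 and
# [curses.KEY_MOUSE, x, y] simulates a click at column x, row y.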
def _screen_getmouse(self):
output = (0, self._mouse_xy_sequence[self._mouse_counter][0],
self._mouse_xy_sequence[self._mouse_counter][1], 0,
curses.BUTTON1_CLICKED)
self._mouse_counter += 1
return output
def _screen_gather_textbox_str(self):
return codes_to_string(self._command_sequence[self._command_counter]
[:self._command_key_counter])
def _scroll_output(self, direction, line_index=None):
"""Override to observe screen output.
This method is invoked after every command that generates a new screen
output and after every keyboard triggered screen scrolling. Therefore
it is a good place to insert the observer.
Args:
direction: which direction to scroll.
line_index: (int or None) Optional line index to scroll to. See doc string
of the overridden method for more information.
"""
curses_ui.CursesUI._scroll_output(self, direction, line_index=line_index)
self.unwrapped_outputs.append(self._curr_unwrapped_output)
self.wrapped_outputs.append(self._curr_wrapped_output)
self.scroll_messages.append(self._scroll_info)
self.output_array_pointer_indices.append(self._output_array_pointer_indices)
self.output_pad_rows.append(self._output_pad_row)
def _display_main_menu(self, output):
curses_ui.CursesUI._display_main_menu(self, output)
self.main_menu_list.append(self._main_menu)
def _screen_render_nav_bar(self):
pass
def _screen_render_menu_pad(self):
pass
def _display_candidates(self, candidates):
curses_ui.CursesUI._display_candidates(self, candidates)
self.candidates_lists.append(candidates)
def _toast(self, message, color=None, line_index=None):
curses_ui.CursesUI._toast(self, message, color=color, line_index=line_index)
self.toasts.append(message)
class CursesTest(test_util.TensorFlowTestCase):
_EXIT = string_to_codes("exit\n")
def _babble(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Do babble.", usage=argparse.SUPPRESS)
ap.add_argument(
"-n",
"--num_times",
dest="num_times",
type=int,
default=60,
help="How many times to babble")
ap.add_argument(
"-l",
"--line",
dest="line",
type=str,
default="bar",
help="The content of each line")
ap.add_argument(
"-k",
"--link",
dest="link",
action="store_true",
help="Create a command link on each line")
ap.add_argument(
"-m",
"--menu",
dest="menu",
action="store_true",
help="Create a menu for testing")
parsed = ap.parse_args(args)
lines = [parsed.line] * parsed.num_times
font_attr_segs = {}
if parsed.link:
for i in range(len(lines)):
font_attr_segs[i] = [(
0,
len(lines[i]),
debugger_cli_common.MenuItem("", "babble"),)]
annotations = {}
if parsed.menu:
menu = debugger_cli_common.Menu()
menu.append(
debugger_cli_common.MenuItem("babble again", "babble"))
menu.append(
debugger_cli_common.MenuItem("ahoy", "ahoy", enabled=False))
annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
return output
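# For reference: self._babble(["-n", "3", "-l", "foo"]) returns a RichTextLines
# object whose .lines attribute is ["foo", "foo", "foo"]; adding -k attaches a
# MenuItem link to every line and -m attaches a main-menu annotation.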
def _print_ones(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Print all-one matrix.", usage=argparse.SUPPRESS)
ap.add_argument(
"-s",
"--size",
dest="size",
type=int,
default=3,
help="Size of the matrix. For example, of the value is 3, "
"the matrix will have shape (3, 3)")
parsed = ap.parse_args(args)
m = np.ones([parsed.size, parsed.size])
return tensor_format.format_tensor(m, "m")
def testInitialization(self):
ui = MockCursesUI(40, 80)
self.assertEqual(0, ui._command_pointer)
self.assertEqual([], ui._active_command_history)
self.assertEqual("", ui._pending_command)
def testCursesUiInChildThreadStartsWithoutException(self):
result = queue.Queue()
def child_thread():
try:
MockCursesUI(40, 80)
except ValueError as e:
result.put(e)
t = threading.Thread(target=child_thread)
t.start()
t.join()
self.assertTrue(result.empty())
def testRunUIExitImmediately(self):
"""Make sure that the UI can exit properly after launch."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.run_ui()
# No screen output should have happened.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIEmptyCommand(self):
"""Issue an empty command then exit."""
ui = MockCursesUI(40, 80, command_sequence=[[], self._EXIT])
ui.run_ui()
# Empty command should not lead to any screen output.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIInvalidCommandPrefix(self):
"""Handle an unregistered command prefix."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("foo\n"), self._EXIT])
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.unwrapped_outputs[0].lines)
# TODO(cais): Add explanation for the 35 extra lines.
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.wrapped_outputs[0].lines[:1])
# A single line of output should not have caused scrolling.
self.assertNotIn("Scroll", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
def testRunUIInvalidCommandSyntax(self):
"""Handle a command with invalid syntax."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -z\n"), self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertIn("Mouse:", ui.scroll_messages[0])
self.assertEqual(
["Syntax error for command: babble", "For help, do \"help babble\""],
ui.unwrapped_outputs[0].lines)
def testRunUIScrollTallOutputPageDownUp(self):
"""Scroll tall output with PageDown and PageUp."""
# Use PageDown and PageUp to scroll back and forth a little before exiting.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"), [curses.KEY_NPAGE] * 2 +
[curses.KEY_PPAGE] + self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# One initial screen output plus three scroll events: four entries in total.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
# After 1st scrolling (PageDown).
# The screen output shouldn't have changed. Only the viewport should.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[1])
self.assertIn("Mouse:", ui.scroll_messages[1])
# After 2nd scrolling (PageDown).
self.assertIn("Scroll (PgDn/PgUp): 3.39%", ui.scroll_messages[2])
self.assertIn("Mouse:", ui.scroll_messages[2])
# After 3rd scrolling (PageUp).
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[3])
self.assertIn("Mouse:", ui.scroll_messages[3])
def testCutOffTooManyOutputLines(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -n 20\n"), self._EXIT])
# Modify max_output_lines so that this test doesn't use too much time or
# memory.
ui.max_output_lines = 10
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(["bar"] * 10 + ["Output cut off at 10 lines!"],
ui.wrapped_outputs[0].lines[:11])
def testRunUIScrollTallOutputEndHome(self):
"""Scroll tall output with PageDown and PageUp."""
# Use End and Home to scroll a little before exiting to test scrolling.
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble\n"),
[curses.KEY_END] * 2 + [curses.KEY_HOME] + self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# One initial screen output plus three scroll events: four entries in total.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
# After 1st scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])
# After 2nd scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[2])
# After 3rd scrolling (Home).
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
def testRunUIWithInitCmd(self):
"""Run UI with an initial command specified."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui(init_command="babble")
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testCompileHelpWithoutHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
def testCompileHelpWithHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
help_intro = debugger_cli_common.RichTextLines(
["This is a curses UI.", "All it can do is 'babble'.", ""])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.set_help_intro(help_intro)
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(
help_intro.lines + ["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:7])
def testCommandHistoryNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
[curses.KEY_UP], # Hit Up and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
for i in [0, 1]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
def testCommandHistoryNavBackwardTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up twice and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardOverLimit(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up three times and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardThenForward(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_DOWN], # Hit Up twice and Down once.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st output is for command "help".
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
# The 2nd and 3rd outputs are for command "babble".
for i in [1, 2]:
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[i].lines)
def testCommandHistoryPrefixNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\n"),
string_to_codes("babble -n 10\n"),
string_to_codes("help\n"),
string_to_codes("b") + [curses.KEY_UP], # Navigate with prefix.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["bar"], ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[1].lines)
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[2].lines[:4])
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[3].lines)
def testTerminalResize(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"),
[curses.KEY_RESIZE, 100, 85], # Resize to [100, 85]
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The resize event should have caused a second screen output event.
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(2, len(ui.wrapped_outputs))
self.assertEqual(2, len(ui.scroll_messages))
# The 1st and 2nd screen outputs should be identical (unwrapped).
self.assertEqual(ui.unwrapped_outputs[0], ui.unwrapped_outputs[1])
# The 1st scroll info should contain scrolling, because the screen size
# is less than the number of lines in the output.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testTabCompletionWithCommonPrefix(self):
# Type "b" and trigger tab completion.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["ba"])
ui.run_ui()
# The automatically registered exit commands "exit" and "quit" should not
# appear in the tab completion candidates because they don't start with
# "b".
self.assertEqual([["ba", "babble"]], ui.candidates_lists)
# "ba" is a common prefix of the two candidates. So the "ba" command should
# have been issued after the Enter.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionEmptyTriggerWithoutCommonPrefix(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
# Use a different alias "a" instead.
ui.run_ui()
# The manually registered command, along with the automatically registered
# exit commands should appear in the candidates.
self.assertEqual(
[["a", "babble", "cfg", "config", "exit", "h", "help", "m", "mouse",
"quit"]], ui.candidates_lists)
# The candidates share no common prefix, so no command should have been
# issued.
self.assertEqual(0, len(ui.unwrapped_outputs))
self.assertEqual(0, len(ui.wrapped_outputs))
self.assertEqual(0, len(ui.scroll_messages))
def testTabCompletionNonemptyTriggerSingleCandidate(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# There is only one candidate, so no candidates should have been displayed.
# Instead, the completion should have been automatically keyed in, leading
# to the "babble" command being issued.
self.assertEqual([[]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionNoMatch(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("c\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# Only the invalid command "c" should have been issued.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.unwrapped_outputs[0].lines)
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.wrapped_outputs[0].lines[:1])
def testTabCompletionOneWordContext(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "20", "30", "300"])
ui.run_ui()
self.assertEqual([["30", "300"]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 30, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 30, ui.wrapped_outputs[0].lines[:30])
def testTabCompletionTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\t"), # Trigger tab completion.
string_to_codes("2\t"), # With more prefix, tab again.
string_to_codes("3\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "120", "123"])
ui.run_ui()
# There should have been two different lists of candidates.
self.assertEqual([["10", "120", "123"], ["120", "123"]],
ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 123, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 123, ui.wrapped_outputs[0].lines[:123])
def testRegexSearch(self):
"""Test regex search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/a\n"), # Regex search and highlight.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The unwrapped (original) output should never have any highlighting.
self.assertEqual(3, len(ui.unwrapped_outputs))
for i in range(3):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
# The wrapped outputs should show highlighting depending on the regex.
self.assertEqual(3, len(ui.wrapped_outputs))
# The first output should have no highlighting.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
# The second output should have highlighting for "b" and "r".
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
# The third output should have highlighting for "a" only.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(1, 2, "black_on_white")],
ui.wrapped_outputs[2].font_attr_segs[i])
def testRegexSearchContinuation(self):
"""Test continuing scrolling down to next regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The 1st output is for the non-searched output. The other three are for
# the searched output. Even though continuation search "/" is performed
# four times, there should be only three searched outputs, because the
# last one has exceeded the end.
self.assertEqual(4, len(ui.unwrapped_outputs))
for i in range(4):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
for j in range(1, 4):
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[j].lines[:3])
self.assertEqual({
0: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
1: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
2: [(0, 1, "black_on_white"), (2, 3, "black_on_white")]
}, ui.wrapped_outputs[j].font_attr_segs)
self.assertEqual([0, 0, 1, 2], ui.output_pad_rows)
def testRegexSearchUnderLineWrapping(self):
ui = MockCursesUI(
40,
6, # Use a narrow window to trigger line wrapping
command_sequence=[
string_to_codes("babble -n 3 -l foo-bar-baz-qux\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some")
ui.run_ui()
self.assertEqual(4, len(ui.wrapped_outputs))
for wrapped_output in ui.wrapped_outputs:
self.assertEqual(["foo-", "bar-", "baz-", "qux"] * 3,
wrapped_output.lines[0 : 12])
# The scroll location should reflect the line wrapping.
self.assertEqual([0, 0, 4, 8], ui.output_pad_rows)
def testRegexSearchNoMatchContinuation(self):
"""Test continuing scrolling when there is no regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The regex search and continuation search in the 3rd command should not
# have produced any output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchContinuationWithoutSearch(self):
"""Test continuation scrolling when no regex search has been performed."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchWithInvalidRegex(self):
"""Test using invalid regex to search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/[\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# Invalid regex should not have led to a new screen of output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
# Invalid regex should have led to a toast error message.
self.assertEqual(
[MockCursesUI._UI_WAIT_MESSAGE,
"ERROR: Invalid regular expression: \"[\"",
MockCursesUI._UI_WAIT_MESSAGE],
ui.toasts)
def testRegexSearchFromCommandHistory(self):
"""Test regex search commands are recorded in command history."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("babble -n 4\n"),
[curses.KEY_UP],
[curses.KEY_UP],
string_to_codes("\n"), # Hit Up twice and Enter.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
self.assertEqual(["bar"] * 4, ui.wrapped_outputs[2].lines[:4])
self.assertEqual({}, ui.wrapped_outputs[2].font_attr_segs)
# The regex search command loaded from history should have worked on the
# new screen output.
self.assertEqual(["bar"] * 4, ui.wrapped_outputs[3].lines[:4])
for i in range(4):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[3].font_attr_segs[i])
def testDisplayTensorWithIndices(self):
"""Test displaying tensor with indices."""
ui = MockCursesUI(
9, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
[curses.KEY_NPAGE],
[curses.KEY_NPAGE],
[curses.KEY_NPAGE],
[curses.KEY_END],
[curses.KEY_NPAGE], # This PageDown goes over the bottom limit.
[curses.KEY_PPAGE],
[curses.KEY_PPAGE],
[curses.KEY_PPAGE],
[curses.KEY_HOME],
            [curses.KEY_PPAGE],  # This PageUp goes over the top limit.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
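    # Each key press in the command sequence above produces one screen output.
    # output_array_pointer_indices maps screen row 0 (top) and -1 (bottom) to
    # the [row, col] element of the tensor shown there, or None when that edge
    # of the screen is not showing an array element.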
self.assertEqual(11, len(ui.unwrapped_outputs))
self.assertEqual(11, len(ui.output_array_pointer_indices))
self.assertEqual(11, len(ui.scroll_messages))
for i in range(11):
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"m\":", ""], ui.unwrapped_outputs[i].lines[:2])
self.assertEqual(
repr(np.ones([5, 5])).split("\n"), ui.unwrapped_outputs[i].lines[2:])
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[0])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[0])
# Scrolled down one line.
self.assertEqual({
0: None,
-1: [2, 0]
}, ui.output_array_pointer_indices[1])
self.assertIn(" Scroll (PgDn/PgUp): 16.67% -[2,0] ", ui.scroll_messages[1])
# Scrolled down one line.
self.assertEqual({
0: [0, 0],
-1: [3, 0]
}, ui.output_array_pointer_indices[2])
self.assertIn(" Scroll (PgDn/PgUp): 33.33% [0,0]-[3,0] ",
ui.scroll_messages[2])
# Scrolled down one line.
self.assertEqual({
0: [1, 0],
-1: [4, 0]
}, ui.output_array_pointer_indices[3])
self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
ui.scroll_messages[3])
# Scroll to the bottom.
self.assertEqual({
0: [4, 0],
-1: None
}, ui.output_array_pointer_indices[4])
self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[4])
    # An attempt to scroll beyond the bottom should lead to no change.
self.assertEqual({
0: [4, 0],
-1: None
}, ui.output_array_pointer_indices[5])
self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[5])
# Scrolled up one line.
self.assertEqual({
0: [3, 0],
-1: None
}, ui.output_array_pointer_indices[6])
self.assertIn(" Scroll (PgDn/PgUp): 83.33% [3,0]- ", ui.scroll_messages[6])
# Scrolled up one line.
self.assertEqual({
0: [2, 0],
-1: None
}, ui.output_array_pointer_indices[7])
self.assertIn(" Scroll (PgDn/PgUp): 66.67% [2,0]- ", ui.scroll_messages[7])
# Scrolled up one line.
self.assertEqual({
0: [1, 0],
-1: [4, 0]
}, ui.output_array_pointer_indices[8])
self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
ui.scroll_messages[8])
# Scroll to the top.
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[9])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[9])
    # An attempt to scroll past the top limit should lead to no change.
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[10])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[10])
def testScrollTensorByValidIndices(self):
"""Test scrolling to specified (valid) indices in a tensor."""
ui = MockCursesUI(
8, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
string_to_codes("@[0, 0]\n"), # Scroll to element [0, 0].
string_to_codes("@1,0\n"), # Scroll to element [3, 0].
string_to_codes("@[0,2]\n"), # Scroll back to line 0.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.output_array_pointer_indices))
for i in range(4):
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"m\":", ""], ui.unwrapped_outputs[i].lines[:2])
self.assertEqual(
repr(np.ones([5, 5])).split("\n"), ui.unwrapped_outputs[i].lines[2:])
self.assertEqual({
0: None,
-1: [0, 0]
}, ui.output_array_pointer_indices[0])
self.assertEqual({
0: [0, 0],
-1: [2, 0]
}, ui.output_array_pointer_indices[1])
self.assertEqual({
0: [1, 0],
-1: [3, 0]
}, ui.output_array_pointer_indices[2])
self.assertEqual({
0: [0, 0],
-1: [2, 0]
}, ui.output_array_pointer_indices[3])
def testScrollTensorByInvalidIndices(self):
"""Test scrolling to specified invalid indices in a tensor."""
ui = MockCursesUI(
8, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
string_to_codes("@[10, 0]\n"), # Scroll to invalid indices.
string_to_codes("@[]\n"), # Scroll to invalid indices.
string_to_codes("@\n"), # Scroll to invalid indices.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
# Because all scroll-by-indices commands are invalid, there should be only
# one output event.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.output_array_pointer_indices))
# Check error messages.
self.assertEqual("ERROR: Indices exceed tensor dimensions.", ui.toasts[2])
self.assertEqual("ERROR: invalid literal for int() with base 10: ''",
ui.toasts[4])
self.assertEqual("ERROR: Empty indices.", ui.toasts[6])
def testWriteScreenOutputToFileWorks(self):
fd, output_path = tempfile.mkstemp()
os.close(fd)
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2>%s\n" % output_path),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
with gfile.Open(output_path, "r") as f:
self.assertEqual("bar\nbar\n", f.read())
# Clean up output file.
gfile.Remove(output_path)
def testIncompleteRedirectErrors(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2 >\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(["ERROR: Redirect file path is empty"], ui.toasts)
self.assertEqual(0, len(ui.unwrapped_outputs))
def testAppendingRedirectErrors(self):
fd, output_path = tempfile.mkstemp()
os.close(fd)
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2 >> %s\n" % output_path),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(
["Syntax error for command: babble", "For help, do \"help babble\""],
ui.unwrapped_outputs[0].lines)
# Clean up output file.
gfile.Remove(output_path)
def testMouseOffTakesEffect(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("mouse off\n"), string_to_codes("babble\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertFalse(ui._mouse_enabled)
self.assertIn("Mouse: OFF", ui.scroll_messages[-1])
def testMouseOffAndOnTakeEffect(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("mouse off\n"), string_to_codes("mouse on\n"),
string_to_codes("babble\n"), self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertTrue(ui._mouse_enabled)
self.assertIn("Mouse: ON", ui.scroll_messages[-1])
def testMouseClickOnLinkTriggersCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
[curses.KEY_MOUSE, 1, 4], # A click on a hyperlink.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testMouseClickOnLinkWithExistingTextTriggersCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
string_to_codes("foo"), # Enter some existing code in the textbox.
[curses.KEY_MOUSE, 1, 4], # A click on a hyperlink.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
  def testMouseClickOffLinkDoesNotTriggerCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
# A click off a hyperlink (too much to the right).
[curses.KEY_MOUSE, 8, 4],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
    # The mouse click event should not have triggered any command.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
# This command should have generated no main menus.
self.assertEqual([None], ui.main_menu_list)
def testMouseClickOnEnabledMenuItemWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -m\n"),
# A click on the enabled menu item.
[curses.KEY_MOUSE, 3, 2],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
# Check the content of the menu.
self.assertEqual(["| babble again | ahoy | "], ui.main_menu_list[0].lines)
self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs))
self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs[0]))
item_annot = ui.main_menu_list[0].font_attr_segs[0][0]
self.assertEqual(2, item_annot[0])
self.assertEqual(14, item_annot[1])
self.assertEqual("babble", item_annot[2][0].content)
self.assertEqual("underline", item_annot[2][1])
# The output from the menu-triggered command does not have a menu.
self.assertIsNone(ui.main_menu_list[1])
def testMouseClickOnDisabledMenuItemTriggersNoCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -m\n"),
# A click on the disabled menu item.
[curses.KEY_MOUSE, 18, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
def testNavigationUsingCommandLineWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
string_to_codes("prev\n"),
string_to_codes("next\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
def testNavigationOverOldestLimitUsingCommandLineGivesCorrectWarning(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
string_to_codes("prev\n"),
string_to_codes("prev\n"), # Navigate over oldest limit.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual("At the OLDEST in navigation history!", ui.toasts[-2])
def testNavigationOverLatestLimitUsingCommandLineGivesCorrectWarning(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
string_to_codes("prev\n"),
string_to_codes("next\n"),
string_to_codes("next\n"), # Navigate over latest limit.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
self.assertEqual("At the LATEST in navigation history!", ui.toasts[-2])
def testMouseClicksOnNavBarWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
string_to_codes("babble -n 4\n"),
# A click on the back (prev) button of the nav bar.
[curses.KEY_MOUSE, 3, 1],
            # A click on the forward (next) button of the nav bar.
[curses.KEY_MOUSE, 7, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
def testMouseClicksOnNavBarAfterPreviousScrollingWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2\n"),
[curses.KEY_NPAGE], # Scroll down one line.
string_to_codes("babble -n 4\n"),
# A click on the back (prev) button of the nav bar.
[curses.KEY_MOUSE, 3, 1],
            # A click on the forward (next) button of the nav bar.
[curses.KEY_MOUSE, 7, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(6, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
# From manual scroll.
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[1].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[2].lines)
# From history navigation.
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[3].lines)
# From history navigation's auto-scroll to history scroll position.
self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[4].lines)
self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[5].lines)
self.assertEqual(6, len(ui.scroll_messages))
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[2])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[4])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[5])
class ScrollBarTest(test_util.TensorFlowTestCase):
def testConstructorRaisesExceptionForNotEnoughHeight(self):
with self.assertRaisesRegexp(
ValueError, r"Insufficient height for ScrollBar \(2\)"):
curses_ui.ScrollBar(0, 0, 1, 1, 0, 0)
def testLayoutIsEmptyForZeroRow(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 0)
layout = scroll_bar.layout()
self.assertEqual([" "] * 8, layout.lines)
self.assertEqual({}, layout.font_attr_segs)
  def testLayoutIsEmptyForOneRow(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 1)
layout = scroll_bar.layout()
self.assertEqual([" "] * 8, layout.lines)
self.assertEqual({}, layout.font_attr_segs)
def testClickCommandForOneRowIsNone(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 1)
self.assertIsNone(scroll_bar.get_click_command(0))
self.assertIsNone(scroll_bar.get_click_command(3))
self.assertIsNone(scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
def testLayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
self.assertEqual(
{0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testWidth1LayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 0, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual(["U"] + [" "] * 6 + ["D"], layout.lines)
self.assertEqual(
{0: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testWidth3LayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 2, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP "] + [" "] * 6 + ["DN "], layout.lines)
self.assertEqual(
{0: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testWidth4LayoutIsCorrectForTopPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 3, 7, 0, 20)
layout = scroll_bar.layout()
self.assertEqual([" UP "] + [" "] * 6 + ["DOWN"], layout.lines)
self.assertEqual(
{0: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)],
1: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testLayoutIsCorrectForBottomPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 19, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
self.assertEqual(
{0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
6: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testLayoutIsCorrectForMiddlePosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 10, 20)
layout = scroll_bar.layout()
self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
self.assertEqual(
{0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
3: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
layout.font_attr_segs)
def testClickCommandsAreCorrectForMiddlePosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 10, 20)
self.assertIsNone(scroll_bar.get_click_command(-1))
self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
scroll_bar.get_click_command(0))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(1))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(2))
self.assertIsNone(scroll_bar.get_click_command(3))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(5))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(6))
self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
def testClickCommandsAreCorrectForBottomPosition(self):
scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 19, 20)
self.assertIsNone(scroll_bar.get_click_command(-1))
self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
scroll_bar.get_click_command(0))
for i in range(1, 6):
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(i))
self.assertIsNone(scroll_bar.get_click_command(6))
self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
def testClickCommandsAreCorrectForScrollBarNotAtZeroMinY(self):
scroll_bar = curses_ui.ScrollBar(0, 5, 1, 12, 10, 20)
self.assertIsNone(scroll_bar.get_click_command(0))
self.assertIsNone(scroll_bar.get_click_command(4))
self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
scroll_bar.get_click_command(5))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(6))
self.assertEqual(curses_ui._SCROLL_UP,
scroll_bar.get_click_command(7))
self.assertIsNone(scroll_bar.get_click_command(8))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(10))
self.assertEqual(curses_ui._SCROLL_DOWN,
scroll_bar.get_click_command(11))
self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
scroll_bar.get_click_command(12))
self.assertIsNone(scroll_bar.get_click_command(13))
if __name__ == "__main__":
googletest.main()
|
mongodb_04.py
|
import time
from pymongo import MongoClient
from datetime import datetime
from threading import Thread, Lock
start = datetime.now()
client = MongoClient("mongodb://username:password@127.0.0.1")
database = client['database_name']
collection = database['collection_name']
threads_count = 0
lock = Lock()
package = []
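# send() runs in a short-lived worker thread: it bumps the shared thread
# counter, bulk-inserts one batch of documents, then decrements the counter so
# the reader loop below knows when it may start another worker.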
def send(p):
global threads_count
with lock:
threads_count += 1
collection.insert_many(p)
with lock:
threads_count -= 1
with open('utils/trash.csv') as file:
    for line in file:
        name, description = line.strip().split(',')
package.append({
'name': name,
'description': description
})
if len(package) >= 10000:
while threads_count >= 4: time.sleep(0)
Thread(target=send, args=(package[:],), daemon=True).start()
package.clear()
if package:
collection.insert_many(package)
while threads_count != 0:
    time.sleep(0.01)
print(collection.count_documents({}))
collection.drop()
client.drop_database('mongo')
print(datetime.now() - start)
|
test_driver_remote_connection_threaded.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import concurrent.futures
import sys
import queue
from threading import Thread
from gremlin_python.driver.driver_remote_connection import (
DriverRemoteConnection)
from gremlin_python.process.anonymous_traversal import traversal
__author__ = 'David M. Brown (davebshow@gmail.com)'
def test_conns_in_threads(remote_connection):
q = queue.Queue()
child = Thread(target=_executor, args=(q, None))
child2 = Thread(target=_executor, args=(q, None))
child.start()
child2.start()
for x in range(2):
success = q.get()
assert success == 'success!'
child.join()
child2.join()
def test_conn_in_threads(remote_connection):
q = queue.Queue()
child = Thread(target=_executor, args=(q, remote_connection))
child2 = Thread(target=_executor, args=(q, remote_connection))
child.start()
child2.start()
for x in range(2):
success = q.get()
assert success == 'success!'
child.join()
child2.join()
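# _executor runs in a worker thread: it opens its own DriverRemoteConnection
# when none is passed in, runs a simple traversal, and reports either
# 'success!' or the raised exception type back through the shared queue.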
def _executor(q, conn):
close = False
if not conn:
# This isn't a fixture so close manually
close = True
conn = DriverRemoteConnection(
'ws://localhost:45940/gremlin', 'gmodern', pool_size=4)
try:
g = traversal().withRemote(conn)
future = g.V().promise()
t = future.result()
assert len(t.toList()) == 6
except:
q.put(sys.exc_info()[0])
else:
q.put('success!')
# Close conn
if close:
conn.close()
def handle_request():
try:
remote_connection = DriverRemoteConnection("ws://localhost:45940/gremlin", "g")
g = traversal().withRemote(remote_connection)
g.V().limit(1).toList()
remote_connection.close()
return True
except RuntimeError:
return False
def test_multithread(client):
try:
for i in range(10):
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(handle_request)
assert future.result()
except RuntimeError:
assert False
|
detect_drowsiness_im.py
|
from scipy.spatial import distance as dist
#from imutils.video import VideoStream
from imutils.video import WebcamVideoStream
from imutils import face_utils
from threading import Thread
import numpy as np
import playsound
import argparse
import imutils
import time
import dlib
import cv2
from socket import *
HOST = '127.0.0.1'
#HOST = '27.96.130.164'
#HOST = '192.168.0.11'
PORT = 1117
BUFSIZE = 1024
ADDR = (HOST,PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
try:
client_socket.connect(ADDR)
print('client connection is success..')
except Exception as e:
print('connection error %s:%s'%ADDR)
def sound_alarm(path):
# play an alarm sound
playsound.playsound("./alarm.wav")
#playsound.playsound("/home/pi/cty/Drowsiness-detection/alarm.wav")
def sending(data):
message = 'Alert(DWS)'
message = message.encode('utf-8')
client_socket.send(message)
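# Eye aspect ratio (EAR): the ratio of the two vertical eye-landmark distances
# to the horizontal one. It stays roughly constant while the eye is open and
# drops toward zero as the eye closes, which is what the drowsiness check in
# the main loop relies on.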
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
ap = argparse.ArgumentParser()
ap.add_argument('-p', "--shape-predictor", required = True,
help = "path to facial landmark predictor")
ap.add_argument('-a', "--alarm", type = str, default = "",
help = "path to alarm .wav file")
ap.add_argument('-w', "--webcam", type = str, default = 0,
help = "index of webcam on system")
args = vars(ap.parse_args())
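# An EAR below EYE_AR_THRESH counts as a closed eye; the alarm fires only after
# the eye has stayed closed for EYE_AR_CONSEC_FRAMES consecutive frames.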
EYE_AR_THRESH = 0.23
EYE_AR_CONSEC_FRAMES = 48
COUNTER = 0
ALARM_ON = False
print("[INFO] Loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
print("[INFO] Starting video stream thread...")
cam = WebcamVideoStream(src=0).start()
#cam = cv2.VideoCapture(0) #d
#cam.set(3, 320) #d
#cam.set(4, 240) #d
time.sleep(1.0)
# loop over frames from the video stream
while True:
frame = cam.read()
frame = imutils.resize(frame, width = 400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
# loop over the face detections
for rect in rects:
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
leftEye = shape[lStart:lEnd] #
rightEye = shape[rStart:rEnd] #
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
ear = (leftEAR + rightEAR) / 2.0
leftEyeHull = cv2.convexHull(leftEye) #
rightEyeHull = cv2.convexHull(rightEye) #
cv2.drawContours(frame, [leftEyeHull], -1, (66, 244, 197), 1) #
cv2.drawContours(frame, [rightEyeHull], -1, (66, 244, 197), 1) #
if ear < EYE_AR_THRESH:
COUNTER += 1
if COUNTER >= EYE_AR_CONSEC_FRAMES:
if not ALARM_ON:
ALARM_ON = True
if args["alarm"] != "":
t = Thread(target=sound_alarm,
args=(args["alarm"],))
t.daemon = True
t.start()
th = Thread(target=sending,
args=(args["alarm"],))
th.daemon = True
th.start()
cv2.putText(frame, "Drowsing!!", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
'''
if COUNTER >= EYE_AR_CONSEC_FRAMES:
message = 'Alert(DWS)'
th = Thread(target=sending(message.encode('utf-8'),args=())
th.daemon = True
th.start()
'''
else:
COUNTER = 0
ALARM_ON = False
cv2.putText(frame, "EAR: {:.2f}".format(ear), (150, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
'''
getdata = client_socket.recv(BUFSIZE)
if getdata.decode('utf-8') == 'Webcam OFF':
cam.stop()
elif getdata.decode('utf-8') == 'Webcam ON':
cam.read()
'''
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
print('closing...')
cv2.destroyAllWindows()
cam.stop()
|
test_common.py
|
from __future__ import absolute_import, unicode_literals
import pytest
import socket
from amqp import RecoverableConnectionError
from case import ContextMock, Mock, patch
from kombu import common
from kombu.common import (
Broadcast, maybe_declare,
send_reply, collect_replies,
declaration_cached, ignore_errors,
QoS, PREFETCH_COUNT_MAX, generate_oid
)
from t.mocks import MockPool
def test_generate_oid():
from uuid import NAMESPACE_OID
from kombu.five import bytes_if_py2
instance = Mock()
args = (1, 1001, 2001, id(instance))
ent = bytes_if_py2('%x-%x-%x-%x' % args)
with patch('kombu.common.uuid3') as mock_uuid3, \
patch('kombu.common.uuid5') as mock_uuid5:
mock_uuid3.side_effect = ValueError
mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4'
mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4'
oid = generate_oid(1, 1001, 2001, instance)
mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent)
assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4'
def test_ignore_errors():
connection = Mock()
connection.channel_errors = (KeyError,)
connection.connection_errors = (KeyError,)
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = ()
with pytest.raises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached:
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
assert declaration_cached('foo', chan)
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
assert not declaration_cached('foo', chan)
class test_Broadcast:
def test_arguments(self):
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast(name='test_Broadcast')
uuid_mock.assert_called_with()
assert q.name == 'bcast.test'
assert q.alias == 'test_Broadcast'
assert q.auto_delete
assert q.exchange.name == 'test_Broadcast'
assert q.exchange.type == 'fanout'
q = Broadcast('test_Broadcast', 'explicit_queue_name')
assert q.name == 'explicit_queue_name'
assert q.exchange.name == 'test_Broadcast'
q2 = q(Mock())
assert q2.name == q.name
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast('test_Broadcast',
'explicit_queue_name',
unique=True)
uuid_mock.assert_called_with()
assert q.name == 'explicit_queue_name.test'
q2 = q(Mock())
assert q2.name.split('.')[0] == q.name.split('.')[0]
class test_maybe_declare:
def _get_mock_channel(self):
# Given: A mock Channel with mock'd connection/client/entities
channel = Mock()
channel.connection.client.declared_entities = set()
return channel
def _get_mock_entity(self, is_bound=False, can_cache_declaration=True):
        # Given: An unbound mock Entity (it will bind to the channel when bind() is called)
entity = Mock()
entity.can_cache_declaration = can_cache_declaration
entity.is_bound = is_bound
def _bind_entity(channel):
entity.channel = channel
entity.is_bound = True
return entity
entity.bind = _bind_entity
return entity
def test_cacheable(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
entity.auto_delete = False
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Calling maybe_declare default
maybe_declare(entity, channel)
# Then: It called declare on the entity queue and added it to list
assert entity.declare.call_count == 1
assert hash(entity) in channel.connection.client.declared_entities
# When: Calling maybe_declare default (again)
maybe_declare(entity, channel)
        # Then: we did not call declare again because it's already in our list
assert entity.declare.call_count == 1
# When: Entity channel connection has gone away
entity.channel.connection = None
# Then: maybe_declare must raise a RecoverableConnectionError
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity)
def test_binds_entities(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# When: calling maybe_declare with default of no retry policy
maybe_declare(entity, channel)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_binds_entities_when_retry_policy(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# Given: A retry policy
sample_retry_policy = {
'interval_start': 0,
'interval_max': 1,
'max_retries': 3,
'interval_step': 0.2,
'errback': lambda x: "Called test errback retry policy",
}
# When: calling maybe_declare with retry enabled
maybe_declare(entity, channel, retry=True, **sample_retry_policy)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_with_retry(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When calling maybe_declare with retry enabled (default policy)
maybe_declare(entity, channel, retry=True)
        # Then: the connection client used ensure() to apply the retry policy
assert channel.connection.client.ensure.call_count
def test_with_retry_dropped_connection(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Entity channel connection has gone away
entity.channel.connection = None
# When: calling maybe_declare with retry
# Then: the RecoverableConnectionError should be raised
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity, channel, retry=True)
class test_replies:
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.content_encoding = 'binary'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
assert producer.publish.call_count
args = producer.publish.call_args
assert args[0][0] == {'hello': 'world'}
assert args[1] == {
'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json',
'retry': False,
'retry_policy': None,
'content_encoding': 'binary',
}
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
message.ack.assert_not_called()
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_not_called()
class test_insured:
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
logger.error.assert_called()
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
assert ret == 'works'
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
insured.assert_called()
i_args, i_kwargs = insured.call_args
assert i_args == (2, 2)
assert i_kwargs == {'foo': 'bar', 'connection': conn}
conn.autoretry.assert_called()
ar_args, ar_kwargs = conn.autoretry.call_args
assert ar_args == (fun, conn.default_channel)
assert ar_kwargs.get('on_revive')
assert ar_kwargs.get('errback')
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
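# Minimal stand-ins used by the itermessages tests below: entering MockConsumer
# registers it in a class-level set, and MockConnection.drain_events replays a
# single ('body', 'message') pair to every registered callback (or raises
# socket.timeout when told to).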
class MockConsumer(object):
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(self, *exc_info):
self.consumers.discard(self)
class test_itermessages:
class MockConnection(object):
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
ret = next(it)
assert ret == ('body', 'message')
with pytest.raises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
class test_QoS:
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value)
def set(self, value):
return value
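    # _QoS overrides set() to a no-op so the increment/decrement bookkeeping
    # can be exercised without a real channel behind it.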
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on macOS Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
logger.warning.assert_called()
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
assert qos.increment_eventually() == 11
assert qos.increment_eventually(3) == 14
assert qos.increment_eventually(-30) == 14
assert qos.decrement_eventually(7) == 7
assert qos.decrement_eventually() == 6
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
assert qos.increment_eventually() == 0
assert qos.increment_eventually(3) == 0
assert qos.increment_eventually(-30) == 0
assert qos.decrement_eventually(7) == 0
assert qos.decrement_eventually() == 0
assert qos.decrement_eventually(10) == 0
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
assert qos.value == 2010
qos.value = 1000
threaded([add, sub]) # n = 2
assert qos.value == 1000
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
assert qos.value == PREFETCH_COUNT_MAX - 1
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX + 1
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX - 1
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
assert qos.value == 10
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
assert qos.value == 9
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
assert qos.value == 8
mconsumer.qos.assert_called_with(prefetch_count=9)
assert {'prefetch_count': 9} in mconsumer.qos.call_args
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
qos.increment_eventually()
assert qos.value == 0
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
assert qos.value == 9
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
assert qos.prev == 12
qos.set(qos.prev)
|
mavtest.py
|
import rospy
import glob
import json
import math
import os
import px4tools
import sys
from mavros import mavlink
from mavros_msgs.msg import Mavlink, Waypoint, WaypointReached, GlobalPositionTarget, State
from mavros_msgs.srv import CommandBool, SetMode, CommandTOL, WaypointPush, WaypointClear
from sensor_msgs.msg import NavSatFix
from geometry_msgs.msg import PoseStamped
from mavros_test_common import MavrosTestCommon
from pymavlink import mavutil
from threading import Thread
class mavrostest():
def state_callback(self, data):
self.state = data
def wp_reached_callback(self, data):
        self.wp_reached = data.wp_seq
def global_pos_callback(self, data):
self.global_pos = data
def __init__(self):
rospy.init_node('test_node', anonymous=True)
self.state = State()
self.wp_reached = 0
self.global_pos = NavSatFix()
#SUBSCRIBERS
state_sub = rospy.Subscriber('mavros/state', State, self.state_callback)
#global_pos_sub = rospy.Subscriber('/mavros/global_position/global', State, self.state_callback)
local_pos_sub = rospy.Subscriber('/mavros/global_position/global', NavSatFix, self.global_pos_callback)
        wp_reached_sub = rospy.Subscriber('/mavros/mission/reached', WaypointReached, self.wp_reached_callback)
#PUBLISHERS
        local_pos_pub = rospy.Publisher('mavros/setpoint_position/local', PoseStamped, queue_size=10)
        self.mavlink_pub = rospy.Publisher('mavlink/to', Mavlink, queue_size=1)  # used by send_heartbeat()
#global_pos_pub = rospy.Publisher('mavros/setpoint_position/global', GlobalPositionTarget, queue_size=10)
#SERVICES
arm_client = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)
takeoff_client = rospy.ServiceProxy('mavros/cmd/takeoff', CommandTOL)
land_client = rospy.ServiceProxy('mavros/cmd/land', CommandTOL)
mode_client = rospy.ServiceProxy('mavros/set_mode', SetMode)
mission_push_client = rospy.ServiceProxy('mavros/mission/push', WaypointPush)
mission_clear_client = rospy.ServiceProxy('mavros/mission/clear', WaypointClear)
rate = rospy.Rate(20)
while (not self.state.connected):
print('Waiting on Connection')
rate.sleep()
print('Connected')
# need to simulate heartbeat to prevent datalink loss detection
hb_mav_msg = mavutil.mavlink.MAVLink_heartbeat_message(mavutil.mavlink.MAV_TYPE_GCS, 0, 0, 0, 0, 0)
hb_mav_msg.pack(mavutil.mavlink.MAVLink('', 2, 1))
hb_ros_msg = mavlink.convert_to_rosmsg(hb_mav_msg)
        hb_thread = Thread(target=self.send_heartbeat, args=(hb_ros_msg,))
        hb_thread.daemon = True
        hb_thread.start()
last_request = rospy.Time.now()
# Disarm
ack = False
while (not ack):
try:
ack = arm_client(False).success
except rospy.ServiceException as e:
print("Disarming Failed: %s" %e)
rate.sleep()
print('Disarmed')
# Set Mode
mode = "AUTO.LOITER"
ack = False
while (not ack):
try:
ack = mode_client(0, "AUTO.LOITER").mode_sent # 0 is custom mode
except rospy.ServiceException as e:
print("Mode Change Failed: %s" %e)
rate.sleep()
print('Mode set to ', mode)
# Arm
ack = False
while (not ack):
try:
ack = arm_client(True).success
except rospy.ServiceException as e:
print("Arming Failed: %s" %e)
rate.sleep()
print('Armed')
#Clear any old missions
ack = False
while (not ack):
try:
ack = mission_clear_client().success
except rospy.ServiceException as e:
print("Mission Clear Failed: %s" %e)
rate.sleep()
print('old missions cleared')
#Create and execute Mission
home_lat = self.global_pos.latitude
home_long = self.global_pos.longitude
waypoints = []
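        # Mission plan: a takeoff item, one waypoint roughly 5 m north of the
        # home position (+0.00005 deg latitude), then a return-to-launch item
        # (MAV_CMD 20).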
takeoff = Waypoint()
takeoff.frame = 3
takeoff.command = mavutil.mavlink.MAV_CMD_NAV_TAKEOFF
takeoff.is_current = True
takeoff.autocontinue = True
takeoff.param1 = 0.0
takeoff.param2 = 0.0
takeoff.param3 = 0.3
takeoff.param4 = 0.0
takeoff.x_lat = home_lat
takeoff.y_long = home_long
        takeoff.z_alt = 8.0
        waypoints.append(takeoff)
wp1 = Waypoint()
wp1.frame = 3
wp1.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT
wp1.is_current = True
wp1.autocontinue = True
wp1.param1 = 0.0
wp1.param2 = 0.0
wp1.param3 = 0.3
wp1.param4 = 0.0
wp1.x_lat = home_lat + 0.00005
wp1.y_long = home_long
wp1.z_alt = 8.0
waypoints.append(wp1)
rtl = Waypoint()
rtl.frame = 3
rtl.command = 20
rtl.is_current = True
rtl.autocontinue = True
rtl.param1 = 0.0
rtl.param2 = 0.0
rtl.param3 = 0.0
rtl.param4 = 0.0
rtl.x_lat = 0.0
rtl.y_long = 0.0
rtl.z_alt = 0.0
waypoints.append(rtl)
ack = False
while (not ack):
try:
ack = mission_push_client(start_index=0, waypoints=waypoints).success
except rospy.ServiceException as e:
print("Mission Push Failed: %s" %e)
rate.sleep()
print('Mission Loaded')
# Set Mode
mode = "AUTO.MISSION"
ack = False
while (not ack):
try:
ack = mode_client(5, mode).mode_sent # 0 is custom mode
except rospy.ServiceException as e:
print("Mode Change Failed: %s" %e)
rate.sleep()
print('Beginning Mission')
while (self.wp_reached != 3):
rate.sleep()
# print "\nTaking off"
# try:
# response = takeoff_client(altitude=10, latitude=0, longitude=0, min_pitch=0, yaw=0)
# rospy.loginfo(response)
# except rospy.ServiceException as e:
# print("Takeoff failed: %s" %e)
#
# Helper methods
#
def send_heartbeat(self, hb_ros_msg):
rate = rospy.Rate(2) # Hz
while not rospy.is_shutdown():
self.mavlink_pub.publish(hb_ros_msg)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
mavrostest()
|
main.py
|
#!/usr/bin/env python
# flake8: noqa: E402
import argparse
import grp
import logging
import os
import pwd
import signal
import sys
import time
from pathlib import Path
from typing import Tuple, Optional
import daemon
import psutil
from daemon.daemon import change_process_owner
from pid import PidFile, PidFileError
logger = logging.getLogger(__name__)
sys.path.append(str(Path(__file__).resolve().parents[2]))
from irrd import __version__, ENV_MAIN_PROCESS_PID
from irrd.conf import config_init, CONFIG_PATH_DEFAULT, get_setting, get_configuration
from irrd.mirroring.scheduler import MirrorScheduler
from irrd.server.http.server import run_http_server
from irrd.server.whois.server import start_whois_server
from irrd.storage.preload import PreloadStoreManager
from irrd.utils.process_support import ExceptionLoggingProcess
# This file does not have a unit test, but is instead tested through
# the integration tests. Writing a unit test would be too complex.
def main():
description = """IRRd main process"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--config', dest='config_file_path', type=str,
help=f'use a different IRRd config file (default: {CONFIG_PATH_DEFAULT})')
parser.add_argument('--foreground', dest='foreground', action='store_true',
help=f"run IRRd in the foreground, don't detach")
args = parser.parse_args()
mirror_frequency = int(os.environ.get('IRRD_SCHEDULER_TIMER_OVERRIDE', 15))
daemon_kwargs = {
'umask': 0o022,
}
if args.foreground:
daemon_kwargs['detach_process'] = False
daemon_kwargs['stdout'] = sys.stdout
daemon_kwargs['stderr'] = sys.stderr
# config_init with commit may only be called within DaemonContext,
# but this call here causes fast failure for most misconfigurations
config_init(args.config_file_path, commit=False)
if not any([
get_configuration().user_config_staging.get('log.logfile_path'),
get_configuration().user_config_staging.get('log.logging_config_path'),
args.foreground,
]):
logging.critical('Unable to start: when not running in the foreground, you must set '
'either log.logfile_path or log.logging_config_path in the settings')
return
with daemon.DaemonContext(**daemon_kwargs):
config_init(args.config_file_path)
uid, gid = get_configured_owner()
# Running as root is permitted on CI
if not os.environ.get('CI') and not uid and os.geteuid() == 0:
logging.critical('Unable to start: user and group must be defined in settings '
'when starting IRRd as root')
return
piddir = get_setting('piddir')
logger.info('IRRd attempting to secure PID')
try:
with PidFile(pidname='irrd', piddir=piddir):
logger.info(f'IRRd {__version__} starting, PID {os.getpid()}, PID file in {piddir}')
run_irrd(mirror_frequency=mirror_frequency,
config_file_path=args.config_file_path if args.config_file_path else CONFIG_PATH_DEFAULT,
uid=uid,
gid=gid,
)
except PidFileError as pfe:
logger.error(f'Failed to start IRRd, unable to lock PID file irrd.pid in {piddir}: {pfe}')
except Exception as e:
logger.error(f'Error occurred in main process, terminating. Error follows:')
logger.exception(e)
os.kill(os.getpid(), signal.SIGTERM)
def run_irrd(mirror_frequency: int, config_file_path: str, uid: Optional[int], gid: Optional[int]):
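    """
    Start the whois server process, drop privileges if configured, start the
    preload store manager and the HTTP server, install SIGHUP/SIGTERM handlers,
    and loop running the mirror scheduler roughly every `mirror_frequency`
    seconds until terminated.
    """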
terminated = False
os.environ[ENV_MAIN_PROCESS_PID] = str(os.getpid())
whois_process = ExceptionLoggingProcess(
target=start_whois_server,
name='irrd-whois-server-listener',
kwargs={'uid': uid, 'gid': gid}
)
whois_process.start()
if uid and gid:
change_process_owner(uid=uid, gid=gid)
mirror_scheduler = MirrorScheduler()
preload_manager = PreloadStoreManager(name='irrd-preload-store-manager')
preload_manager.start()
    uvicorn_process = ExceptionLoggingProcess(
        target=run_http_server,
        name='irrd-http-server-listener',
        args=(config_file_path,),
    )
uvicorn_process.start()
def sighup_handler(signum, frame):
# On SIGHUP, check if the configuration is valid and reload in
# this process, and if it is valid, signal SIGHUP to all
# child processes.
if get_configuration().reload():
parent = psutil.Process(os.getpid())
children = parent.children(recursive=True)
for process in children:
process.send_signal(signal.SIGHUP)
if children:
logging.info('Main process received SIGHUP with valid config, sent SIGHUP to '
f'child processes {[c.pid for c in children]}')
signal.signal(signal.SIGHUP, sighup_handler)
def sigterm_handler(signum, frame):
mirror_scheduler.terminate_children()
parent = psutil.Process(os.getpid())
children = parent.children(recursive=True)
for process in children:
try:
process.send_signal(signal.SIGTERM)
except Exception:
# If we can't SIGTERM some of the processes,
# do the best we can.
pass
if children:
logging.info('Main process received SIGTERM, sent SIGTERM to '
f'child processes {[c.pid for c in children]}')
nonlocal terminated
terminated = True
signal.signal(signal.SIGTERM, sigterm_handler)
sleeps = mirror_frequency
while not terminated:
# This loops every second to prevent long blocking on SIGTERM.
mirror_scheduler.update_process_state()
if sleeps >= mirror_frequency:
mirror_scheduler.run()
sleeps = 0
time.sleep(1)
sleeps += 1
logging.debug(f'Main process waiting for child processes to terminate')
for child_process in whois_process, uvicorn_process, preload_manager:
child_process.join(timeout=3)
parent = psutil.Process(os.getpid())
children = parent.children(recursive=True)
for process in children:
try:
process.send_signal(signal.SIGKILL)
except Exception:
pass
if children:
logging.info('Some processes left alive after SIGTERM, send SIGKILL to '
f'child processes {[c.pid for c in children]}')
logging.info(f'Main process exiting')
def get_configured_owner() -> Tuple[Optional[int], Optional[int]]:
uid = gid = None
user = get_setting('user')
group = get_setting('group')
if user and group:
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
return uid, gid
if __name__ == '__main__': # pragma: no cover
main()
|
gsi_index_partitioning.py
|
import copy
import json
import threading
import time
from .base_gsi import BaseSecondaryIndexingTests
from membase.api.rest_client import RestConnection, RestHelper
import random
from lib import testconstants
from lib.Cb_constants.CBServer import CbServer
from lib.couchbase_helper.tuq_generators import TuqGenerators
from lib.memcached.helper.data_helper import MemcachedClientHelper
from lib.remote.remote_util import RemoteMachineShellConnection
from threading import Thread
from pytests.query_tests_helper import QueryHelperTests
from couchbase_helper.documentgenerator import JsonDocGenerator
from couchbase_helper.cluster import Cluster
from .gsi_replica_indexes import GSIReplicaIndexesTests
from lib.membase.helper.cluster_helper import ClusterOperationHelper
class GSIIndexPartitioningTests(GSIReplicaIndexesTests):
def setUp(self):
super(GSIIndexPartitioningTests, self).setUp()
self.num_items = self.input.param("items", 5000)
self.log.info("No. of items: {0}".format(str(self.num_items)))
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
self.rest = RestConnection(self.index_servers[0])
self.node_list = []
for server in self.index_servers:
self.node_list.append(server.ip + ":" + server.port)
self.num_queries = self.input.param("num_queries", 100)
self.num_index_partitions = self.input.param("num_index_partitions", 8)
self.recover_failed_node = self.input.param("recover_failed_node",
False)
self.op_type = self.input.param("op_type", "create")
self.node_operation = self.input.param("node_op", "reboot")
self.implicit_use_index = self.input.param("implicit_use_index", False)
self.use_replica_index = self.input.param("use_replica_index", False)
self.failover_index = self.input.param("failover_index", False)
self.index_partitioned = self.input.param('index_partitioned', False)
def tearDown(self):
super(GSIIndexPartitioningTests, self).tearDown()
'''Test that checks if the last_known_scan_time stat is being set properly
- Test explicitly using a specific index to see if it is updated
- Test implicitly using a specific index to see if it is updated
- Test if the stat persists after an indexer crash'''
def test_index_last_query_stat(self):
index_node = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
rest = RestConnection(index_node)
doc = {"indexer.statsPersistenceInterval": 60}
rest.set_index_settings_internal(doc)
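# Reduce the stats persistence interval to 60s so last_known_scan_time is
# flushed to disk frequently, then restart the indexer, presumably so the
# new setting takes effect before the test begins.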
shell = RemoteMachineShellConnection(index_node)
output1, error1 = shell.execute_command("killall -9 indexer")
self.sleep(30)
if self.index_partitioned:
create_index_query = "CREATE INDEX idx on default(age) partition by hash(name) USING GSI"
else:
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
# Implicitly or Explicitly use the index in question
if self.implicit_use_index:
use_index_query = 'select * from default where age > 50'
else:
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
used_index = 'idx'
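# last_known_scan_time appears to be reported at nanosecond resolution,
# so only the first 10 digits (the seconds component) are compared below.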
for index_node in indexer_nodes:
rest = RestConnection(index_node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if index == used_index:
self.log.info(int(str(indexes['default'][index]['last_known_scan_time'])[:10]))
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
if self.failover_index:
self.sleep(60)
shell = RemoteMachineShellConnection(index_node)
output1, error1 = shell.execute_command("killall -9 indexer")
self.sleep(30)
break
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
if self.failover_index:
for index_node in indexer_nodes:
rest = RestConnection(index_node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if index == used_index:
self.log.info(int(str(indexes['default'][index]['last_known_scan_time'])[:10]))
self.assertTrue(
current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 180,
'The timestamp is more than three minutes off')
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
'''Same as the test above for partitioned indexes'''
def test_index_last_query_stat_partitioned(self):
create_index_query = "CREATE INDEX idx on default(age) partition by hash(name) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
# Implicitly or Explicitly use the index in question
if self.implicit_use_index:
use_index_query = 'select * from default where age > 50'
else:
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
used_index = 'idx'
for index_node in indexer_nodes:
rest = RestConnection(index_node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if index == used_index:
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
'''Test that equivalent indexes/replicas are updated properly: if you explicitly use an index, any one of
its equivalent indexes may serve the scan, but both should not be used'''
def test_index_last_query_stat_equivalent_indexes(self):
if not self.use_replica_index:
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
create_index_query3 = "CREATE INDEX idx3 ON default(age) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
else:
create_index_query = "CREATE INDEX idx ON default(age) USING GSI WITH {'num_replica': 1};"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
check_idx = False
check_idx3 = False
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if self.use_replica_index:
if index == 'idx':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx = True
elif index == 'idx (replica 1)':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx3 = True
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
if index == 'idx':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx = True
elif index == 'idx3':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx3 = True
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
# One or the other should have been used, not both
self.assertTrue(check_idx or check_idx3)
self.assertFalse(check_idx and check_idx3)
'''Run a query that uses two different indexes at once and make sure both are properly updated'''
def test_index_last_query_multiple_indexes(self):
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
# Construct a query that uses both created indexes and ensure they both have a last used timestamp
use_index_query = 'select * from default where age > 50 and name = "Caryssa"'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
# All indexes that were created should be used
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
else:
continue
'''Make sure that two indexes with the same name on two different buckets do not cause an incorrect stat update'''
def test_index_last_query_stat_multiple_buckets(self):
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx ON standard_bucket0(age) USING GSI"
create_index_query3 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
self.fail_if_no_buckets()
for bucket in self.buckets:
if bucket.name in indexes:
for index in indexes[bucket.name]:
self.assertEqual(indexes[bucket.name][index]['last_known_scan_time'], 0)
else:
continue
# Implicitly or Explicitly use the index in question
if self.implicit_use_index:
use_index_query = 'select * from default where age > 50'
else:
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
used_index = 'idx'
used_bucket = 'default'
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
self.fail_if_no_buckets()
for bucket in self.buckets:
if bucket.name in indexes:
for index in indexes[bucket.name]:
if index == used_index and used_bucket == bucket.name:
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
else:
self.assertTrue(indexes[bucket.name][index]['last_known_scan_time'] == 0)
else:
continue
# Test that generates n number of create index statements with various permutations and combinations
# of different clauses used in the create index statement.
def test_create_partitioned_indexes(self):
self._load_emp_dataset(end=self.num_items)
create_index_queries = self.generate_random_create_index_statements(
bucketname=self.buckets[0].name, idx_node_list=self.node_list,
num_statements=self.num_queries)
failed_index_creation = 0
for create_index_query in create_index_queries:
try:
self.n1ql_helper.run_cbq_query(
query=create_index_query["index_definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(10)
index_metadata = self.rest.get_indexer_metadata()
index_map = self.get_index_map()
if index_metadata:
status = self.validate_partitioned_indexes(create_index_query,
index_map,
index_metadata)
if not status:
failed_index_creation += 1
self.log.info(
"** Following query failed validation : {0}".format(
create_index_query["index_definition"]))
else:
failed_index_creation += 1
self.log.info(
"** Following index did not get created : {0}".format(
create_index_query["index_definition"]))
self.log.info("output from /getIndexStatus")
self.log.info(index_metadata)
self.log.info("Index Map")
self.log.info(index_map)
drop_index_query = "DROP INDEX default.{0}".format(
create_index_query["index_name"])
try:
self.n1ql_helper.run_cbq_query(
query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.log.info(
"Total Create Index Statements Run: {0}, Passed : {1}, Failed : {2}".format(
self.num_queries, self.num_queries - failed_index_creation,
failed_index_creation))
self.assertTrue(failed_index_creation == 0,
"Some create index statements failed validation. Please see the test log above for details.")
def test_partition_index_with_excluded_nodes(self):
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
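# 'excludeNode=in' should tell the planner not to place new index builds
# (including partitions) on this node; the test then verifies partitions
# land only on the remaining nodes.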
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
validated = False
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
for index in index_metadata["status"]:
if index["name"] == "idx1":
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertNotIn(self.node_list[0], index["hosts"],
"Planner did not ignore excluded node during index creation")
#self.assertEqual(index["hosts"], expected_hosts,
# "Planner did not ignore excluded node during index creation")
validated = True
if not validated:
self.fail("Looks like index was not created.")
def test_replica_partition_index_with_excluded_nodes(self):
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_names = []
index_names.append("idx1")
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
# Need to see if the indexes get created in the first place
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
validated = False
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
index_validated = 0
for index_name in index_names:
for index in index_metadata["status"]:
if index["name"] == index_name:
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertEqual(index["hosts"], expected_hosts,
"Planner did not ignore excluded node during index creation for {0}".format(
index_name))
index_validated += 1
self.assertEqual(index_validated, (self.num_index_replicas + 1),
"All index replicas not created")
def test_partition_index_by_non_indexed_field(self):
self._load_emp_dataset(end=self.num_items)
create_index_statement = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_default_num_partitions(self):
self._load_emp_dataset(end=self.num_items)
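# Set the cluster-wide default partition count to 6; the partitioned index
# created below omits num_partition and should inherit this value.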
self.rest.set_index_settings(
{"indexer.numPartitions": 6})
create_index_statement = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = 6
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_change_default_num_partitions_after_create_index(self):
self._load_emp_dataset(end=self.num_items)
self.rest.set_index_settings(
{"indexer.numPartitions": 16})
create_index_statement = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = 16
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
self.rest.set_index_settings(
{"indexer.numPartitions": 32})
create_index_statement = "CREATE INDEX idx2 on default(name,salary) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx2"
index_details["num_partitions"] = 32
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
# Validate that num_partitions for idx1 doesn't change
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = 16
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Num partitions for existing indexes changed after updating default value")
def test_default_num_partitions_negative(self):
self._load_emp_dataset(end=self.num_items)
self.rest.set_index_settings(
{"indexer.numPartitions": 8})
numpartition_values_str = ["abc", "2018-03-04 18:02:37"]
numpartition_values_num = [0, -5, 46.6789]
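# Invalid or non-positive settings should be ignored (falling back to the
# default of 8 partitions); a positive float is truncated to an int.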
for value in numpartition_values_str:
indexname = "index_" + str(random.randint(1, 100))
try:
self.rest.set_index_settings(
{"indexer.numPartitions": '{0}'.format(value)})
create_index_statement = "CREATE INDEX {0} on default(name,dept) partition by hash(salary) USING GSI".format(
indexname)
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = indexname
if (not isinstance(value, str)) and int(value) > 0:
index_details["num_partitions"] = int(value)
else:
index_details["num_partitions"] = 8
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
for value in numpartition_values_num:
indexname = "index_" + str(random.randint(101, 200))
try:
self.rest.set_index_settings(
{"indexer.numPartitions": value})
create_index_statement = "CREATE INDEX {0} on default(name,dept) partition by hash(salary) USING GSI".format(
indexname)
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = indexname
if (not isinstance(value, str)) and int(value) > 0:
index_details["num_partitions"] = int(value)
else:
index_details["num_partitions"] = 8
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_numpartitions_negative(self):
self._load_emp_dataset(end=self.num_items)
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {'num_partition':null}"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
numpartition_values_str = ["abc", "2018-03-04 18:02:37"]
for value in numpartition_values_str:
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':'{0}'}}".format(
value)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
else:
self.fail(
"Index got created with an invalid num_partition value : {0}".format(
value))
numpartition_values_num = [0, -5]
for value in numpartition_values_num:
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{0}}}".format(
value)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
else:
self.fail(
"Index got created with an invalid num_partition value : {0}".format(
value))
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {'num_partition':47.6789}"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"Index did not get created with a double value for num_partition : 47.6789")
else:
self.log.info("Index got created successfully with num_partition being a double value : 47.6789")
def test_partitioned_index_with_replica(self):
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
def test_partitioned_index_with_replica_with_server_groups(self):
self._load_emp_dataset(end=self.num_items)
self._create_server_groups()
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
index_hosts_list = []
for index in index_metadata["status"]:
index_hosts_list.append(index["hosts"])
self.log.info("Index Host List : {0}".format(index_hosts_list))
# Need to change the validation logic here. Between index and its replicas, they should have a full set of partitions in both the server groups.
# idx11 - .101, .102: 3, 4, 5, 10, 11, 15, 16
# idx11 - .103, .104: 1, 2, 6, 7, 8, 9, 12, 13, 14
# idx12 - .101, .102: 1, 2, 6, 7, 8, 9, 12, 13, 14
# idx12 - .103, .104: 3, 4, 5, 10, 11, 15, 16
validation = True
for i in range(0, len(index_hosts_list)):
for j in range(i + 1, len(index_hosts_list)):
if sorted(index_hosts_list[i]) != sorted(index_hosts_list[j]):
continue
else:
validation &= False
self.assertTrue(validation,
"Partitions of replica indexes do not honour server grouping")
def test_create_partitioned_index_one_node_already_down(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=60)
failover_task.result()
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Failed to create index with one node failed over")
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
def test_create_partitioned_index_one_node_network_partitioned(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
self.start_firewall_on_node(node_out)
self.sleep(10)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Failed to create index with one node network-partitioned")
finally:
# Heal network partition and wait for some time to allow indexes
# to get built automatically on that node
self.stop_firewall_on_node(node_out)
self.sleep(120)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
def test_node_fails_during_create_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
threads = []
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(create_index_statement, 10, self.n1ql_node)))
threads.append(
Thread(target=self.cluster.failover, name="failover", args=(
self.servers[:self.nodes_init], [node_out], self.graceful,
False, 60)))
for thread in threads:
thread.start()
self.sleep(5)
for thread in threads:
thread.join()
self.sleep(30)
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
def test_node_nw_partitioned_during_create_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
threads = []
threads.append(
Thread(target=self.start_firewall_on_node,
name="network_partitioning", args=(node_out,)))
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(create_index_statement, 10, self.n1ql_node)))
for thread in threads:
thread.start()
self.sleep(5)
for thread in threads:
thread.join()
self.sleep(10)
try:
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
if index_metadata != {}:
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
else:
self.log.info(
"Cannot retrieve index metadata since one node is down")
except Exception as ex:
self.log.info(str(ex))
finally:
self.stop_firewall_on_node(node_out)
self.sleep(30)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str in hosts,
"Partitioned index not created on all hosts")
def test_node_nw_partitioned_during_create_partitioned_index_with_node_list(
self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
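# Build a JSON-style array of "ip:port" index node addresses for use in
# the WITH {'nodes': ...} clause of the CREATE INDEX statement.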
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'nodes' : {0}}}".format(
node_list_str)
threads = []
threads.append(
Thread(target=self.start_firewall_on_node,
name="network_partitioning", args=(node_out,)))
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(create_index_statement, 10, self.n1ql_node)))
for thread in threads:
thread.start()
self.sleep(5)
for thread in threads:
thread.join()
self.sleep(10)
try:
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
if index_metadata != {}:
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
else:
self.log.info(
"Cannot retrieve index metadata since one node is down")
except Exception as ex:
self.log.info(str(ex))
finally:
self.stop_firewall_on_node(node_out)
self.sleep(30)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str in hosts,
"Partitioned index not created on all hosts")
def test_build_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
if self.num_index_replicas > 0:
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'defer_build': true, 'num_replica':{1}}};".format(
self.num_index_partitions, self.num_index_replicas)
else:
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'defer_build': true}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
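# BUILD INDEX brings the deferred index (and its replicas, if any) online;
# after it completes, defer_build should report False for every copy.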
try:
self.n1ql_helper.run_cbq_query(query=build_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index building failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["index_name"] = index_name_prefix
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
def test_build_partitioned_index_one_failed_node(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}, 'defer_build': true}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
try:
self.n1ql_helper.run_cbq_query(query=build_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index building failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
# At this point, since one node is in a failed state, not all partitions will be built.
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata, skip_numpartitions_check=True),
"Deferred Partitioned index created not as expected")
# Recover the failed node and check if after recovery, all partitions are built.
if self.recover_failed_node:
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(180)
index_map = self.get_index_map()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
def test_failover_during_build_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}, 'defer_build': true}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
node_out = self.servers[self.node_out]
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
threads = []
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(build_index_query, 10, self.n1ql_node)))
threads.append(
Thread(target=self.cluster.async_failover, name="failover", args=(
self.servers[:self.nodes_init], [node_out], self.graceful)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.sleep(30)
index_map = self.get_index_map()
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
# At this point, since one node is in a failed state, not all partitions will be built.
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata, skip_numpartitions_check=True),
"Deferred Partitioned index created not as expected")
def test_build_partitioned_index_with_network_partitioning(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}, 'defer_build': true}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
node_out = self.servers[self.node_out]
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
try:
self.start_firewall_on_node(node_out)
self.sleep(10)
self.n1ql_helper.run_cbq_query(query=build_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if not ("Index build will be retried in background" in str(ex) or "Terminate Request during cleanup" in str(ex)):
self.fail("index building failed with error : {0}".format(str(ex)))
else:
self.log.info("Index build failed with expected error")
finally:
# Heal network partition and wait for some time to allow indexes
# to get built automatically on that node
self.stop_firewall_on_node(node_out)
self.sleep(360)
index_map = self.get_index_map()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
def test_drop_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
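# Assemble the WITH clause incrementally so this one test covers plain,
# replicated, and deferred variants of a partitioned index.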
with_clause = "WITH {{'num_partition': {0} ".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_clause += ", 'num_replica':{0}".format(self.num_index_replicas)
if self.defer_build:
with_clause += ", 'defer_build':True"
with_clause += " }"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI {0}".format(
with_clause)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = self.defer_build
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details,
index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
drop_index_query = "DROP INDEX `default`." + index_name_prefix
try:
self.n1ql_helper.run_cbq_query(query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"Drop index failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_delete_bucket_cascade_drop_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
with_clause = "WITH {{'num_partition': {0} ".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_clause += ", 'num_replica':{0}".format(self.num_index_replicas)
if self.defer_build:
with_clause += ", 'defer_build':True"
with_clause += " }"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI {0}".format(
with_clause)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{0}}}".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = self.defer_build
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details,
index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
self.cluster.bucket_delete(server=self.master, bucket='default')
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_drop_partitioned_index_one_failed_node(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
drop_index_query = "DROP INDEX `default`." + index_name_prefix
try:
self.n1ql_helper.run_cbq_query(query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"Drop index failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
if self.recover_failed_node:
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(180)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_failover_during_drop_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(
str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
node_out = self.servers[self.node_out]
drop_index_query = "DROP INDEX `default`." + index_name_prefix
threads = []
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query,
name="run_query",
args=(drop_index_query, 10, self.n1ql_node)))
threads.append(
Thread(target=self.cluster.async_failover, name="failover",
args=(
self.servers[:self.nodes_init], [node_out],
self.graceful)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_drop_partitioned_index_with_network_partitioning(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
node_out = self.servers[self.node_out]
self.start_firewall_on_node(node_out)
drop_index_query = "DROP INDEX `default`." + index_name_prefix
try:
self.start_firewall_on_node(node_out)
self.sleep(10)
self.n1ql_helper.run_cbq_query(query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if not "the operation will automaticaly retry after cluster is back to normal" in str(ex):
self.fail(
"index drop failed with error : {0}".format(str(ex)))
else:
self.log.info("Index drop failed with expected error")
finally:
# Heal network partition and wait for some time to allow indexes
# to get built automatically on that node
self.stop_firewall_on_node(node_out)
self.sleep(360)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_partitioned_index_warmup_behaviour(self):
node_out = self.servers[self.node_out]
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI"
if self.defer_build:
create_index_query += " WITH {'defer_build':true}"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = self.defer_build
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
remote_client = RemoteMachineShellConnection(node_out)
if self.node_operation == "kill_indexer":
remote_client.terminate_process(process_name="indexer")
remote_client.disconnect()
else:
self.reboot_node(node_out)
# wait for restart and warmup on all nodes
self.sleep(self.wait_timeout * 3)
# disable firewall on these nodes
self.stop_firewall_on_node(node_out)
# wait till node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([node_out], self,
wait_if_warmup=True)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After:")
self.log.info(index_metadata)
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index warmup behavior not as expected")
def test_mutations_on_partitioned_indexes(self):
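        # Verify that KV mutations are reflected in partitioned indexes by
        # comparing indexed item counts against the bucket item count.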
self.run_async_index_operations(operation_type="create_index")
self.run_doc_ops()
self.sleep(30)
# Get item counts
bucket_item_count, total_item_count, total_num_docs_processed = self.get_stats_for_partitioned_indexes()
self.assertEqual(bucket_item_count, total_item_count,
"# Items indexed {0} do not match bucket items {1}".format(
total_item_count, bucket_item_count))
def test_update_mutations_on_indexed_keys_partitioned_indexes(self):
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(name) USING GSI;"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.run_doc_ops()
self.sleep(30)
# Get item counts
bucket_item_count, total_item_count, total_num_docs_processed = self.get_stats_for_partitioned_indexes(
index_name="idx1")
self.assertEqual(bucket_item_count, total_item_count,
"# Items indexed {0} do not match bucket items {1}".format(
total_item_count, bucket_item_count))
def test_kv_full_rollback_on_partitioned_indexes(self):
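        # Flush the bucket and verify partitioned indexes roll back to zero items.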
self.run_async_index_operations(operation_type="create_index")
self.sleep(30)
self.cluster.bucket_flush(self.master)
self.sleep(60)
# Get item counts
bucket_item_count, total_item_count, total_num_docs_processed = self.get_stats_for_partitioned_indexes()
self.assertEqual(total_item_count, 0, "Rollback to zero fails")
def test_kv_partial_rollback_on_partitioned_indexes(self):
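        # Simulate a partial KV rollback (stop persistence, mutate, kill memcached
        # on the master, fail over the other node) and verify partitioned indexes
        # converge to the bucket item count after the rollback.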
self.run_async_index_operations(operation_type="create_index")
# Stop Persistence on Node A & Node B
self.log.info("Stopping persistence on NodeA & NodeB")
mem_client = MemcachedClientHelper.direct_client(self.servers[0],
"default")
mem_client.stop_persistence()
mem_client = MemcachedClientHelper.direct_client(self.servers[1],
"default")
mem_client.stop_persistence()
self.run_doc_ops()
self.sleep(10)
# Get count before rollback
bucket_count_before_rollback, item_count_before_rollback, num_docs_processed_before_rollback = self.get_stats_for_partitioned_indexes()
# Kill memcached on Node A so that Node B becomes master
self.log.info("Kill Memcached process on NodeA")
shell = RemoteMachineShellConnection(self.master)
shell.kill_memcached()
# Start persistence on Node B
self.log.info("Starting persistence on NodeB")
mem_client = MemcachedClientHelper.direct_client(
self.input.servers[1], "default")
mem_client.start_persistence()
# Failover Node B
self.log.info("Failing over NodeB")
self.sleep(10)
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init], [self.servers[1]], self.graceful,
wait_for_pending=120)
failover_task.result()
# Wait for a couple of mins to allow rollback to complete
self.sleep(120)
# Get count after rollback
bucket_count_after_rollback, item_count_after_rollback, num_docs_processed_after_rollback = self.get_stats_for_partitioned_indexes()
self.assertEqual(bucket_count_after_rollback, item_count_after_rollback,
"Partial KV Rollback not processed by Partitioned indexes")
def test_scan_availability(self):
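        # Fail over an index node and verify scans still succeed when replicas
        # exist; without replicas the scan is expected to fail with a known error.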
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
if self.num_index_replicas:
create_index_query += " with {{'num_replica':{0}}};".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=60)
failover_task.result()
self.sleep(30)
# Run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
self.n1ql_helper.run_cbq_query(query=scan_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if self.num_index_replicas == 0:
if self.expected_err_msg in str(ex):
pass
else:
                    self.fail(
                        "Scan failed with unexpected error message : {0}".format(
                            str(ex)))
else:
self.fail("Scan failed")
def test_scan_availability_with_network_partitioning(self):
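        # Network-partition an index node, run a scan (which may fail), heal the
        # partition and verify scan behaviour with and without index replicas.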
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
if self.num_index_replicas:
create_index_query += " with {{'num_replica':{0}}};".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
# Induce network partitioning on one of the nodes
node_out = self.servers[self.node_out]
self.start_firewall_on_node(node_out)
# Run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
self.n1ql_helper.run_cbq_query(query=scan_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(
"Scan failed as one indexer node was experiencing network partititioning. Error : %s",
str(ex))
# Heal Network Partitioning
self.stop_firewall_on_node(node_out)
# Re-run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
self.n1ql_helper.run_cbq_query(query=scan_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if self.num_index_replicas:
if self.expected_err_msg in str(ex):
pass
else:
                    self.fail(
                        "Scan failed with unexpected error message : {0}".format(
                            str(ex)))
else:
self.fail("Scan failed")
def test_index_scans(self):
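        # Run a suite of queries against equivalent partitioned and
        # non-partitioned indexes and assert both return identical results.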
self._load_emp_dataset(end=self.num_items)
# Create Partitioned and non-partitioned indexes
if self.num_index_partitions > 0:
self.rest.set_index_settings(
{"indexer.numPartitions": self.num_index_partitions})
create_partitioned_index1_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(name,dept,salary) USING GSI;"
create_index1_query = "CREATE INDEX non_partitioned_idx1 ON default(name,dept,salary) USING GSI;"
create_partitioned_index2_query = "create index partitioned_idx2 on default(name,manages.team_size) partition by hash(manages.team_size) USING GSI;"
create_index2_query = "create index non_partitioned_idx2 on default(name,manages.team_size) USING GSI;"
create_partitioned_index3_query = "create index partitioned_idx3 on default(name,manages.team_size) partition by hash(name,manages.team_size) USING GSI;"
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index1_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index1_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index2_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index2_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index3_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
# Scans
queries = []
# 1. Small lookup query with equality predicate on the partition key
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name='Safiya Palmer'"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 2. Pagination query with equality predicate on the partition key
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND dept='HR' offset 0 limit 10"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 3. Large aggregated query
query_details = {}
query_details[
"query"] = "select count(name), dept from default USE INDEX (indexname USING GSI) where name is not missing group by dept"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 4. Scan with large result sets
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND salary > 10000"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 5. Scan that does not require sorted data
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND salary > 100000"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 6. Scan that requires sorted data
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND salary > 10000 order by dept asc,salary desc"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 7. Scan with predicate on a dataset that has some values for the partition key missing, and present for some
query_details = {}
query_details[
"query"] = "select name from default USE INDEX (indexname USING GSI) where name is not missing AND manages.team_size > 3"
query_details["partitioned_idx_name"] = "partitioned_idx2"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx2"
queries.append(query_details)
# 8. Index partitioned on multiple keys. Scan with predicate on multiple keys with a dataset that has some values for the partition keys missing, and present for some
query_details = {}
query_details[
"query"] = "select name from default USE INDEX (indexname USING GSI) where manages.team_size >= 3 and manages.team_size <= 7 and name like 'A%'"
query_details["partitioned_idx_name"] = "partitioned_idx3"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx2"
queries.append(query_details)
# 9. Overlap scans on partition keys
query_details = {}
query_details[
"query"] = "select name from default USE INDEX (indexname USING GSI) where name is not missing AND (manages.team_size >= 3 or manages.team_size >= 7)"
query_details["partitioned_idx_name"] = "partitioned_idx2"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx2"
queries.append(query_details)
total_scans = 0
failures = 0
for query_details in queries:
total_scans += 1
try:
query_partitioned_index = query_details["query"].replace(
"indexname", query_details["partitioned_idx_name"])
query_non_partitioned_index = query_details["query"].replace(
"indexname", query_details["non_partitioned_idx_name"])
result_partitioned_index = \
self.n1ql_helper.run_cbq_query(
query=query_partitioned_index,
min_output_size=10000000,
server=self.n1ql_node)["results"]
result_non_partitioned_index = self.n1ql_helper.run_cbq_query(
query=query_non_partitioned_index, min_output_size=10000000,
server=self.n1ql_node)["results"]
self.log.info("Partitioned : {0}".format(
str(result_partitioned_index.sort())))
self.log.info("Non Partitioned : {0}".format(
str(result_non_partitioned_index.sort())))
if result_partitioned_index.sort() != result_non_partitioned_index.sort():
failures += 1
self.log.info(
"*** This query does not return same results for partitioned and non-partitioned indexes.")
except Exception as ex:
self.log.info(str(ex))
self.log.info(
"Total scans : {0}, Matching results : {1}, Non-matching results : {2}".format(
total_scans, total_scans - failures, failures))
        self.assertEqual(failures, 0,
                         "Some scans did not yield the same results for partitioned and non-partitioned indexes. Details above.")
def test_load_balancing_amongst_partitioned_index_replicas(self):
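        # Create a partitioned index with replicas, run the same query 100 times
        # and verify every replica served at least one request (load balancing).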
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(age) partition by hash (meta().id) USING GSI WITH {{'num_replica': {0},'num_partition':{1}}};".format(
self.num_index_replicas, self.num_index_partitions)
select_query = "SELECT count(age) from default"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if self.expected_err_msg not in str(ex):
self.fail(
"index creation did not fail with expected error : {0}".format(
str(ex)))
else:
self.log.info("Index creation failed as expected")
self.sleep(30)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
self.assertTrue(self.validate_partition_map(index_metadata, index_name_prefix,
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
# Run select query 100 times
for i in range(0, 100):
self.n1ql_helper.run_cbq_query(query=select_query,
server=self.n1ql_node)
index_stats = self.get_index_stats(perNode=True)
load_balanced = True
for i in range(0, self.num_index_replicas + 1):
if i == 0:
index_name = index_name_prefix
else:
index_name = index_name_prefix + " (replica {0})".format(str(i))
hosts, _ = self.n1ql_helper.get_index_details_using_index_name(
index_name, index_map)
for hostname in hosts:
num_request_served = index_stats[hostname]['default'][index_name][
"num_completed_requests"]
self.log.info("# Requests served by %s on %s = %s" % (
index_name, hostname, num_request_served))
if num_request_served == 0:
load_balanced = False
if not load_balanced:
self.fail("Load is not balanced amongst index replicas")
def test_indexer_pushdowns_multiscan(self):
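        # Verify via EXPLAIN that multi-span predicates are pushed down to the
        # indexer for a partitioned index.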
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing and dept='HR' and salary > 120000 and salary < 150000"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
span_pushdown, _, _, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=3)
self.assertTrue(span_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select name from default where name is not missing and dept='HR' and salary BETWEEN 120000 and 150000"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
span_pushdown, _, _, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=3)
self.assertTrue(span_pushdown, "Operators not pushed down to indexer")
explain_query3 = "EXPLAIN select name from default where name is not missing and dept='HR' and (salary > 120000 or salary > 180000)"
results = self.n1ql_helper.run_cbq_query(query=explain_query3,
server=self.n1ql_node)
self.log.info("Explain plan for query 3 : {0}".format(results))
span_pushdown, _, _, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=3)
self.assertTrue(span_pushdown, "Operators not pushed down to indexer")
def test_indexer_pushdowns_offset_limit(self):
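        # Verify via EXPLAIN that OFFSET and LIMIT are pushed down to the indexer.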
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing and dept='HR' and salary > 120000 and salary < 150000 OFFSET 10 LIMIT 10"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, limit_pushdown, offset_pushdown, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
offset=10, limit=10)
self.assertTrue(limit_pushdown & offset_pushdown,
"Operators not pushed down to indexer")
def test_indexer_pushdowns_projection(self):
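        # Verify via EXPLAIN that projection lists are pushed down to the indexer.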
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing and lower(dept) > 'accounts'"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
self.sleep(30)
_, _, _, projection_pushdown, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
projection_list=[0, 1])
self.assertTrue(projection_pushdown,
"Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select name,dept,salary from default where name is not missing and lower(dept) > 'accounts'"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, projection_pushdown, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
projection_list=[0, 1, 2])
self.assertTrue(projection_pushdown,
"Operators not pushed down to indexer")
explain_query3 = "EXPLAIN select meta().id from default where name is not missing and lower(dept) > 'accounts'"
results = self.n1ql_helper.run_cbq_query(query=explain_query3,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, projection_pushdown, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
projection_list=[0, 1])
self.assertTrue(projection_pushdown,
"Operators not pushed down to indexer")
def test_indexer_pushdowns_sorting(self):
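        # Verify via EXPLAIN that ORDER BY on index keys is pushed down to the
        # indexer for a partitioned index.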
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name,dept,salary from default where name is not missing order by name,dept,salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}, {'keypos': 1}, {'keypos': 2}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select name,dept,salary from default where name is not missing order by name,dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}, {'keypos': 1}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
explain_query3 = "EXPLAIN select meta().id from default where name is not missing order by name"
results = self.n1ql_helper.run_cbq_query(query=explain_query3,
server=self.n1ql_node)
self.log.info("Explain plan for query 3 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
def test_indexer_pushdowns_sorting_desc(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary desc) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name,dept,salary from default where name is not missing order by name,dept,salary desc"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}, {'keypos': 1},
{"desc": True, 'keypos': 2}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
def test_multiple_operator_indexer_pushdowns(self):
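        # Verify via EXPLAIN that scan spans, limit, offset, projection and
        # sorting are all pushed down to the indexer in a single query.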
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing order by name OFFSET 10 LIMIT 10"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
scan_pushdown, limit_pushdown, offset_pushdown, projection_pushdown, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=1, offset=10, limit=10, index_order_list=[{'keypos': 0}],
projection_list=[0])
self.assertTrue(
scan_pushdown & limit_pushdown & offset_pushdown & projection_pushdown & sorting_pushdown,
"Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_group_by_leading_keys(self):
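        # Verify via EXPLAIN that grouping and aggregation on the leading index
        # key are pushed down to the indexer (index_group_aggs in the plan).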
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select dept,count(*) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select dept,sum(salary), min(salary), max(salary), avg(salary) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_group_by_partition_keys(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(LOWER(name),UPPER(dept)) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name,dept,count(*) from default where dept is not missing GROUP BY name,dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select dept,sum(salary), min(salary), max(salary), avg(salary) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_partition_keys_index_keys(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(LOWER(dept)) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select salary,count(*) from default where dept is not missing GROUP BY salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select dept,sum(salary), min(salary), max(salary), avg(salary) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_groupby_trailing_keys_partition_keys(
self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(salary) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select salary,count(*) from default where dept is not missing GROUP BY salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_groupby_trailing_keys_not_partition_keys(
self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(dept) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select salary,count(*) from default where dept is not missing GROUP BY salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_rebalance_out_with_partitioned_indexes_with_concurrent_querying(
self):
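        # Rebalance out an index node while queries are running and verify index
        # item counts and partition distribution are unchanged afterwards.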
self._load_emp_dataset(end=self.num_items)
with_statement = "with {{'num_partition':{0}".format(self.num_index_partitions)
if self.num_index_replicas > 0:
with_statement += ", 'num_replica':{0}".format(self.num_index_replicas)
if self.defer_build:
with_statement += ", 'defer_build': true"
with_statement += " }"
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) " + with_statement
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) " + with_statement
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
#port = node_out.port
#if self.use_https:
# port = CbServer.ssl_port_map.get(str(node_out.port),
# str(node_out.port))
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node while querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(
self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_out_with_partitioned_indexes_with_concurrent_querying_stop_and_resume(
self):
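        # Stop a rebalance-out of an index node mid-way (optionally resume it)
        # while queries are running, then verify index item counts and partition
        # distribution.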
resume = self.input.param("resume_stopped_rebalance", False)
resume_delay = self.input.param("resume_delay", 0)
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{0}}}".format(
self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{0}}}".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node while querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
stopped = RestConnection(self.master).stop_rebalance(
wait_timeout=self.wait_timeout // 3)
self.assertTrue(stopped, msg="unable to stop rebalance")
rebalance.result()
if resume:
if resume_delay > 0:
                self.sleep(resume_delay,
                           "Sleep for some time before resuming the stopped rebalance")
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_in_with_partitioned_indexes_with_concurrent_querying(
self):
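        # Rebalance in an index node while queries are running and verify index
        # item counts and partition distribution afterwards.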
self._load_emp_dataset(end=self.num_items)
with_statement = "with {{'num_partition':{0}".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_statement += ", 'num_replica':{0}".format(
self.num_index_replicas)
if self.defer_build:
with_statement += ", 'defer_build': true"
with_statement += " }"
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) " + with_statement
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) " + with_statement
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_in = self.servers[self.nodes_init]
node_in_str = node_in.ip + ":" + str(node_in.port)
services_in = ["index"]
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance in an indexer node while querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[node_in], [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.append(node_in_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [node_in],
[]),
"Partition distribution post cluster ops has some issues")
def test_swap_rebalance_with_partitioned_indexes_with_concurrent_querying(
self):
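        # Swap-rebalance an index node while queries are running and verify index
        # item counts and partition distribution afterwards.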
self._load_emp_dataset(end=self.num_items)
with_statement = "with {{'num_partition':{0}".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_statement += ", 'num_replica':{0}".format(
self.num_index_replicas)
if self.defer_build:
with_statement += ", 'defer_build': true"
with_statement += " }"
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) " + with_statement
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) " + with_statement
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
node_in = self.servers[self.nodes_init]
node_in_str = node_in.ip + ":" + str(node_in.port)
services_in = ["index"]
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
try:
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
            # swap rebalance an indexer node while querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[node_in], [node_out],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.append(node_in_str)
node_list.remove(node_out_str)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [node_in],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_failover_with_partitioned_indexes_with_concurrent_querying(
self):
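        # Fail over an index node and rebalance it out while queries are running,
        # then verify index item counts and partition distribution.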
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # failover and rebalance out an indexer node while querying is in progress
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_failover_addback_with_partitioned_indexes_with_concurrent_querying(
self):
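        # Fail over an index node, add it back and rebalance while queries are
        # running, then verify index item counts and partition distribution.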
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # failover an indexer node, then add it back and rebalance while querying is in progress
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[]),
"Partition distribution post cluster ops has some issues")
def test_kv_rebalance_out_with_partitioned_indexes_with_concurrent_querying(
self):
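        # Rebalance out a node (a KV node, per the test configuration) while
        # queries are running and verify index item counts and partition
        # distribution are unaffected.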
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out the node while querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_out_with_replica_partitioned_indexes_partition_loss(
self):
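        # Rebalance out an index node hosting partitions of a replicated
        # partitioned index and verify no partitions are lost (total partition
        # count and item counts still match the bucket).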
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# rebalance out an indexer node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
        # Allow indexer metadata to catch up with the last rebalance
self.sleep(60)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
total_index_item_count = 0
bucket_item_count = 0
total_partition_count = 0
for index in index_names:
bucket_item_count, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
total_index_item_count += total_item_count_after
total_partition_count += self.get_num_partitions_for_index(
RestConnection(self.index_servers[0]).get_indexer_metadata(), index)
self.assertEqual(total_index_item_count, bucket_item_count,
"Item count in index do not match after cluster ops.")
self.assertEqual(self.num_index_partitions, total_partition_count,
"Some partitions are not available after rebalance")
def test_node_failure_during_rebalance_out_partitioned_indexes(
self):
fail_node = self.input.param("fail_node", None)
if fail_node:
node_to_fail = self.servers[fail_node]
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
try:
# rebalance out an indexer node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
remote_client = RemoteMachineShellConnection(node_to_fail)
if self.node_operation == "kill_indexer":
remote_client.terminate_process(process_name="indexer")
elif self.node_operation == "kill_kv":
remote_client.kill_memcached()
else:
self.reboot_node(node_to_fail)
remote_client.disconnect()
            # wait for restart and warmup on all nodes
self.sleep(self.wait_timeout*2)
# wait till node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([node_to_fail],
self,
wait_if_warmup=True)
rebalance.result()
except Exception as ex:
self.log.info(str(ex))
# Rerun rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
self.sleep(30)
reached_rerun = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached_rerun,
"retry of the failed rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
self.log.info(index_data_after[index]["index_metadata"])
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_node_failure_during_rebalance_in_partitioned_indexes(
self):
fail_node = self.input.param("fail_node", None)
if fail_node:
node_to_fail = self.servers[fail_node]
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_in = self.servers[self.nodes_init]
node_in_str = node_in.ip + ":" + str(node_in.port)
services_in = ["index"]
# Get Index Names
index_names = ["idx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# rebalance in an indexer node
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[node_in], [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
remote_client = RemoteMachineShellConnection(node_to_fail)
if self.node_operation == "kill_indexer":
remote_client.terminate_process(process_name="indexer")
elif self.node_operation == "kill_kv":
remote_client.kill_memcached()
else:
self.reboot_node(node_to_fail)
remote_client.disconnect()
            # wait for restart and warmup on all nodes
self.sleep(self.wait_timeout)
# wait till node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([node_to_fail],
self,
wait_if_warmup=True)
rebalance.result()
except Exception as ex:
self.log.info(str(ex))
# Rerun Rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
self.sleep(30)
reached_rerun = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached_rerun, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(10)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.append(node_in_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [node_in],
[]),
"Partition distribution post cluster ops has some issues")
def test_replica_partition_index_with_excluded_nodes_failover(self):
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
index_names = []
index_names.append("idx1")
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
index_validated = 0
for index_name in index_names:
for index in index_metadata["status"]:
if index["name"] == index_name:
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertEqual(index["hosts"].sort(), expected_hosts.sort(),
"Planner did not ignore excluded node during index creation for {0}".format(
index_name))
index_validated += 1
self.assertEqual(index_validated, (self.num_index_replicas + 1),
"All index replicas not created")
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
        # failover and rebalance out an indexer node
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
node_list = copy.deepcopy(self.node_list[1:])
if node_out_str in node_list:
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
total_index_item_count = 0
bucket_item_count = 0
total_partition_count = 0
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
bucket_item_count, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
total_index_item_count += total_item_count_after
total_partition_count += self.get_num_partitions_for_index(
self.rest.get_indexer_metadata(), index)
self.assertEqual(total_index_item_count, bucket_item_count,
"Item count in index do not match after cluster ops.")
self.assertEqual(self.num_index_partitions, total_partition_count,
"Some partitions are not available after rebalance")
def test_replica_partition_index_with_excluded_nodes_failover_addback(self):
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
index_names = []
index_names.append("idx1")
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
# Need to see if the indexes get created in the first place
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
index_validated = 0
for index_name in index_names:
for index in index_metadata["status"]:
if index["name"] == index_name:
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
#TODO revert after bug fix https://issues.couchbase.com/browse/MB-51119
if self.use_https:
expected_hosts = ["{}:18091".format(host.split(":")[0]) for host in expected_hosts]
self.assertEqual(index["hosts"], expected_hosts,
"Planner did not ignore excluded node during index creation for {0}".format(
index_name))
index_validated += 1
self.assertEqual(index_validated, (self.num_index_replicas + 1),
"All index replicas not created")
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
        # failover and rebalance out an indexer node
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
def test_partition_placement_one_node_in_paused_state(self):
index_server = self.index_servers[0]
create_index_query1 = "CREATE PRIMARY INDEX ON default USING GSI"
create_index_query2 = "CREATE INDEX idx_job_title ON default(job_title) USING GSI"
create_index_query3 = "CREATE INDEX idx_join_yr ON default(join_yr) USING GSI"
create_index_query4 = "CREATE INDEX idx_job_title_join_yr ON default(job_title,join_yr) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query1,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query4,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
        # Ensure the indexer reaches the paused state
self._saturate_indexer_memory(index_server)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
create_index_query = "CREATE INDEX pidx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "pidx1"
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
index_node_list = self.node_list
index_node_list.append(
self.servers[self.nodes_init].ip + ":" + self.servers[
self.nodes_init].port)
index_node_list.remove(index_server.ip + ":" + index_server.port)
index_node_list.sort()
validated = False
for index in index_metadata["status"]:
if index["name"] == "pidx1":
self.log.info("Expected Hosts : {0}".format(index_node_list))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertEqual(index["hosts"], index_node_list,
"Planner did not exclude node in Paused state during index creation")
validated = True
if not validated:
self.fail("Looks like index was not created.")
def test_index_scans_one_node_memory_saturated(self):
index_server = self.index_servers[0]
index_server_str = index_server.ip + ":" + index_server.port
create_index_query1 = "CREATE PRIMARY INDEX ON default USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query2 = "CREATE INDEX idx_job_title ON default(job_title) USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query3 = "CREATE INDEX idx_join_yr ON default(join_yr) USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query4 = "CREATE INDEX idx_job_title_join_yr ON default(job_title,join_yr) USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query5 = "CREATE INDEX pidx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query1,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query4,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query5,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
query = "select name,mutated from default where name is not null order by name limit 1000"
results = self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
self.assertIsNotNone(results["results"], "No results")
num_results_before = results["metrics"]["resultCount"]
self.log.info("num_results_before : {0}".format(num_results_before))
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
        # Ensure the indexer reaches the paused state
self._saturate_indexer_memory(index_server)
query = "select name,mutated from default where name is not null order by name limit 1000"
results = self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
self.assertIsNotNone(results["results"], "No results")
num_results_after = results["metrics"]["resultCount"]
self.log.info("num_results_after : {0}".format(str(num_results_after)))
def test_rebalance_out_concurrent_querying_one_node_nw_partitioned(self):
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
node_nw_partition_out = self.servers[self.node_out - 1]
self.start_firewall_on_node(node_nw_partition_out)
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node while querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
self.stop_firewall_on_node(node_nw_partition_out)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = self.rest.get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_out_concurrent_querying_server_group_nw_partitioned(
self):
self._load_emp_dataset(end=self.num_items)
self._create_server_groups()
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Network partition out Server Group
server_group_out = self.input.param("server_group_out", None)
server_group_nodes = []
if server_group_out:
server_group_nodes = server_group_out.split(":")
for node in server_group_nodes:
self.start_firewall_on_node(node)
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node while querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
if server_group_nodes:
for node in server_group_nodes:
self.stop_firewall_on_node(node)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = self.rest.get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_partitioned_index_recoverability(self):
node_out = self.servers[self.node_out]
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(meta().id) USING GSI"
if self.num_index_replicas:
create_index_query += " with {{'num_replica':{0}}};".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
# Allow index to be built completely
self.sleep(30)
# Run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
result_before = self.n1ql_helper.run_cbq_query(query=scan_query, min_output_size=10000000,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Scan failed")
# Kill indexer and allow it to recover and rebuild index
remote = RemoteMachineShellConnection(node_out)
remote.terminate_process(process_name="indexer")
self.sleep(30, "Sleep after killing indexer")
# Run same query again and check if results match from before recovery
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
result_after = self.n1ql_helper.run_cbq_query(query=scan_query, min_output_size=10000000,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Scan failed")
# Validate if the same count of docs are returned after recovery
self.assertEqual(result_before["metrics"]["resultCount"], result_after["metrics"]["resultCount"], "No. of rows returned before recovery and after recovery are different")
def test_backup_restore_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx3"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx3 on default(name,dept) partition by hash(salary) USING GSI with {'num_replica':1}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx4"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = True
index_detail[
"definition"] = "CREATE INDEX idx4 on default(name,dept) partition by hash(salary) USING GSI with {'defer_build':true}"
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
self._create_backup(kv_node)
# Drop and recreate bucket
self.cluster.bucket_delete(kv_node, bucket="default")
default_params = self._create_bucket_params(server=self.master,
size=self.bucket_size,
replicas=self.num_replicas)
self.cluster.create_default_bucket(default_params)
if self.node_out > 0:
node_out = self.servers[self.node_out]
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
# Restore backup
self._create_restore(kv_node)
self.sleep(60)
# Validate all indexes restored correctly
index_map = self.get_index_map()
self.log.info(index_map)
if self.node_out > 0:
if self.node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
        # After restore, all indexes will be in the unbuilt state, so change the expected state of the indexes.
for index in index_details:
index["defer_build"] = True
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_backup_partitioned_index_with_failed_node(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx3"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx3 on default(name,dept) partition by hash(salary) USING GSI with {'num_replica':1}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx4"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = True
index_detail[
"definition"] = "CREATE INDEX idx4 on default(name,dept) partition by hash(salary) USING GSI with {'defer_build':true}"
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
try:
# Stop couchbase on indexer node before taking backup if test config specifies it
remote = RemoteMachineShellConnection(node_out)
remote.stop_couchbase()
self.sleep(30, "Allow node to be marked as a failed node")
self._create_backup(kv_node)
except Exception as ex:
self.log.info(str(ex))
finally:
remote = RemoteMachineShellConnection(node_out)
remote.start_couchbase()
def test_restore_partitioned_index_with_failed_node(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx3"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx3 on default(name,dept) partition by hash(salary) USING GSI with {'num_replica':1}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx4"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = True
index_detail[
"definition"] = "CREATE INDEX idx4 on default(name,dept) partition by hash(salary) USING GSI with {'defer_build':true}"
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
self._create_backup(kv_node)
# Drop and recreate bucket
self.cluster.bucket_delete(kv_node, bucket="default")
default_params = self._create_bucket_params(server=self.master,
size=self.bucket_size,
replicas=self.num_replicas)
self.cluster.create_default_bucket(default_params)
try:
# Restore backup
# Stop couchbase on indexer node before restoring backup if test config specifies it
remote = RemoteMachineShellConnection(node_out)
remote.stop_couchbase()
self.sleep(30, "Allow node to be marked as a failed node")
self._create_restore(kv_node)
except Exception as ex:
self.log.info(str(ex))
finally:
remote = RemoteMachineShellConnection(node_out)
remote.start_couchbase()
def test_backup_restore_partitioned_index_default_num_partitions(self):
self._load_emp_dataset(end=self.num_items)
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_detail["num_partitions_post_restore"] = 8
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_detail["num_partitions_post_restore"] = 64
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
self._create_backup(kv_node)
# Drop and recreate bucket
self.cluster.bucket_delete(kv_node, bucket="default")
default_params = self._create_bucket_params(server=self.master,
size=self.bucket_size,
replicas=self.num_replicas)
self.cluster.create_default_bucket(default_params)
# Set default number of partitions
self.rest.set_index_settings(
{"indexer.numPartitions": 4})
# Change expected num of partitions
for index in index_details:
index["num_partitions"] = index["num_partitions_post_restore"]
# Restore backup
self._create_restore(kv_node)
self.sleep(60)
# Validate all indexes restored correctly
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
        # After restore, all indexes will be in the unbuilt state, so change the expected state of the indexes.
for index in index_details:
index["defer_build"] = True
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
def get_stats_for_partitioned_indexes(self, bucket_name="default",
index_name=None, node_list=None):
        if node_list is None:
node_list = self.node_list
bucket_item_count = self.get_item_count(self.servers[0], bucket_name)
index_stats = self.get_index_stats(perNode=True)
total_item_count = 0
total_items_processed = 0
for node in node_list:
if not index_name:
index_names = []
for key in index_stats[node][bucket_name]:
index_names.append(key)
index_name = index_names[0]
try:
total_item_count += index_stats[node][bucket_name][index_name][
"items_count"]
total_items_processed = \
index_stats[node][bucket_name][index_name][
"num_docs_processed"]
except Exception as ex:
self.log.info(str(ex))
self.log.info(
"Index {0} : Total Item Count={1} Total Items Processed={2}".format(
index_name, str(total_item_count), str(total_items_processed)))
return (bucket_item_count, total_item_count, total_items_processed)
# Description : Validate index metadata : num_partitions, index status, index existence
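    # Example shape of an entry in index_metadata["status"] that this method
    # (and the other validators below) rely on; the values shown are illustrative:
    #   {"name": "idx1", "partitioned": True, "numPartition": 8,
    #    "status": "Ready", "hosts": ["172.23.100.11:8091"],
    #    "partitionMap": {"172.23.100.11:8091": [1, 2, 3, 4, 5, 6, 7, 8]}}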
def validate_partitioned_indexes(self, index_details, index_map,
index_metadata, skip_numpartitions_check=False):
isIndexPresent = False
isNumPartitionsCorrect = False
isDeferBuildCorrect = False
# Check if index exists
for index in index_metadata["status"]:
if index["name"] == index_details["index_name"]:
isIndexPresent = True
# If num-partitions are set, check no. of partitions
expected_num_partitions = 16
if index_details["num_partitions"] > 0:
expected_num_partitions = index_details["num_partitions"]
if index["partitioned"] and index[
"numPartition"] == expected_num_partitions:
isNumPartitionsCorrect = True
else:
self.log.info(
"Index {0} on /getIndexStatus : Partitioned={1}, num_partition={2}.. Expected numPartitions={3}".format(
index["name"], index["partitioned"],
index["numPartition"],
index_details["num_partitions"]))
if index_details["defer_build"] == True and index[
"status"] == "Created":
isDeferBuildCorrect = True
elif index_details["defer_build"] == False and index[
"status"] == "Ready":
isDeferBuildCorrect = True
else:
self.log.info(
"Incorrect build status for index created with defer_build=True. Status for {0} is {1}".format(
index["name"], index["status"]))
if not isIndexPresent:
self.log.info("Index not listed in /getIndexStatus")
if skip_numpartitions_check:
return isIndexPresent and isDeferBuildCorrect
else:
return isIndexPresent and isNumPartitionsCorrect and isDeferBuildCorrect
    # Description : Checks that no host holds the same partition from more than one replica, and that for each replica all partitions are present and distributed across the nodes
def validate_partition_map(self, index_metadata, index_name, num_replica, num_partitions,dropped_replica=False, replicaId=0):
index_names = []
index_names.append(index_name)
hosts = []
# hosts = index_metadata["status"][0]["hosts"]
for index in index_metadata['status']:
for host in index['hosts']:
if host not in hosts:
hosts.append(host)
for i in range(1, num_replica + 1):
if dropped_replica:
if not i == replicaId:
index_names.append(index_name + " (replica {0})".format(str(i)))
else:
dropped_replica_name = index_name + " (replica {0})".format(str(i))
else:
index_names.append(index_name + " (replica {0})".format(str(i)))
partition_validation_per_host = True
for host in hosts:
pmap_host = []
for idx_name in index_names:
for index in index_metadata["status"]:
if (index["name"] == idx_name) and (host in index["hosts"]):
pmap_host += index["partitionMap"][host]
self.log.info(
"List of partitions on {0} : {1}".format(host, pmap_host))
if len(set(pmap_host)) != len(pmap_host):
partition_validation_per_host &= False
self.log.info(
"Partitions on {0} for all replicas are not correct, host contains duplicate partitions".format(host))
partitions_distributed_for_index = True
for idx_name in index_names:
for index in index_metadata["status"]:
if index["name"] == idx_name:
totalPartitions = 0
for host in hosts:
if host in index["partitionMap"]:
totalPartitions += len(index["partitionMap"][host])
partitions_distributed_for_index &= (
totalPartitions == num_partitions)
if dropped_replica:
if index['name'] == dropped_replica_name:
partitions_distributed_for_index = False
return partition_validation_per_host & partitions_distributed_for_index
def validate_partition_distribution_after_cluster_ops(self, index_name,
map_before_rebalance,
map_after_rebalance,
nodes_in, nodes_out):
# Check for number of partitions before and after rebalance
# Check the host list before rebalance and after rebalance, and see if the incoming or outgoing node is added/removed from the host list
# Check for partition distribution across all indexer nodes
for index in map_before_rebalance["status"]:
if index["name"] == index_name:
host_list_before = index["hosts"]
num_partitions_before = index["numPartition"]
partition_map_before = index["partitionMap"]
for index in map_after_rebalance["status"]:
if index["name"] == index_name:
host_list_after = index["hosts"]
num_partitions_after = index["numPartition"]
partition_map_after = index["partitionMap"]
is_num_partitions_equal = False
if num_partitions_before == num_partitions_after:
is_num_partitions_equal = True
else:
self.log.info(
"Number of partitions before and after cluster operations is not equal. Some partitions missing/extra.")
self.log.info(
"Num Partitions Before : {0}, Num Partitions After : {1}".format(
num_partitions_before, num_partitions_after))
expected_host_list_after = copy.deepcopy(host_list_before)
for node in nodes_in:
node_str = node.ip + ":" + str(node.port)
expected_host_list_after.append(node_str)
for node in nodes_out:
node_str = node.ip + ":" + str(node.port)
if node_str in expected_host_list_after:
expected_host_list_after.remove(node_str)
is_node_list_correct = False
        if sorted(expected_host_list_after) == sorted(host_list_after):
is_node_list_correct = True
else:
            self.log.info(
                "Host list for index is not as expected after cluster operations.")
            self.log.info("Expected Nodes : {0}, Actual nodes : {1}".format(
                str(expected_host_list_after), str(host_list_after)))
is_partitions_distributed = False
pmap_host_list = list(partition_map_after.keys())
        if sorted(pmap_host_list) == sorted(host_list_after):
is_partitions_distributed = True
else:
self.log.info(
"Partitions not distributed correctly post cluster ops")
return is_num_partitions_equal & is_node_list_correct & is_partitions_distributed
def get_num_partitions_for_index(self, index_map, index_name):
num_partitions = 0
for index_map_item in index_map["status"]:
if index_map_item["name"] == index_name:
num_partitions = index_map_item["numPartition"]
        if num_partitions == 0:
            self.log.info("Index {0} not found in the index metadata".format(index_name))
        return num_partitions
# Description : Returns a list of create index statements generated randomly for emp dataset.
# The create index statements are generated by randomizing various parts of the statements like list of
# index keys, partition keys, primary/secondary indexes, deferred index, partial index, replica index, etc.
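    # For illustration, one statement this generator can produce (names and
    # values below are examples only; the actual output is randomized):
    #   CREATE INDEX idx123456 on default(name,salary,join_date)
    #       partition by hash(LOWER(name),meta().id)
    #       with {'num_partition':16,'num_replica':1,'defer_build':true}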
def generate_random_create_index_statements(self, bucketname="default",
idx_node_list=None,
num_statements=1):
num_idx_nodes = len(idx_node_list)
emp_fields = {
'text': ["name", "dept", "languages_known", "email", "meta().id"],
'number': ["mutated", "salary"],
'boolean': ["is_manager"],
'datetime': ["join_date"],
'object': ["manages"] # denote nested fields
}
emp_nested_fields = {
'manages': {
'text': ["reports"],
'number': ["team_size"]
}
}
index_variations_list = ["num_partitions", "num_replica", "defer_build",
"partial_index", "primary_index", "nodes",
"sizing_estimates"]
all_emp_fields = ["name", "dept", "languages_known", "email", "mutated",
"salary", "is_manager", "join_date", "reports",
"team_size"]
partition_key_type_list = ["leading_key", "trailing_key",
"function_applied_key",
"document_id", "function_applied_doc_id"]
index_details = []
for i in range(num_statements):
random.seed()
# 1. Generate a random no. of fields to be indexed
num_index_keys = random.randint(1, len(all_emp_fields) - 1)
# 2. Generate random fields
index_fields = []
for index in range(0, num_index_keys):
index_field_list_idx = random.randint(0, len(
all_emp_fields) - 1)
if all_emp_fields[
index_field_list_idx] not in index_fields:
index_fields.append(
all_emp_fields[index_field_list_idx])
else:
# Generate a random index again
index_field_list_idx = random.randint(0,
len(
all_emp_fields) - 1)
if all_emp_fields[
index_field_list_idx] not in index_fields:
index_fields.append(
all_emp_fields[index_field_list_idx])
# 3. Generate a random no. for no. of partition keys (this should be < #1)
if num_index_keys > 1:
num_partition_keys = random.randint(1, num_index_keys - 1)
else:
num_partition_keys = num_index_keys
# 4. For each partition key, randomly select a partition key type from the list and generate a partition key with it
partition_keys = []
for index in range(num_partition_keys):
key = None
partition_key_type = partition_key_type_list[
random.randint(0, len(partition_key_type_list) - 1)]
if partition_key_type == partition_key_type_list[0]:
key = index_fields[0]
if partition_key_type == partition_key_type_list[1]:
if len(index_fields) > 1:
randval = random.randint(1, len(index_fields)-1)
key = index_fields[randval]
else:
key = index_fields[0]
if partition_key_type == partition_key_type_list[2]:
idx_key = index_fields[
random.randint(0, len(index_fields) - 1)]
if idx_key in emp_fields["text"]:
key = ("LOWER({0})".format(idx_key))
elif idx_key in emp_fields["number"]:
key = ("({0} % 10) + ({0} * 2) ").format(idx_key)
elif idx_key in emp_fields["boolean"]:
key = ("NOT {0}".format(idx_key))
elif idx_key in emp_fields["datetime"]:
key = ("DATE_ADD_STR({0},-1,'year')".format(idx_key))
elif idx_key in emp_nested_fields["manages"]["text"]:
key = ("LOWER({0})".format(idx_key))
elif idx_key in emp_nested_fields["manages"]["number"]:
key = ("({0} % 10) + ({0} * 2)").format(idx_key)
if partition_key_type == partition_key_type_list[3]:
key = "meta().id"
if partition_key_type == partition_key_type_list[4]:
key = "SUBSTR(meta().id, POSITION(meta().id, '__')+2)"
if ((key is not None) or (key != "")) and (key not in partition_keys):
partition_keys.append(key)
self.log.info("Partition Keys : {0}, Partition Key Type : {1}".format(key, partition_key_type))
# 6. Choose other variation in queries from the list.
num_index_variations = random.randint(0, len(
index_variations_list) - 1)
index_variations = []
for index in range(num_index_variations):
index_variation = index_variations_list[
random.randint(0, len(index_variations_list) - 1)]
if index_variation not in index_variations:
index_variations.append(index_variation)
# Primary indexes cannot be partial, so remove partial index if primary index is in the list
if ("primary_index" in index_variations) and (
"partial_index" in index_variations):
index_variations.remove("partial_index")
# 7. Build create index queries.
index_name = "idx" + str(random.randint(0, 1000000))
if "primary_index" in index_variations:
create_index_statement = "CREATE PRIMARY INDEX {0} on {1}".format(
index_name, bucketname)
else:
create_index_statement = "CREATE INDEX {0} on {1}(".format(
index_name, bucketname)
create_index_statement += ",".join(index_fields) + ")"
create_index_statement += " partition by hash("
create_index_statement += ",".join(partition_keys) + ")"
if "partial_index" in index_variations:
create_index_statement += " where meta().id > 10"
with_list = ["num_partitions", "num_replica", "defer_build",
"nodes", "sizing_estimates"]
num_partitions = 0
num_replica = 0
defer_build = False
nodes = []
if (any(x in index_variations for x in with_list)):
with_statement = []
create_index_statement += " with {"
if "num_partitions" in index_variations:
if self.gsi_type == "memory_optimized":
num_partitions = random.randint(4, 20)
else:
num_partitions = random.randint(4, 100)
with_statement.append(
"'num_partition':{0}".format(num_partitions))
if "num_replica" in index_variations:
# We do not want 'num_replica' and 'nodes' both in the with clause, as it can cause errors if they do not match.
if "nodes" in index_variations:
index_variations.remove("nodes")
num_replica = random.randint(1, num_idx_nodes - 1)
with_statement.append(
"'num_replica':{0}".format(num_replica))
if "defer_build" in index_variations:
defer_build = True
with_statement.append("'defer_build':true")
if "sizing_estimates" in index_variations:
with_statement.append("'secKeySize':20")
with_statement.append("'docKeySize':20")
with_statement.append("'arrSize':10")
if "nodes" in index_variations:
num_nodes = random.randint(1, num_idx_nodes - 1)
for i in range(0, num_nodes):
node = idx_node_list[
random.randint(0, num_idx_nodes - 1)]
if node not in nodes:
nodes.append(node)
node_list_str = ""
if nodes is not None and len(nodes) > 1:
node_list_str = "\"" + "\",\"".join(nodes) + "\""
else:
node_list_str = "\"" + nodes[0] + "\""
with_statement.append("'nodes':[{0}]".format(node_list_str))
create_index_statement += ",".join(with_statement) + "}"
index_detail = {}
index_detail["index_name"] = index_name
if num_partitions == 0:
num_partitions = 8
index_detail["num_partitions"] = num_partitions
index_detail["num_replica"] = num_replica
index_detail["defer_build"] = defer_build
index_detail["index_definition"] = create_index_statement
index_detail["nodes"] = nodes
if key is not None or key != "":
index_details.append(index_detail)
else:
self.log.info(
"Generated a malformed index definition. Discarding it.")
return index_details
def validate_query_plan(self, plan, index_name, num_spans=0, limit=0,
offset=0, projection_list=[], index_order_list=[]):
span_pushdown = False
limit_pushdown = False
offset_pushdown = False
projection_pushdown = False
sorting_pushdown = False
index_section_found = False
plan_index_section = {}
for plan_child in plan["~children"]:
if "index" in plan_child:
index_section_found = True
plan_index_section = plan_child
                break
for plan_child in plan["~children"]:
if not index_section_found:
for plan_child_child in plan_child["~children"]:
if "index" in plan_child_child:
index_section_found = True
plan_index_section = plan_child_child
                        break
else:
break
if index_section_found:
if plan_index_section["index"] == index_name:
if num_spans > 0:
if "spans" in plan_index_section:
if len(plan_index_section["spans"][0][
"range"]) != num_spans:
                            self.log.info(
                                "Not all spans pushed down to indexer. Spans pushed down to indexer = %s",
                                len(plan_index_section["spans"][0]["range"]))
else:
self.log.info(
"All spans pushed down to indexer")
span_pushdown = True
else:
self.log.info("Spans not pushed down to indexer")
if limit > 0:
if "limit" in plan_index_section:
if int(plan_index_section["limit"]) != limit:
self.log.info(
"Limit not correctly pushed down to indexer")
else:
self.log.info(
"Limit pushed down to indexer")
limit_pushdown = True
else:
self.log.info("Limit not pushed down to indexer")
if offset > 0:
if "offset" in plan_index_section:
if int(plan_index_section["offset"]) != offset:
self.log.info(
"Offset not correctly pushed down to indexer")
else:
self.log.info(
"Offset pushed down to indexer")
offset_pushdown = True
else:
self.log.info("Offset not pushed down to indexer")
if projection_list:
if "index_projection" in plan_index_section:
if plan_index_section["index_projection"][
"entry_keys"] != projection_list:
self.log.info(
"Projection not correctly pushed down to indexer")
else:
self.log.info(
"Projection pushed down to indexer")
projection_pushdown = True
if index_order_list:
if "index_order" in plan_index_section:
if plan_index_section[
"index_order"] != index_order_list:
self.log.info(
"Sorting not correctly pushed down to indexer")
else:
self.log.info(
"Sorting pushed down to indexer")
sorting_pushdown = True
return span_pushdown, limit_pushdown, offset_pushdown, projection_pushdown, sorting_pushdown
def _load_emp_dataset(self, op_type="create", expiration=0, start=0,
end=1000):
# Load Emp Dataset
self.cluster.bucket_flush(self.master)
if end > 0:
self._kv_gen = JsonDocGenerator("emp_",
encoding="utf-8",
start=start,
end=end)
gen = copy.deepcopy(self._kv_gen)
self._load_bucket(self.buckets[0], self.servers[0], gen, op_type,
expiration)
def _run_queries(self, query, count=10):
for i in range(0, count):
try:
self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
raise Exception("query failed")
self.sleep(1)
def _saturate_indexer_memory(self, index_server=None):
cnt = 0
step = 100000
docs = 100000
while cnt < 50:
if self.gsi_type == "memory_optimized":
if self._validate_indexer_status_oom(index_server):
self.log.info("OOM on index server is achieved")
return True
elif self.gsi_type == "plasma":
if self._validate_indexer_in_dgm(index_server):
self.log.info("DGM on index server is achieved")
return True
for task in self.kv_mutations(docs, start=docs - step):
task.result()
self.sleep(5)
cnt += 1
docs += step
return False
def _validate_indexer_status_oom(self, index_server=None):
if not index_server:
index_server = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=False)
rest = RestConnection(index_server)
index_stats = rest.get_indexer_stats()
self.log.info(index_stats["indexer_state"])
if index_stats["indexer_state"].lower() == "paused":
return True
else:
return False
def _validate_indexer_in_dgm(self, index_server=None):
indexer_rest = RestConnection(index_server)
content = indexer_rest.get_index_storage_stats()
for index in list(content.values()):
for stats in list(index.values()):
if stats["MainStore"]["resident_ratio"] >= 1.00:
return False
return True
def kv_mutations(self, docs=1, start=0):
self.log.info("Inside kv_mutations")
if not docs:
docs = self.docs_per_day
gens_load = self.generate_docs(docs, start=start)
self.full_docs_list = self.generate_full_docs_list(gens_load)
self.gen_results = TuqGenerators(self.log, self.full_docs_list)
tasks = self.async_load(generators_load=gens_load, op_type="create",
batch_size=1000)
return tasks
word2vec.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
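# With distortion=0.75 the sampler draws noise words with probability proportional to
# count**0.75, i.e. the smoothed unigram distribution used for negative sampling in the
# word2vec paper.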
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
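# For reference, per example the summed terms above equal
#   -log(sigmoid(true_logit)) - sum_k log(sigmoid(-sampled_logit_k)),
# the standard negative-sampling objective, averaged over the batch.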
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vector on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
time.sleep(opts.statistics_interval) # Reports our progress once a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError as e:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
print(c)
return
print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
generators.py
|
import json
from PIL import Image
from io import BytesIO
import selenium.webdriver
from shobdokutir.optical.image_utils import trim_image
from shobdokutir.web.servers import run_parrot_server
from multiprocessing import Process
from subprocess import check_output
class OpticalTextBuilder:
"""
Definition: OpticalTextBuilder uses a local parrot server and a web browser to render any Unicode text as an image.
Assumptions: The environment must be properly configured for the text to render correctly.
"""
def __init__(self, server_port: int = 6976, server_host: str = '0.0.0.0') -> None:
"""
Starts a parrot server and a web browser
"""
self.server_host = server_host
self.server_port = server_port
self.process = Process(target=run_parrot_server, args=(self.server_host, self.server_port))
self.process.start()
self.driver = selenium.webdriver.Firefox()
def clear_all(self) -> None:
"""
Tears down both the server and the client
"""
self.process.terminate()
self.driver.close()
self.driver.quit()
def get_text_image(self, txt: str, font_size: int = None, font_name: str = None) -> Image.Image:
"""
Get an image of the text in the specified font_name and font_size
"""
if font_size:
font_size_text = f"&size={font_size}"
else:
font_size_text = ""
if font_name:
font_name_text = f"&font={font_name}"
else:
font_name_text = ""
url = f"http://{self.server_host}:{str(self.server_port)}" \
f"?message={json.dumps(txt)}{font_name_text}{font_size_text}"
print(url)
self.driver.get(url)
data = self.driver.get_full_page_screenshot_as_png()
img = Image.open(BytesIO(data))
return trim_image(img)
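# Illustrative (hypothetical font name): get_text_image("hello", 14, "SomeFont") requests
# http://0.0.0.0:6976?message="hello"&font=SomeFont&size=14 and returns the trimmed
# screenshot of the rendered page.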
def get_font_details():
font_list = check_output(["fc-list"]).decode("unicode-escape").split("\n")
font_details = {}
for a_font in font_list:
if not a_font.strip():
continue
font_entry = [a_col.strip().split("=")[1] if i == 2 else a_col.strip()
for i, a_col in enumerate(a_font.split(":"))]
font_entry[2] = set(font_entry[2].split(",")) if "," in font_entry[2] else {font_entry[2]}
font_details.setdefault(font_entry[1], {'path': [], 'style': []})['path'].append(font_entry[0])
font_details[font_entry[1]]['style'].append(font_entry[2])
return font_details
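# An fc-list output line typically looks like
#   /usr/share/fonts/truetype/dejavu/DejaVuSans.ttf: DejaVu Sans:style=Book
# i.e. path:family:style=..., which is the column layout the splitting above assumes.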
# if __name__=='__main__':
# # TODO: Render the Folas: ro fola, bo fola, jo fola
# for afont in fontlist:
# process,driver = initiate(font_name=afont,font_size=14)
# sleep(5)
# for acomp in letters:
# fname = 'pics/'+letters[acomp]+'_'+afont+'.png'
# txt2img(driver,letters[acomp],fname)
# sleep(0.125)
# call(['convert',fname,'-trim',fname])
# for akar in kars:
# for acomp in letters:
# fname = 'pics/'+letters[acomp]+kars[akar]+'_'+afont+'.png'
# txt2img(driver,letters[acomp]+kars[akar],fname)
# sleep(0.125)
# call(['convert',fname,'-trim',fname])
# for acomp in allcompound:
# fname = 'pics/'+acomp+'_'+afont+'.png'
# txt2img(driver,acomp,fname)
# sleep(0.125)
# call(['convert',fname,'-trim',fname])
# for akar in kars:
# for acomp in allcompound:
# fname = 'pics/'+acomp+kars[akar]+'_'+afont+'.png'
# txt2img(driver,acomp+kars[akar],fname)
# sleep(0.125)
# call(['convert',fname,'-trim',fname])
# clear_all(process,driver)
|
multithread.py
|
from threading import Thread
import time
COUNT = 5000000
def countdown(n):
while n>0:
n -= 1
t1 = Thread(target=countdown, args=(COUNT//4,))
t2 = Thread(target=countdown, args=(COUNT//4,))
t3 = Thread(target=countdown, args=(COUNT//4,))
t4 = Thread(target=countdown, args=(COUNT//4,))
start = time.time()
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join()
end = time.time()
print('Time taken in seconds -', end - start)
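# Note: because of CPython's GIL, these four CPU-bound threads typically take about as
# long as a single thread doing all the work. A minimal single-threaded baseline for
# comparison, using the same countdown and COUNT defined above:
#
# start = time.time()
# countdown(COUNT)
# print('Time taken in seconds -', time.time() - start)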
|
__main__.py
|
# Copyright Jamie Allsop 2019-2019
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import sys
import threading
import platform
import subprocess
import re
import os
import six
from cuppa.utility.python2to3 import as_str, as_byte_str, Exception
class AutoFlushFile(object):
def __init__( self, f ):
self.f = f
def flush( self ):
self.f.flush()
def write( self, x ):
self.f.write(x)
self.f.flush()
class LineConsumer(object):
_empty_str = as_byte_str("")
def __init__( self, call_readline, processor=None ):
self.call_readline = call_readline
self.processor = processor
def __call__( self ):
for line in iter( self.call_readline, self._empty_str ):
line = as_str( line )
if line:
if self.processor:
line = self.processor( line )
if line:
sys.stdout.write( line )
else:
sys.stdout.write( line )
class MaskSecrets(object):
def __init__( self ):
secret_regex = re.compile( r'.*TOKEN.*' )
self.secrets = {}
for key, val in six.iteritems(os.environ):
if re.match( secret_regex, key ):
self.secrets[as_str(val)] = key
def mask( self, message ):
for secret, mask in six.iteritems(self.secrets):
message = message.replace( secret, mask )
return message
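# Illustrative (hypothetical variable): with MY_TOKEN=abc123 in the environment,
# mask("pushing abc123") returns "pushing MY_TOKEN", so secret values are replaced by
# their variable names in any logged output.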
def run_scons( args_list ):
masker = MaskSecrets()
#print "The following tokens will be masked in output {}".format( str( sorted( six.itervalues(masker.secrets) ) ) )
process = None
stderr_thread = None
try:
args_list = ['scons'] + args_list + ['--cuppa-mode']
stdout_processor = masker.mask
stderr_processor = masker.mask
sys.stdout = AutoFlushFile( sys.stdout )
sys.stderr = AutoFlushFile( sys.stderr )
kwargs = {}
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
kwargs['close_fds'] = platform.system() != "Windows"
use_shell = False
process = subprocess.Popen(
use_shell and " ".join(args_list) or args_list,
**dict( kwargs, shell=use_shell )
)
stderr_consumer = LineConsumer( process.stderr.readline, stderr_processor )
stdout_consumer = LineConsumer( process.stdout.readline, stdout_processor )
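# stderr is drained on a worker thread while stdout is consumed on the main thread,
# so neither pipe buffer can fill up and block the scons child process.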
stderr_thread = threading.Thread( target=stderr_consumer )
stderr_thread.start()
stdout_consumer()
stderr_thread.join()
process.wait()
return process.returncode
except Exception as e:
if process:
process.kill()
if stderr_thread:
stderr_thread.join()
return process.returncode
except KeyboardInterrupt:
if process:
process.terminate()
process.wait()
if stderr_thread:
stderr_thread.join()
return process.returncode
return 1
def main():
sys.exit( run_scons( sys.argv[1:] ) )
if __name__ == "__main__":
main()
|
pool.py
|
from ximea import xiapi
from imutils.video import FPS
import imutils
import cv2
import numpy as np
import time
import multiprocessing
from multiprocessing import Pool, Queue
RESIZE = 500
def worker(input_q, output_q):
fps = FPS().start()
while True:
fps.update()
frame = input_q.get()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
time.sleep(.05)
output = imutils.resize(frame, width=RESIZE, height=RESIZE)
output_q.put(output)
fps.stop()
if __name__ == '__main__':
qu_limit = 100
threadn = cv2.getNumberOfCPUs()
print("Threads : ", threadn)
input_q = Queue(qu_limit)  # a larger queue improves throughput (fps) but adds display latency
output_q = Queue()
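# Pipeline sketch: the main process grabs camera frames and pushes them onto input_q;
# each worker pulls a frame, converts/resizes it, and pushes the result onto output_q,
# which the display loop below drains.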
for i in range(threadn-6):
p = multiprocessing.Process(target=worker, args=[input_q, output_q])
p.start()
cam = xiapi.Camera()
print('Opening first camera...')
cam.open_device()
cam.set_exposure(1000)
cam.set_param('width',128)
cam.set_param('height',128)
cam.set_param('downsampling_type', 'XI_SKIPPING')
cam.set_acq_timing_mode('XI_ACQ_TIMING_MODE_FREE_RUN')
img = xiapi.Image()
print('Starting data acquisition...')
cam.start_acquisition()
fps = FPS().start()
frame_count = 0
while frame_count < 10000:
frame_count += 1
cam.get_image(img)
frame = 20*img.get_image_data_numpy()
# if frame_count % qu_limit == 0:
# input_q.put(frame)
input_q.put(frame)
if output_q.empty():
pass # fill up queue
else:
data = output_q.get()
cv2.imshow('Video', data)
cv2.waitKey(1)
fps.update()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
cam.stop_acquisition()
print("Max camera framerate :",cam.get_param('framerate:max'))
cam.close_device()
cv2.destroyAllWindows()
|
client.py
|
from runtime.manager import RuntimeManager
from config.GantryConfig import Configuration
from config.object import ConfigParseException
from gantryd.componentwatcher import ComponentWatcher
from gantryd.machinestate import MachineState
from gantryd.componentstate import ComponentState, STOPPED_STATUS, KILLED_STATUS
from gantryd.etcdpaths import getProjectConfigPath
from util import report, fail, ReportLevels
import etcd
import uuid
import atexit
import threading
import time
import socket
import json
import logging
REPORT_TTL = 60 # Report that this machine is running, every 60 seconds
class GantryDClient(object):
""" A client in gantryd. """
def __init__(self, etcdHost, projectName):
self.project_name = projectName
self.runtime_manager = None
self.components = []
self.is_running = False
# Generate a unique ID for this machine/client.
self.machine_id = str(uuid.uuid1())
# Logging.
self.logger = logging.getLogger(__name__)
# Initialize the etcd client that we'll use.
self.etcd_client = etcd.Client(host=etcdHost)
# Initialize the thread used for reporting the status of this machine to etcd.
self.reporting_thread = threading.Thread(target=self.reportMachineStatus, args=[])
self.reporting_thread.daemon = True
def getConfigJSON(self):
""" Returns the project's config JSON or raises an exception if none. """
# Lookup the project on etcd. If none, report an error.
config_json = None
try:
self.logger.debug('Looking up configuration for project %s in etcd', self.project_name)
config_json = self.etcd_client.get(getProjectConfigPath(self.project_name)).value
except KeyError as k:
self.logger.exception(k)
fail('Unknown project ' + self.project_name, project=self.project_name)
return config_json
def getConfig(self):
""" Returns the project's config or raises an exception if none. """
config_json = self.getConfigJSON()
# Parse the project's configuration and save it.
try:
self.config = Configuration.parse(config_json)
except ConfigParseException as cpe:
fail('Error parsing gantry config', project=self.project_name, exception=cpe)
except Exception as e:
self.logger.exception(e)
return self.config
def setConfig(self, config):
""" Sets the project's config in etcd. """
config_json = json.dumps(config)
self.logger.debug('Updating configuration for project %s', self.project_name)
self.etcd_client.set(getProjectConfigPath(self.project_name), config_json)
def stopComponents(self, component_names):
""" Tells all the given components on all systems to stop. """
self.initialize(component_names)
report('Marking components as stopped', project=self.project_name)
for component in self.components:
report('Marking component as stopped', project=self.project_name, component=component,
level = ReportLevels.EXTRA)
state = ComponentState(self.project_name, component, self.etcd_client)
state.setStatus(STOPPED_STATUS)
def killComponents(self, component_names):
""" Tells all the given components on all systems to die. """
self.initialize(component_names)
report('Marking components as killed', project=self.project_name)
for component in self.components:
report('Marking component as killed', project=self.project_name, component=component,
level = ReportLevels.EXTRA)
state = ComponentState(self.project_name, component, self.etcd_client)
state.setStatus(KILLED_STATUS)
def markUpdated(self, component_names):
""" Tells all the given components to update themselves. """
self.initialize(component_names)
report('Updating the image IDs on components', project=self.project_name)
for component in self.components:
image_id = component.getImageId()
state = ComponentState(self.project_name, component, self.etcd_client)
report('Component %s->%s' % (component.getName(), image_id[0:12]), project=self.project_name,
component = component)
state.setReadyStatus(image_id)
def listStatus(self):
""" Lists the status of all components in this project. """
self.getConfig()
self.initialize([c.name for c in self.config.components])
print "%-20s %-20s %-20s" % ('COMPONENT', 'STATUS', 'IMAGE ID')
for component in self.components:
state = ComponentState(self.project_name, component, self.etcd_client).getState()
status = ComponentState.getStatusOf(state)
imageid = ComponentState.getImageIdOf(state)
print "%-20s %-20s %-20s" % (component.getName(), status, imageid)
def run(self, component_names):
""" Runs the given components on this machine. """
self.initialize(component_names)
# Register a handler to remove this machine from the list when the daemon is
# shutdown. The controller will also occasionally ping a machine to verify it
# is present.
self.logger.debug('Registering exit listener')
atexit.register(self.handleExit)
# Start the thread to register this machine as being part of the project.
self.startReporter()
# Start watcher thread(s), one for each component, to see when to update them.
report('Gantryd running', project=self.project_name)
for component in self.components:
self.logger.debug('Starting component watcher for component: %s', component.getName())
watcher = ComponentWatcher(component, self.project_name, self.machine_id, self.etcd_client)
watcher.start()
# And sleep until new stuff comes in.
while True:
time.sleep(1)
########################################################################
def initialize(self, component_names):
""" Initializes this client for working with the components given. """
# Load the project configuration.
self.getConfig()
# Initialize the runtime manager.
self.runtime_manager = RuntimeManager(self.config)
# Find all the components for this machine.
for component_name in component_names:
component = self.runtime_manager.getComponent(component_name)
if not component:
fail('Unknown component named ' + component_name, project=self.project_name)
self.components.append(component)
def handleExit(self):
""" Function executed when the Python system exits. This unregisters the machine in etcd. """
self.is_running = False
try:
machine_state = MachineState(self.project_name, self.machine_id, self.etcd_client)
machine_state.removeMachine()
# Shut down the runtime manager if we have one
if self.runtime_manager is not None:
self.runtime_manager.join()
except Exception as e:
self.logger.exception(e)
pass
def startReporter(self):
""" Starts reporting that this machine is running. """
self.is_running = True
self.reporting_thread.start()
def reportMachineStatus(self):
""" Reports that this machine has running components. """
while self.is_running:
# Perform the update.
self.logger.debug('Reporting status for machine %s to etcd', self.machine_id)
machine_state = MachineState(self.project_name, self.machine_id, self.etcd_client)
machine_state.registerMachine([c.getName() for c in self.components], ttl=REPORT_TTL)
# Sleep for the TTL minus a few seconds.
time.sleep(REPORT_TTL - 5)
|
coach.py
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('.')
import copy
from configparser import ConfigParser, Error
from rl_coach.core_types import EnvironmentSteps
import os
from rl_coach import logger
import traceback
from rl_coach.logger import screen, failed_imports
import argparse
import atexit
import time
import sys
import json
from rl_coach.base_parameters import Frameworks, VisualizationParameters, TaskParameters, DistributedTaskParameters, \
RunType, DistributedCoachSynchronizationType
from multiprocessing import Process
from multiprocessing.managers import BaseManager
import subprocess
from rl_coach.graph_managers.graph_manager import HumanPlayScheduleParameters, GraphManager
from rl_coach.utils import list_all_presets, short_dynamic_import, get_open_port, SharedMemoryScratchPad, get_base_dir
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.environments.environment import SingleLevelSelection
from rl_coach.memories.backend.redis import RedisPubSubMemoryBackendParameters
from rl_coach.memories.backend.memory_impl import construct_memory_params
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.s3_data_store import S3DataStoreParameters
from rl_coach.data_stores.nfs_data_store import NFSDataStoreParameters
from rl_coach.data_stores.data_store_impl import get_data_store, construct_data_store_params
from rl_coach.training_worker import training_worker
from rl_coach.rollout_worker import rollout_worker, wait_for_checkpoint
if len(set(failed_imports)) > 0:
screen.warning("Warning: failed to import the following packages - {}".format(', '.join(set(failed_imports))))
def add_items_to_dict(target_dict, source_dict):
updated_task_parameters = copy.copy(source_dict)
updated_task_parameters.update(target_dict)
return updated_task_parameters
def open_dashboard(experiment_path):
"""
Open the X11-based dashboard in a new process (non-blocking).
"""
dashboard_path = 'python {}/dashboard.py'.format(get_base_dir())
cmd = "{} --experiment_dir {}".format(dashboard_path, experiment_path)
screen.log_title("Opening dashboard - experiment path: {}".format(experiment_path))
# subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, executable="/bin/bash")
subprocess.Popen(cmd, shell=True, executable="/bin/bash")
def start_graph(graph_manager: 'GraphManager', task_parameters: 'TaskParameters'):
"""
Runs the graph_manager using the configured task_parameters.
This stand-alone method is a convenience for multiprocessing.
"""
graph_manager.create_graph(task_parameters)
# let the adventure begin
if task_parameters.evaluate_only:
graph_manager.evaluate(EnvironmentSteps(sys.maxsize))
else:
graph_manager.improve()
graph_manager.close()
def handle_distributed_coach_tasks(graph_manager, args, task_parameters):
ckpt_inside_container = "/checkpoint"
memory_backend_params = None
if args.memory_backend_params:
memory_backend_params = json.loads(args.memory_backend_params)
memory_backend_params['run_type'] = str(args.distributed_coach_run_type)
graph_manager.agent_params.memory.register_var('memory_backend_params', construct_memory_params(memory_backend_params))
data_store_params = None
if args.data_store_params:
data_store_params = construct_data_store_params(json.loads(args.data_store_params))
data_store_params.expt_dir = args.experiment_path
data_store_params.checkpoint_dir = ckpt_inside_container
graph_manager.data_store_params = data_store_params
if args.distributed_coach_run_type == RunType.TRAINER:
task_parameters.checkpoint_save_dir = ckpt_inside_container
training_worker(
graph_manager=graph_manager,
task_parameters=task_parameters
)
if args.distributed_coach_run_type == RunType.ROLLOUT_WORKER:
task_parameters.checkpoint_restore_dir = ckpt_inside_container
data_store = None
if args.data_store_params:
data_store = get_data_store(data_store_params)
rollout_worker(
graph_manager=graph_manager,
data_store=data_store,
num_workers=args.num_workers,
task_parameters=task_parameters
)
def handle_distributed_coach_orchestrator(args):
from rl_coach.orchestrators.kubernetes_orchestrator import KubernetesParameters, Kubernetes, \
RunTypeParameters
ckpt_inside_container = "/checkpoint"
arg_list = sys.argv[1:]
try:
i = arg_list.index('--distributed_coach_run_type')
arg_list.pop(i)
arg_list.pop(i)
except ValueError:
pass
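# The --distributed_coach_run_type flag and its value were stripped above so that the
# trainer and rollout commands built below can each set their own run type.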
trainer_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.TRAINER)] + arg_list
rollout_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.ROLLOUT_WORKER)] + arg_list
if '--experiment_name' not in rollout_command:
rollout_command = rollout_command + ['--experiment_name', args.experiment_name]
if '--experiment_name' not in trainer_command:
trainer_command = trainer_command + ['--experiment_name', args.experiment_name]
memory_backend_params = None
if args.memory_backend == "redispubsub":
memory_backend_params = RedisPubSubMemoryBackendParameters()
ds_params_instance = None
if args.data_store == "s3":
ds_params = DataStoreParameters("s3", "", "")
ds_params_instance = S3DataStoreParameters(ds_params=ds_params, end_point=args.s3_end_point, bucket_name=args.s3_bucket_name,
creds_file=args.s3_creds_file, checkpoint_dir=ckpt_inside_container, expt_dir=args.experiment_path)
elif args.data_store == "nfs":
ds_params = DataStoreParameters("nfs", "kubernetes", "")
ds_params_instance = NFSDataStoreParameters(ds_params)
worker_run_type_params = RunTypeParameters(args.image, rollout_command, run_type=str(RunType.ROLLOUT_WORKER), num_replicas=args.num_workers)
trainer_run_type_params = RunTypeParameters(args.image, trainer_command, run_type=str(RunType.TRAINER))
orchestration_params = KubernetesParameters([worker_run_type_params, trainer_run_type_params],
kubeconfig='~/.kube/config',
memory_backend_parameters=memory_backend_params,
data_store_params=ds_params_instance)
orchestrator = Kubernetes(orchestration_params)
if not orchestrator.setup():
print("Could not setup.")
return
if orchestrator.deploy_trainer():
print("Successfully deployed trainer.")
else:
print("Could not deploy trainer.")
return
if orchestrator.deploy_worker():
print("Successfully deployed rollout worker(s).")
else:
print("Could not deploy rollout worker(s).")
return
if args.dump_worker_logs:
screen.log_title("Dumping rollout worker logs in: {}".format(args.experiment_path))
orchestrator.worker_logs(path=args.experiment_path)
try:
orchestrator.trainer_logs()
except KeyboardInterrupt:
pass
orchestrator.undeploy()
class CoachLauncher(object):
"""
This class is responsible for gathering all user-specified configuration options, parsing them,
instantiating a GraphManager and then starting that GraphManager with either improve() or evaluate().
This class is also responsible for launching multiple processes.
It is structured so that it can be sub-classed to provide alternate mechanisms to configure and launch
Coach jobs.
The key entry-point for this class is the .launch() method which is expected to be called from __main__
and handle absolutely everything for a job.
"""
def launch(self):
"""
Main entry point for the class, and the standard way to run coach from the command line.
Parses command-line arguments through argparse, instantiates a GraphManager and then runs it.
"""
parser = self.get_argument_parser()
args = self.get_config_args(parser)
graph_manager = self.get_graph_manager_from_args(args)
self.run_graph_manager(graph_manager, args)
def get_graph_manager_from_args(self, args: argparse.Namespace) -> 'GraphManager':
"""
Return the graph manager according to the command line arguments given by the user.
:param args: the arguments given by the user
:return: the graph manager, not bound to task_parameters yet.
"""
graph_manager = None
# if a preset was given we will load the graph manager for the preset
if args.preset is not None:
graph_manager = short_dynamic_import(args.preset, ignore_module_case=True)
# for human play we need to create a custom graph manager
if args.play:
from rl_coach.agents.human_agent import HumanAgentParameters
env_params = short_dynamic_import(args.environment_type, ignore_module_case=True)()
env_params.human_control = True
schedule_params = HumanPlayScheduleParameters()
graph_manager = BasicRLGraphManager(HumanAgentParameters(), env_params, schedule_params, VisualizationParameters())
# Set framework
# Note: Some graph managers (e.g. HAC preset) create multiple agents and the attribute is called agents_params
if hasattr(graph_manager, 'agent_params'):
for network_parameters in graph_manager.agent_params.network_wrappers.values():
network_parameters.framework = args.framework
elif hasattr(graph_manager, 'agents_params'):
for ap in graph_manager.agents_params:
for network_parameters in ap.network_wrappers.values():
network_parameters.framework = args.framework
if args.level:
if isinstance(graph_manager.env_params.level, SingleLevelSelection):
graph_manager.env_params.level.select(args.level)
else:
graph_manager.env_params.level = args.level
# set the seed for the environment
if args.seed is not None:
graph_manager.env_params.seed = args.seed
# visualization
graph_manager.visualization_parameters.dump_gifs = graph_manager.visualization_parameters.dump_gifs or args.dump_gifs
graph_manager.visualization_parameters.dump_mp4 = graph_manager.visualization_parameters.dump_mp4 or args.dump_mp4
graph_manager.visualization_parameters.render = args.render
graph_manager.visualization_parameters.tensorboard = args.tensorboard
graph_manager.visualization_parameters.print_networks_summary = args.print_networks_summary
# update the custom parameters
if args.custom_parameter is not None:
unstripped_key_value_pairs = [pair.split('=') for pair in args.custom_parameter.split(';')]
stripped_key_value_pairs = [tuple([pair[0].strip(), pair[1].strip()]) for pair in
unstripped_key_value_pairs if len(pair) == 2]
# load custom parameters into run_dict
for key, value in stripped_key_value_pairs:
exec("graph_manager.{}={}".format(key, value))
return graph_manager
def display_all_presets_and_exit(self):
# list available presets
screen.log_title("Available Presets:")
for preset in sorted(list_all_presets()):
print(preset)
sys.exit(0)
def expand_preset(self, preset):
"""
Replace a short preset name with the full python path, and verify that it can be imported.
"""
if preset.lower() in [p.lower() for p in list_all_presets()]:
preset = "{}.py:graph_manager".format(os.path.join(get_base_dir(), 'presets', preset))
else:
preset = "{}".format(preset)
# if a graph manager variable was not specified, try the default of :graph_manager
if len(preset.split(":")) == 1:
preset += ":graph_manager"
# verify that the preset exists
preset_path = preset.split(":")[0]
if not os.path.exists(preset_path):
screen.error("The given preset ({}) cannot be found.".format(preset))
# verify that the preset can be instantiated
try:
short_dynamic_import(preset, ignore_module_case=True)
except TypeError as e:
traceback.print_exc()
screen.error('Internal Error: ' + str(e) + "\n\nThe given preset ({}) cannot be instantiated."
.format(preset))
return preset
def get_config_args(self, parser: argparse.ArgumentParser) -> argparse.Namespace:
"""
Returns a Namespace object with all the user-specified configuration options needed to launch.
This implementation uses argparse to take arguments from the CLI, but this can be over-ridden by
another method that gets its configuration from elsewhere. An equivalent method however must
return an identically structured Namespace object, which conforms to the structure defined by
get_argument_parser.
This method parses the arguments that the user entered, does some basic validation, and
modification of user-specified values in short form to be more explicit.
:param parser: a parser object which implicitly defines the format of the Namespace that
is expected to be returned.
:return: the parsed arguments as a Namespace
"""
args = parser.parse_args()
if args.nocolor:
screen.set_use_colors(False)
# if no arg is given
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
# list available presets
if args.list:
self.display_all_presets_and_exit()
# Read args from config file for distributed Coach.
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
coach_config = ConfigParser({
'image': '',
'memory_backend': 'redispubsub',
'data_store': 's3',
's3_end_point': 's3.amazonaws.com',
's3_bucket_name': '',
's3_creds_file': ''
})
try:
coach_config.read(args.distributed_coach_config_path)
args.image = coach_config.get('coach', 'image')
args.memory_backend = coach_config.get('coach', 'memory_backend')
args.data_store = coach_config.get('coach', 'data_store')
if args.data_store == 's3':
args.s3_end_point = coach_config.get('coach', 's3_end_point')
args.s3_bucket_name = coach_config.get('coach', 's3_bucket_name')
args.s3_creds_file = coach_config.get('coach', 's3_creds_file')
except Error as e:
screen.error("Error when reading distributed Coach config file: {}".format(e))
if args.image == '':
screen.error("Image cannot be empty.")
data_store_choices = ['s3', 'nfs']
if args.data_store not in data_store_choices:
screen.warning("{} data store is unsupported.".format(args.data_store))
screen.error("Supported data stores are {}.".format(data_store_choices))
memory_backend_choices = ['redispubsub']
if args.memory_backend not in memory_backend_choices:
screen.warning("{} memory backend is not supported.".format(args.memory_backend))
screen.error("Supported memory backends are {}.".format(memory_backend_choices))
if args.data_store == 's3':
if args.s3_bucket_name == '':
screen.error("S3 bucket name cannot be empty.")
if args.s3_creds_file == '':
args.s3_creds_file = None
if args.play and args.distributed_coach:
screen.error("Playing is not supported in distributed Coach.")
# replace a short preset name with the full path
if args.preset is not None:
args.preset = self.expand_preset(args.preset)
# validate the checkpoints args
if args.checkpoint_restore_dir is not None and not os.path.exists(args.checkpoint_restore_dir):
screen.error("The requested checkpoint folder to load from does not exist.")
# no preset was given. check if the user requested to play some environment on its own
if args.preset is None and args.play and not args.environment_type:
screen.error('When no preset is given for Coach to run, and the user requests human control over '
'the environment, the user is expected to input the desired environment_type and level.'
'\nAt least one of these parameters was not given.')
elif args.preset and args.play:
screen.error("Both the --preset and the --play flags were set. These flags can not be used together. "
"For human control, please use the --play flag together with the environment type flag (-et)")
elif args.preset is None and not args.play:
screen.error("Please choose a preset using the -p flag or use the --play flag together with choosing an "
"environment type (-et) in order to play the game.")
# get experiment name and path
args.experiment_name = logger.get_experiment_name(args.experiment_name)
args.experiment_path = logger.get_experiment_path(args.experiment_name)
if args.play and args.num_workers > 1:
screen.warning("Playing the game as a human is only available with a single worker. "
"The number of workers will be reduced to 1")
args.num_workers = 1
args.framework = Frameworks[args.framework.lower()]
# checkpoints
args.checkpoint_save_dir = os.path.join(args.experiment_path, 'checkpoint') if args.checkpoint_save_secs is not None else None
if args.export_onnx_graph and not args.checkpoint_save_secs:
screen.warning("Exporting ONNX graphs requires setting the --checkpoint_save_secs flag. "
"The --export_onnx_graph will have no effect.")
return args
def get_argument_parser(self) -> argparse.ArgumentParser:
"""
This returns an ArgumentParser object which defines the set of options that customers are expected to supply in order
to launch a coach job.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset',
help="(string) Name of a preset to run (class name from the 'presets' directory.)",
default=None,
type=str)
parser.add_argument('-l', '--list',
help="(flag) List all available presets",
action='store_true')
parser.add_argument('-e', '--experiment_name',
help="(string) Experiment name to be used to store the results.",
default='',
type=str)
parser.add_argument('-r', '--render',
help="(flag) Render environment",
action='store_true')
parser.add_argument('-f', '--framework',
help="(string) Neural network framework. Available values: tensorflow, mxnet",
default='tensorflow',
type=str)
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-c', '--use_cpu',
help="(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
"effect and the CPU will be used either way.",
action='store_true')
parser.add_argument('-ew', '--evaluation_worker',
help="(int) If multiple workers are used, add an evaluation worker as well which will "
"evaluate asynchronously and independently during the training. NOTE: this worker will "
"ignore the evaluation settings in the preset's ScheduleParams.",
action='store_true')
parser.add_argument('--play',
help="(flag) Play as a human by controlling the game with the keyboard. "
"This option will save a replay buffer with the game play.",
action='store_true')
parser.add_argument('--evaluate',
help="(flag) Run evaluation only. This is a convenient way to disable "
"training in order to evaluate an existing checkpoint.",
action='store_true')
parser.add_argument('-v', '--verbosity',
help="(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
default="low",
type=str)
parser.add_argument('-tfv', '--tf_verbosity',
help="(flag) TensorFlow verbosity level",
default=3,
type=int)
parser.add_argument('--nocolor',
help="(flag) Turn off color-codes in screen logging. Ascii text only",
action='store_true')
parser.add_argument('-s', '--checkpoint_save_secs',
help="(int) Time in seconds between saving checkpoints of the model.",
default=None,
type=int)
parser.add_argument('-crd', '--checkpoint_restore_dir',
help='(string) Path to a folder containing a checkpoint to restore the model from.',
type=str)
parser.add_argument('-dg', '--dump_gifs',
help="(flag) Enable the gif saving functionality.",
action='store_true')
parser.add_argument('-dm', '--dump_mp4',
help="(flag) Enable the mp4 saving functionality.",
action='store_true')
parser.add_argument('-et', '--environment_type',
help="(string) Choose an environment type class to override on top of the selected preset.",
default=None,
type=str)
parser.add_argument('-ept', '--exploration_policy_type',
help="(string) Choose an exploration policy type class to override on top of the selected "
"preset."
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type"
,
default=None,
type=str)
parser.add_argument('-lvl', '--level',
help="(string) Choose the level that will be played in the environment that was selected."
"This value will override the level parameter in the environment class."
,
default=None,
type=str)
parser.add_argument('-cp', '--custom_parameter',
help="(string) Semicolon separated parameters used to override specific parameters on top of"
" the selected preset (or on top of the command-line assembled one). "
"Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
"For ex.: "
"\"visualization.render=False; num_training_iterations=500; optimizer='rmsprop'\"",
default=None,
type=str)
parser.add_argument('--print_networks_summary',
help="(flag) Print network summary to stdout",
action='store_true')
parser.add_argument('-tb', '--tensorboard',
help="(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
action='store_true')
parser.add_argument('-ns', '--no_summary',
help="(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
action='store_true')
parser.add_argument('-d', '--open_dashboard',
help="(flag) Open dashboard with the experiment when the run starts",
action='store_true')
parser.add_argument('--seed',
help="(int) A seed to use for running the experiment",
default=None,
type=int)
parser.add_argument('-onnx', '--export_onnx_graph',
help="(flag) Export the ONNX graph to the experiment directory. "
"This will have effect only if the --checkpoint_save_secs flag is used in order to store "
"checkpoints, since the weights checkpoint are needed for the ONNX graph. "
"Keep in mind that this can cause major overhead on the experiment. "
"Exporting ONNX graphs requires manually installing the tf2onnx package "
"(https://github.com/onnx/tensorflow-onnx).",
action='store_true')
parser.add_argument('-dc', '--distributed_coach',
help="(flag) Use distributed Coach.",
action='store_true')
parser.add_argument('-dcp', '--distributed_coach_config_path',
help="(string) Path to config file when using distributed rollout workers."
"Only distributed Coach parameters should be provided through this config file."
"Rest of the parameters are provided using Coach command line options."
"Used only with --distributed_coach flag."
"Ignored if --distributed_coach flag is not used.",
type=str)
parser.add_argument('--memory_backend_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--data_store_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--distributed_coach_run_type',
help=argparse.SUPPRESS,
type=RunType,
default=RunType.ORCHESTRATOR,
choices=list(RunType))
parser.add_argument('-asc', '--apply_stop_condition',
help="(flag) If set, this will apply a stop condition on the run, defined by reaching a"
"target success rate as set by the environment or a custom success rate as defined "
"in the preset. ",
action='store_true')
parser.add_argument('--dump_worker_logs',
help="(flag) Only used in distributed coach. If set, the worker logs are saved in the experiment dir",
action='store_true')
return parser
def run_graph_manager(self, graph_manager: 'GraphManager', args: argparse.Namespace):
if args.distributed_coach and not graph_manager.agent_params.algorithm.distributed_coach_synchronization_type:
screen.error("{} algorithm is not supported using distributed Coach.".format(graph_manager.agent_params.algorithm))
if args.distributed_coach and args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
screen.warning("The --checkpoint_save_secs or -s argument will be ignored as SYNC distributed coach sync type is used. Checkpoint will be saved every training iteration.")
if args.distributed_coach and not args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.ASYNC:
screen.error("Distributed coach with ASYNC distributed coach sync type requires --checkpoint_save_secs or -s.")
# Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
# This will not affect GPU runs.
os.environ["OMP_NUM_THREADS"] = "1"
# turn TF debug prints off
if args.framework == Frameworks.tensorflow:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)
# turn off the summary at the end of the run if necessary
if not args.no_summary and not args.distributed_coach:
atexit.register(logger.summarize_experiment)
screen.change_terminal_title(args.experiment_name)
task_parameters = TaskParameters(
framework_type=args.framework,
evaluate_only=args.evaluate,
experiment_path=args.experiment_path,
seed=args.seed,
use_cpu=args.use_cpu,
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_dir=args.checkpoint_restore_dir,
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
# open dashboard
if args.open_dashboard:
open_dashboard(args.experiment_path)
if args.distributed_coach and args.distributed_coach_run_type != RunType.ORCHESTRATOR:
handle_distributed_coach_tasks(graph_manager, args, task_parameters)
return
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
handle_distributed_coach_orchestrator(args)
return
# Single-threaded runs
if args.num_workers == 1:
self.start_single_threaded(task_parameters, graph_manager, args)
else:
self.start_multi_threaded(graph_manager, args)
def start_single_threaded(self, task_parameters, graph_manager: 'GraphManager', args: argparse.Namespace):
# Start the training or evaluation
start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
def start_multi_threaded(self, graph_manager: 'GraphManager', args: argparse.Namespace):
total_tasks = args.num_workers
if args.evaluation_worker:
total_tasks += 1
ps_hosts = "localhost:{}".format(get_open_port())
worker_hosts = ",".join(["localhost:{}".format(get_open_port()) for i in range(total_tasks)])
# Shared memory
class CommManager(BaseManager):
pass
CommManager.register('SharedMemoryScratchPad', SharedMemoryScratchPad, exposed=['add', 'get', 'internal_call'])
comm_manager = CommManager()
comm_manager.start()
shared_memory_scratchpad = comm_manager.SharedMemoryScratchPad()
def start_distributed_task(job_type, task_index, evaluation_worker=False,
shared_memory_scratchpad=shared_memory_scratchpad):
task_parameters = DistributedTaskParameters(
framework_type=args.framework,
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=evaluation_worker,
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=shared_memory_scratchpad,
seed=args.seed+task_index if args.seed is not None else None, # each worker gets a different seed
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_dir=args.checkpoint_restore_dir,
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
p = Process(target=start_graph, args=(graph_manager, task_parameters))
# p.daemon = True
p.start()
return p
# parameter server
parameter_server = start_distributed_task("ps", 0)
# training workers
        # wait a bit before spawning the non-chief workers in order to make sure the session has already been created
workers = []
workers.append(start_distributed_task("worker", 0))
time.sleep(2)
for task_index in range(1, args.num_workers):
workers.append(start_distributed_task("worker", task_index))
# evaluation worker
if args.evaluation_worker or args.render:
evaluation_worker = start_distributed_task("worker", args.num_workers, evaluation_worker=True)
# wait for all workers
[w.join() for w in workers]
if args.evaluation_worker:
evaluation_worker.terminate()
def main():
launcher = CoachLauncher()
launcher.launch()
if __name__ == "__main__":
main()
|
explorer.py
|
# -*- coding: utf-8 -*-
from .lib.goto import with_goto
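# Explorer actions for the plugin.video.a4kstreaming Kodi add-on:
# IMDb browsing and search, debrid cloud access and IMDb profile management.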
__action_menu_style = '[COLOR white][B]%s[/B][/COLOR]'
def __get_season_title(core, season, year, episodes):
season_template = core.kodi.get_setting('general.season_title_template')
if season_template == '1':
return 'Season %s (%s)' % (season, year)
if season_template == '2':
return 'Season %s' % season
if season_template == '3':
return 'Season %s - %s Episodes' % (season, episodes)
return 'Season %s (%s) - %s Episodes' % (season, year, episodes)
def __get_episode_title(core, season, episode, title):
season_template = core.kodi.get_setting('general.episode_title_template')
season_zfill = str(season).zfill(2)
episode_zfill = str(episode).zfill(2)
if season_template == '1':
return '%s. %s' % (episode, title)
if season_template == '2':
return 'E%s. %s' % (episode_zfill, title)
if season_template == '3':
return '%sx%s. %s' % (season_zfill, episode_zfill, title)
if season_template == '4':
return 'S%sE%s. %s' % (season_zfill, episode_zfill, title)
return '%s' % title
def __handle_request_error(core, params, response=None):
if not params.silent:
core.kodi.notification('Something went wrong. Check logs')
if response:
core.logger.notice(response.text)
def __check_imdb_auth_config(core, params):
if core.kodi.get_setting('imdb.at-main') == '':
if not params.silent:
core.kodi.notification('Missing IMDb authentication cookies')
core.utils.end_action(core, True)
return False
return True
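# Replace the title's primary image with the first landscape, non-poster image (if any),
# keeping the original primary image available in the images list.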
def __set_wide_image_as_primary(title):
title_images = title.get('images', None)
if title_images and len(title_images) > 0:
if title['primaryImage']:
title_images.insert(0, title['primaryImage'])
wide_images = list(filter(lambda v: v['width'] > v['height'] and v.get('type', None) != 'poster', title_images))
if len(wide_images) > 0:
title['primaryImage'] = wide_images[0]
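# Attach IMDb context-menu entries (rating, trailer, cast & crew, watchlist/list
# management, watched state and debrid caching) to the title's list item.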
def __set_title_contextmenu(core, title, list_item):
titleType = title['titleType']
if titleType == 'person':
return
trailer = ''
if title.get('primaryVideos', None) and len(title['primaryVideos']) > 0:
trailer = title['primaryVideos'][0]
tvseries = titleType == 'tvSeries'
has_rating = title.get('userRating', None) is not None
context_menu_items = [
('IMDb: %s rating' % ('Update' if has_rating else 'Set'), 'RunPlugin(%s?action=profile&type=rate&id=%s)' % (core.url, title['id'])),
('IMDb: Trailer', 'RunPlugin(%s?action=trailer&id=%s&play=true)' % (core.url, trailer)),
('IMDb: Cast & Crew', 'ActivateWindow(Videos,%s?action=query&type=browse&id=%s,return)' % (core.url, title['id'])),
]
if titleType != 'tvEpisode':
context_menu_items.append(
('IMDb: More like this', 'ActivateWindow(Videos,%s?action=query&type=more_like_this&id=%s,return)' % (core.url, title['id']))
)
if not tvseries:
if has_rating:
list_item.setInfo('video', {
'overlay': 5,
'playcount': 1
})
context_menu_items.append(
('IMDb: Mark as unwatched', 'RunPlugin(%s?action=profile&type=mark_as_unwatched&id=%s)' % (core.url, title['id']))
)
else:
context_menu_items.append(
('IMDb: Mark as watched', 'RunPlugin(%s?action=profile&type=mark_as_watched&id=%s)' % (core.url, title['id']))
)
context_menu_items.extend([
('IMDb: Add to watchlist', 'RunPlugin(%s?action=profile&type=watchlist_add&id=%s)' % (core.url, title['id'])),
('IMDb: Remove from watchlist', 'RunPlugin(%s?action=profile&type=watchlist_remove&id=%s)' % (core.url, title['id'])),
('IMDb: Add to list', 'RunPlugin(%s?action=profile&type=list_add&id=%s)' % (core.url, title['id'])),
('IMDb: Remove from list', 'RunPlugin(%s?action=profile&type=list_remove&id=%s)' % (core.url, title['id'])),
])
if not tvseries:
context_menu_items.extend([
('Debrid: Add sources', 'RunPlugin(%s?action=cache_sources&id=%s)' % (core.url, title['id']))
])
if core.kodi.get_bool_setting('general.autoplay'):
context_menu_items.extend([
('Force source select', 'PlayMedia(%s?action=play&id=%s&force_sourceselect=true)' % (core.url, title['id']))
])
list_item.addContextMenuItems(context_menu_items)
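# Build a GraphQL mutation that repeats `action` once per id, using each id both as the
# field alias and as the substituted value. For example, with the deleteTitleRating action
# and a single (placeholder) id 'tt0000001' the generated query is roughly:
#   mutation fn {
#     tt0000001: deleteTitleRating(input: { titleId: "tt0000001" }) { date }
#   }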
def __generate_mutation_query(action, ids, vars=''):
vars = 'fn(%s) ' % vars if vars else 'fn'
result = '''
'''.join([action % (id, id) for id in ids])
result = '''
mutation %s {
%s
}
''' % (vars, result)
return result
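# Render the user's IMDb lists (title and people lists only) as directory items.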
def __add_lists(core, data):
items = []
for imdb_list in data['lists']:
if imdb_list['listType'] not in ['TITLES', 'PEOPLE']:
continue
titles_label = 'Movie or TV Series from '
peoples_label = 'Stars, Directors or Writers from '
items.append({
'label': imdb_list['name'],
'type': 'list',
'info': (titles_label if imdb_list['listType'] == 'TITLES' else peoples_label) + imdb_list['name'],
'action': 'query',
'subitems': True,
'params': {
'id': imdb_list['id']
}
})
list_items = core.utils.generic_list_items(core, items)
core.kodi.xbmcplugin.addDirectoryItems(core.handle, list_items, len(list_items))
core.kodi.xbmcplugin.addSortMethod(core.handle, core.kodi.xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
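# Group the series' episodes into seasons and estimate each season's release-date window
# from the episode release dates; the window is encoded into the episodes query URL.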
def __add_seasons(core, title):
seasons = {}
episodes = title['episodes']['episodes']
episodes = list(filter(lambda ep: ep['releaseDate'], episodes))
prev_rld = None
for index, episode in enumerate(episodes):
try:
current_rld = episode['releaseDate']
if prev_rld:
if current_rld['year'] < prev_rld['year'] or current_rld['month'] < prev_rld['month'] or current_rld['day'] < prev_rld['day']:
prev_rld['year'] = current_rld['year']
prev_rld['month'] = max(current_rld['month'] - 1, 1)
prev_rld['day'] = 1
prev_rld = current_rld
except:
pass
for index, episode in enumerate(episodes):
try:
episode_number = episode['series']
episode_season = episode_number['seasonNumber']
episode_number = episode_number['episodeNumber']
episode_rld = episode['releaseDate']
if not seasons.get(episode_season, None) and episode_number <= 1:
seasons[episode_season] = core.utils.DictAsObject({
'episodes': 0,
'episode_ids': [],
'first_episode_year': episode_rld['year'],
'year': episode_rld['year'],
'month': episode_rld['month'],
'day': episode_rld['day'],
})
if index + 1 < len(episodes) and episodes[index + 1]['releaseDate']['year'] < episode_rld['year'] and (episode_season - 1) in seasons:
prev_season_last_ep_release_date = seasons[episode_season - 1].last_episode['releaseDate']
seasons[episode_season].update({
'year': prev_season_last_ep_release_date['year'],
'month': prev_season_last_ep_release_date['month'],
'day': prev_season_last_ep_release_date['day'] + 1,
})
seasons[episode_season].episodes += 1
seasons[episode_season].episode_ids.append(episode['id'])
seasons[episode_season].last_episode = episode
if index > 0:
season_to_update = None
rld = None
if episodes[index - 1]['series']['seasonNumber'] + 1 == episode_season:
season_to_update = episode_season - 1
rld = episodes[index - 1]['releaseDate']
if index + 1 == len(episodes):
season_to_update = episode_season
rld = episode_rld
if season_to_update:
seasons[season_to_update].update({
'year_end': rld['year'],
'month_end': min(rld['month'] + 1, 12),
'day_end': rld['day'],
})
except:
pass
list_items = []
for key in seasons:
season = seasons[key]
season.key = key
season.title = __get_season_title(core, key, season.first_episode_year if season.first_episode_year else 'N/A', season.episodes)
list_item = core.kodi.xbmcgui.ListItem(label=season.title, offscreen=True)
poster = core.utils.fix_poster_size(title['primaryImage'])
list_item.setArt({
'thumb': poster,
'poster': poster,
})
video_meta = {
'mediatype': 'season',
'imdbnumber': title['id'],
'title': season.title,
'tvshowtitle': season.title,
'year': season.year,
'season': key,
'episode': season.episodes,
'plot': title.get('plot', None)
}
list_item.setInfo('video', video_meta)
url = '%s?action=query&type=episodes&id=%s&season=%s' % (core.url, title['id'], key)
if season.year:
url += '&year=%s' % season.year
if season.month:
url += '&month=%s' % season.month
if season.day:
url += '&day=%s' % season.day
if season.year_end:
url += '&year_end=%s' % season.year_end
if season.month_end:
url += '&month_end=%s' % season.month_end
if season.day_end:
url += '&day_end=%s' % season.day_end
context_menu_items = []
last_episode_has_rating = season.last_episode and season.last_episode.get('userRating', None) is not None
if last_episode_has_rating:
list_item.setInfo('video', {
'overlay': 5,
'playcount': 1
})
context_menu_items.append(
('IMDb: Mark as unwatched', 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=profile&type=mark_as_unwatched&id=%s&ids=%s)' % ('Season %s' % season.key, '__'.join(season.episode_ids)))
)
else:
context_menu_items.append(
('IMDb: Mark as watched', 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=profile&type=mark_as_watched&id=%s&ids=%s)' % ('Season %s' % season.key, '__'.join(season.episode_ids)))
)
list_item.addContextMenuItems(context_menu_items)
list_item.setContentLookup(False)
list_items.append((url, list_item, True))
core.kodi.xbmcplugin.addDirectoryItems(core.handle, list_items, len(list_items))
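# Build the episode list for a single season, copying show-level metadata (poster, plot,
# credits, ...) onto each episode and styling not-yet-released episodes in red.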
def __add_episodes(core, title, season):
seasons = []
if isinstance(title, list):
raw_episodes = title
title = {}
else:
raw_episodes = title['episodes']
if not isinstance(raw_episodes, list):
seasons = raw_episodes['seasons']
raw_episodes = raw_episodes['episodes']
episodes = []
for episode in raw_episodes:
if not episode or not episode.get('series', None) or episode['series'].get('seasonNumber', None) != season or episode['series'].get('episodeNumber', None) is None:
continue
episodeNumber = episode['series']['episodeNumber']
if title.get('id', None):
episode['tvshowid'] = title['id']
if title.get('titleText', None):
episode['tvshowtitle'] = title['titleText']
if title.get('primaryImage', None):
episode['poster'] = title['primaryImage']
if title.get('certificate', None):
episode['certificate'] = title['certificate']
if title.get('isAdult', None):
episode['isAdult'] = title['isAdult']
if title.get('plot', None) and not episode.get('plot', None):
episode['plot'] = title['plot']
if title.get('genres', None):
episode['genres'] = title['genres']
if title.get('countriesOfOrigin', None):
episode['countriesOfOrigin'] = title['countriesOfOrigin']
if title.get('principalCredits', None):
episode['principalCredits'] = title['principalCredits']
if title.get('credits', None):
episode['credits'] = title['credits']
if len(seasons) > 0:
episode['no_seasons'] = seasons[-1]
episode['seasons'] = seasons
episode['titleText'] = __get_episode_title(core, season, episodeNumber, episode['titleText'])
if episode.get('releaseDate', None):
release_date = episode['releaseDate']
now = core.datetime.now()
year = release_date.get('year', None)
month = release_date.get('month', None)
day = release_date.get('day', None)
released = False
if year and month and day and core.date(now.year, now.month, now.day) >= core.date(year, month, day):
released = True
if not released:
episode['titleTextStyled'] = '[COLOR red][I]%s[/I][/COLOR]' % episode['titleText']
__set_wide_image_as_primary(episode)
episodes.append(episode)
return __add_titles(core, episodes, browse=False)
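# Render a single title together with its principal credits and cast as browsable items
# (used by the 'browse' query type).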
def __add_title(core, title, silent=False):
items = []
if title.get('series', None):
if title['series'].get('series', None):
series = title['series']['series']
if series.get('id', None):
title['tvshowid'] = series['id']
if series.get('primaryImage', None):
title['seriesPoster'] = series['primaryImage']
if series.get('seasons', None):
title['seasons'] = series['seasons']
if series.get('nextEpisodeSeasonNumber', None):
title['nextEpisodeSeasonNumber'] = next(iter(series['nextEpisodeSeasonNumber']), -1)
if not title.get('countriesOfOrigin', None) and series.get('countriesOfOrigin', None):
title['countriesOfOrigin'] = series['countriesOfOrigin']
if not title.get('poster', None) and title.get('seriesPoster', None):
title['poster'] = title['seriesPoster']
__set_wide_image_as_primary(title)
if silent:
return __add_titles(core, [title], False, silent)
items.append(title)
ids = {}
def add_person(category, credit):
person = credit
if credit.get('name', None):
person = credit['name']
if credit.get('characters', None):
person['characters'] = credit['characters']
if person['id'] in ids:
return
ids[person['id']] = True
items.append({
'id': person['id'],
'titleType': 'person',
'titleText': '(%s) %s' % (category, person['nameText']),
'primaryImage': person.get('primaryImage', None),
'plot': ', '.join(person['characters']) if person.get('characters', None) else None
})
if title.get('principalCredits', None):
for credits in title['principalCredits']:
if credits['category'] == 'Stars':
for credit in credits['credits']:
add_person(credits['category'], credit)
if title.get('credits', None):
for credit in title['credits']:
add_person('Cast', credit)
if title.get('principalCredits', None):
for credits in title['principalCredits']:
if credits['category'] != 'Stars':
for credit in credits['credits']:
add_person(credits['category'], credit)
return __add_titles(core, items, False)
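# Create Kodi list items for a set of titles/persons, filling in artwork, video metadata,
# cast and context menus; `browse` controls whether selecting an item opens its
# browse/seasons view or plays it.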
def __add_titles(core, titles, browse, silent=False):
list_items = []
for title in titles:
titleType = title['titleType']
if titleType in ['tvMovie', 'tvSpecial', 'video']:
titleType = 'movie'
if titleType not in ['movie', 'tvSeries', 'tvEpisode', 'person']:
continue
list_item = core.kodi.xbmcgui.ListItem(label=title['titleTextStyled'] if title.get('titleTextStyled', None) else title['titleText'], offscreen=True)
primary_image = title.get('primaryImage', None)
poster_image = title.get('poster', None)
if poster_image:
poster_image = core.utils.fix_poster_size(poster_image)
thumb_image = core.utils.fix_thumb_size(primary_image) if primary_image else poster_image
else:
poster_image = core.utils.fix_poster_size(primary_image)
thumb_image = poster_image
fanart_image = title.get('fanart', None)
if fanart_image:
fanart_image = core.utils.fix_fanart_size(fanart_image)
elif titleType in ['tvEpisode']:
fanart_image = core.utils.fix_fanart_size(primary_image)
list_item.setArt({
'thumb': thumb_image,
'poster': poster_image,
'fanart': fanart_image,
})
releaseDate = title.get('releaseDate', {})
mediatypes = {
'movie': 'movie',
'tvSeries': 'tvshow',
'tvEpisode': 'episode',
'person': 'movie'
}
mediatype = mediatypes[titleType]
overlay = 0 if title.get('userRating', 'unknown') == 'unknown' else (4 if not title.get('userRating', None) else 5)
if mediatype == 'tvshow':
overlay = 5 if title.get('tvshow_watched', False) else 0
video_meta = {
'mediatype': mediatype,
'imdbnumber': title['id'],
'title': title['titleText'],
'originaltitle': title.get('originalTitleText', title['titleText']),
'tvshowtitle': title.get('tvshowtitle', title['titleText'] if titleType in ['tvSeries'] else ''),
'year': title.get('releaseYear', None),
'premiered': '%s-%s-%s' % (releaseDate['year'], str(releaseDate['month']).zfill(2), str(releaseDate['day']).zfill(2)) if isinstance(releaseDate, dict) and len(releaseDate) == 3 else None,
'duration': title.get('runtime', None),
'mpaa': title.get('certificate', None),
'genre': title.get('genres', None),
'country': title.get('countriesOfOrigin', None),
'trailer': '%s?action=trailer&id=%s' % (core.url, title['primaryVideos'][0]) if title.get('primaryVideos', None) else None,
'plot': title.get('plot', None),
'tagline': next(iter(title.get('taglines', [])), None),
'overlay': overlay,
'playcount': 1 if overlay == 5 else (None if overlay == 0 else 0),
'userrating': title.get('userRating', None)
}
if title.get('ratingsSummary', None):
ratingsSummary = title['ratingsSummary']
if ratingsSummary.get('aggregateRating', None) and ratingsSummary.get('voteCount', None):
list_item.setRating("imdb", ratingsSummary['aggregateRating'], ratingsSummary['voteCount'], True)
if title.get('episodes', None):
episodes = title['episodes']
if episodes.get('isOngoing', None) is not None:
video_meta.update({ 'status': 'Continuing' if episodes['isOngoing'] else 'Ended' })
if core.utils.safe_list_get(episodes.get('seasons', [None]), -1, None):
list_item.setProperty('TotalSeasons', str(episodes['seasons'][-1]))
if not video_meta.get('season', None):
video_meta.update({ 'season': episodes['seasons'][-1] })
if episodes.get('totalEpisodes', None):
total_episodes = episodes['totalEpisodes']
list_item.setProperty('WatchedEpisodes', '*')
list_item.setProperty('UnWatchedEpisodes', '*')
list_item.setProperty('TotalEpisodes', str(total_episodes))
list_item.setProperty('NumEpisodes', str(total_episodes))
video_meta.update({ 'episode': total_episodes })
if title.get('series', None):
series = title['series']
if series.get('series', None):
if series['series'].get('titleText', None) and not video_meta.get('tvshowtitle', None):
video_meta.update({ 'tvshowtitle': series['series']['titleText'] })
if isinstance(series.get('episodeNumber', None), dict):
series = series.get('episodeNumber', None)
if series.get('episodeNumber', None):
video_meta.update({ 'episode': series['episodeNumber'] })
if series.get('seasonNumber', None):
video_meta.update({ 'season': series['seasonNumber'] })
if title.get('companyCredits', None):
video_meta.update({
'studio': [item['company'] for item in title['companyCredits']],
})
if title.get('principalCredits', None):
director = [item['credits'] if item['category'] in ['Director', 'Creator'] else None for item in title['principalCredits']]
director = next(iter(filter(lambda v: v, director)), None)
if director is None:
director = [item['credits'] if item['category'] in ['Directors', 'Creators'] else None for item in title['principalCredits']]
director = next(iter(filter(lambda v: v, director)), {'nameText': None})
writers = [item['credits'] if item['category'] == 'Writer' else None for item in title['principalCredits']]
writers = next(iter(filter(lambda v: v, writers)), None)
if writers is None:
writers = [item['credits'] if item['category'] == 'Writers' else None for item in title['principalCredits']]
writers = next(iter(filter(lambda v: v, writers)), {'nameText': None})
video_meta.update({
'director': [item['nameText'] for item in director] if isinstance(director, list) else director['nameText'],
'writer': [item['nameText'] for item in writers] if isinstance(writers, list) else writers['nameText'],
})
list_item.setInfo('video', video_meta)
cast = []
if 'principalCredits' in title:
cast = [item['credits'] if item['category'] == 'Stars' else None for item in title['principalCredits']]
cast = next(iter(filter(lambda v: v, cast)), [])
cast_ids = [c.get('name', c)['id'] for c in cast]
if 'credits' in title and title['credits']:
for credit in title['credits']:
credit_id = credit.get('name', credit)['id']
if credit_id not in cast_ids:
cast.append(credit)
cast_meta = []
for member in cast:
characters = member.get('characters', [''])
cast_meta.append({
'name': member.get('nameText', member.get('name', {}).get('nameText', None)),
'role': ' / '.join(characters) if characters else None,
'thumbnail': core.utils.fix_poster_size(member.get('primaryImage', member.get('name', {}).get('primaryImage', None)))
})
list_item.setCast(cast_meta)
if titleType in ['movie', 'tvEpisode']:
if browse:
action = 'query'
type = 'browse'
else:
list_item.setProperty('IsPlayable', 'true')
action = 'play'
if title.get('releaseYear', None) is None:
type = ''
else:
title_meta = video_meta.copy()
title_meta.update({
'tvshowid': title.get('tvshowid', None),
'seasons': title.get('seasons', None),
'is_airing': title_meta.get('season', None) == title.get('nextEpisodeSeasonNumber', -1),
'poster': thumb_image if thumb_image else poster_image,
})
type = core.base64.b64encode(core.json.dumps(title_meta).encode())
if core.utils.py3:
type = type.decode('ascii')
if silent:
return type
elif titleType in ['person']:
action = 'query'
type = 'knownfor'
else: # tvSeries
if browse or browse is None:
action = 'query'
type = 'seasons'
else:
action = 'play'
type = titleType
list_item.setProperty('IsPlayable', 'false')
url = '%s?action=%s&type=%s' % (core.url, action, type)
if action != 'play' or type == '':
url += '&id=%s' % title['id']
list_item.setContentLookup(False)
__set_title_contextmenu(core, title, list_item)
list_items.append((url, list_item, action != 'play'))
core.kodi.xbmcplugin.addDirectoryItems(core.handle, list_items, len(list_items))
def root(core):
items = [
{
'label': 'Trending',
'action': 'query',
'type': 'popular',
'info': 'IMDb\'s latest trending movie or TV series.',
'subitems': True
},
{
'label': 'Fan Favorites',
'action': 'query',
'type': 'fan_picks',
'info': 'IMDb\'s fan favorites for movie or TV series.',
'subitems': True
},
{
'label': 'Recommended',
'action': 'query',
'type': 'top_picks',
'info': 'IMDb\'s personalized recommendations for movie or TV series.\n(Requires authentication)',
'subitems': True
},
{
'label': 'Watchlist',
'action': 'query',
'type': 'watchlist',
'info': 'Your IMDb watchlist for movie or TV series.\n(Requires authentication)',
'subitems': True
},
{
'label': 'Lists',
'action': 'query',
'type': 'lists',
'info': 'Your IMDb lists for movie or TV series.\n(Requires authentication)',
'subitems': True
},
{
'label': 'Discover by Year',
'action': 'years',
'type': 'root',
'info': 'Find a movie or TV series from a specific year.',
'subitems': True
},
{
'label': 'Debrid',
'action': 'cloud',
'type': 'root',
'info': 'Browse debrid files.',
'subitems': True,
},
{
'label': 'Search...',
'action': 'search',
'type': 'input',
'info': 'Find movie or TV series by name.',
'subitems': True,
},
]
list_items = core.utils.generic_list_items(core, items)
core.kodi.xbmcplugin.addDirectoryItems(core.handle, list_items, len(list_items))
def years(core, params):
items = []
now = core.datetime.now().year
if params.type == 'root':
start = 1900
end = now + 1
while(start < end):
items.append({
'label': '%s-%s' % (start, start + 9),
'type': start,
'info': 'Between %s and %s' % (start, start + 9),
'action': 'years',
'subitems': True
})
start += 10
else:
start = int(params.type)
end = start + 10
while(start < end):
if start > now:
break
items.append({
'label': '%s' % start,
'type': 'year',
'info': 'Movie or TV Series from %s' % start,
'action': 'query',
'subitems': True,
'params': {
'target_year': start
}
})
start += 1
items.reverse()
list_items = core.utils.generic_list_items(core, items)
core.kodi.xbmcplugin.addDirectoryItems(core.handle, list_items, len(list_items))
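# Query IMDb's suggestion API, maintain a short search history and render the matches.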
def search(core, params):
query = params.query
confirmed = True if query else False
search = core.cache.get_search()
if params.type == 'input':
if search.history:
selection = core.kodi.xbmcgui.Dialog().select(
'Search for title or person',
[__action_menu_style % 'New Search'] + list(map(lambda v: str(v), search.history)),
)
confirmed = True
selection = int(selection)
if selection > 0:
query = search.history[selection - 1]
elif selection == 0:
confirmed = False
if not confirmed and not query:
keyboard = core.kodi.xbmc.Keyboard('', 'Enter part of title or person\'s name')
keyboard.doModal()
query = keyboard.getText()
confirmed = keyboard.isConfirmed()
if not confirmed or not query:
core.utils.end_action(core, True)
if not query:
return
if not search.history:
search.history = []
search.history.insert(0, query)
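    # de-duplicate the history while preserving order and cap it at 5 entries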
temp_history = set()
search.history = [item for item in search.history if item not in temp_history and (temp_history.add(item) or True)]
if len(search.history) > 5:
search.history.pop()
core.cache.save_search(search)
request = {
'method': 'GET',
'url': 'https://v2.sg.media-imdb.com/suggestion/%s/%s.json' % (query[:1], query),
}
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return []
results = core.json.loads(response.content)
if len(results['d']) == 0:
return []
items = []
for result in results['d']:
titleType = result.get('q', 'person' if result['id'].startswith('nm') else None)
if not titleType:
continue
titleType = titleType.lower()
types = {
'tv series': 'tvSeries',
'tv mini-series': 'tvSeries',
'tv movie': 'movie',
'tv special': 'movie',
'feature': 'movie',
'video': 'movie',
}
if types.get(titleType, None):
titleType = types[titleType]
try:
items.append({
'id': result['id'],
'titleType': titleType,
'titleText': '%s' % result['l'],
'primaryImage': { 'url': result['i']['imageUrl'], 'width': result['i']['width'], 'height': result['i']['height'] }
})
except:
pass
__add_titles(core, items, browse=None)
return items
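# Browse debrid cloud content: Premiumize files and transfers, RealDebrid and AllDebrid
# transfers, including resolve and delete actions.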
def cloud(core, params):
items = []
video_ext = list(map(lambda v: '.%s' % v.upper(), core.utils.video_containers()))
if params.type == 'root':
items.extend([
{
'label': 'Premiumize - Files',
'action': 'cloud',
'type': 'premiumize_files',
'info': 'Browse Premiumize files.',
'subitems': True
},
{
'label': 'Premiumize - Transfers',
'action': 'cloud',
'type': 'premiumize_transfers',
'info': 'See Premiumize transfers.',
'subitems': True
},
{
'label': 'RealDebrid - Transfers',
'action': 'cloud',
'type': 'realdebrid_transfers',
'info': 'See RealDebrid transfers.',
'subitems': True
},
{
'label': 'AllDebrid - Transfers',
'action': 'cloud',
'type': 'alldebrid_transfers',
'info': 'See AllDebrid transfers.',
'subitems': True
}
])
elif params.type.startswith('premiumize_'):
apikey = core.utils.get_premiumize_apikey(core)
if not apikey or apikey == '':
core.kodi.notification('Missing Premiumize service API key')
return
if params.type == 'premiumize_files':
id = params.id if params.id else ''
request = core.debrid.premiumize_files(apikey, id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
files = parsed_response.get('content', [])
for file in files:
if not file.get('type', None):
continue
if file['type'] == 'file':
isvideo = core.os.path.splitext(file['name'])[1].upper() in video_ext
subfile = None
if not isvideo and not params.force_allfiles:
continue
filename_without_ext = core.os.path.splitext(file['name'])[0]
subfile_ext = ['srt', 'sub', 'ass', 'smi', 'ssa']
subfile_names = ['%s.%s' % (filename_without_ext, ext) for ext in subfile_ext]
subfiles = [f for f in files if any(subfile_name in f['name'] for subfile_name in subfile_names)]
subfile = next(iter(subfiles), None)
items.append({
'label': file['name'],
'subitems': False,
'url': file.get('link', file.get('stream_link', None)) if isvideo else '',
'subfile': subfile.get('link', None) if subfile else None,
'contextmenu': {
'Premiumize: Delete': 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=cloud&type=premiumize_file_delete&id=%s)' % file['id']
}
})
elif file['type'] == 'folder':
items.append({
'label': file['name'],
'action': 'cloud',
'type': 'premiumize_files',
'info': '',
'subitems': True,
'params': {
'id': file['id'],
},
'contextmenu': {
'Premiumize: All Files': 'ActivateWindow(Videos,plugin://plugin.video.a4kstreaming/?action=cloud&type=premiumize_files&id=%s&force_allfiles=true,return)' % file['id'],
'Premiumize: Delete': 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=cloud&type=premiumize_folder_delete&id=%s)' % file['id'],
}
})
elif params.type == 'premiumize_transfers':
request = core.debrid.premiumize_transfers(apikey)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
transfers = parsed_response.get('transfers', [])
for transfer in transfers:
isfinished = transfer['status'] == 'finished'
label = '[%s] %s' % (('Completed' if isfinished else transfer.get('message', ('%s%%' % transfer['progress']))), transfer['name'])
items.append({
'label': label,
'action': 'cloud',
'type': 'premiumize_files',
'info': '',
'subitems': isfinished if transfer['file_id'] is None else False,
'url': None if isfinished and transfer['file_id'] is None else '',
'params': {
'id': transfer['folder_id'],
},
'contextmenu': {
'Premiumize: Delete': 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=cloud&type=premiumize_transfer_delete&id=%s)' % transfer['id'],
'Premiumize: Clear Finished': 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=cloud&type=premiumize_transfer_clearfinished&id=%s)' % transfer['id']
}
})
elif params.type == 'premiumize_file_delete':
request = core.debrid.premiumize_file_delete(apikey, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
else:
core.kodi.notification('PM file removed: %s' % params.id)
core.utils.end_action(core, True)
return core.skip_end_of_dir
elif params.type == 'premiumize_folder_delete':
request = core.debrid.premiumize_folder_delete(apikey, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
else:
core.kodi.notification('PM folder removed: %s' % params.id)
core.utils.end_action(core, True)
return core.skip_end_of_dir
elif params.type == 'premiumize_transfer_delete':
request = core.debrid.premiumize_transfer_delete(apikey, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
else:
core.kodi.notification('PM transfer removed: %s' % params.id)
core.utils.end_action(core, True)
return core.skip_end_of_dir
elif params.type == 'premiumize_transfer_clearfinished':
request = core.debrid.premiumize_transfer_clearfinished(apikey)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
else:
core.kodi.notification('PM cleared finished transfers')
core.utils.end_action(core, True)
return core.skip_end_of_dir
else:
core.not_supported()
return
elif params.type.startswith('realdebrid_'):
apikey = core.utils.get_realdebrid_apikey(core)
if not apikey or apikey == '':
core.kodi.notification('Missing RealDebrid service API key')
return
auth = core.utils.rd_auth_query_params(core, apikey)
if params.type == 'realdebrid_transfers':
request = core.debrid.realdebrid_transfers(auth)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
for transfer in parsed_response:
isfinished = transfer['status'] == 'downloaded'
label = '[%s] %s' % (('Completed' if isfinished else '%s%%' % transfer['progress']), transfer['filename'])
items.append({
'label': label,
'action': 'cloud',
'type': 'realdebrid_files',
'info': '',
'subitems': isfinished,
'url': None if isfinished else '',
'params': {
'id': transfer['id'],
},
'contextmenu': {
'RealDebrid: Delete': 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=cloud&type=realdebrid_delete&id=%s)' % transfer['id']
}
})
elif params.type == 'realdebrid_files':
request = core.debrid.realdebrid_files(auth, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
selected_files = []
for file in parsed_response['files']:
if file.get('selected', None):
selected_files.append(file)
for i, file in enumerate(selected_files):
items.append({
'label': file['path'].strip('/'),
'action': 'cloud',
'type': 'realdebrid_file',
'info': '',
'subitems': False,
'params': {
'id': parsed_response['links'][i],
}
})
elif params.type == 'realdebrid_file':
request = core.debrid.realdebrid_resolve(auth, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
link = parsed_response['download']
item = core.kodi.xbmcgui.ListItem(path=link, offscreen=True)
item.setInfo('video', {'mediatype': 'video'})
core.utils.end_action(core, True, item)
return core.skip_end_of_dir
elif params.type == 'realdebrid_delete':
request = core.debrid.realdebrid_delete(auth, params.id)
response = core.request.execute(core, request)
if response.status_code != 204:
__handle_request_error(core, params, response)
else:
core.kodi.notification('RD transfer removed: %s' % params.id)
core.utils.end_action(core, True)
return core.skip_end_of_dir
else:
core.not_supported()
return
elif params.type.startswith('alldebrid_'):
apikey = core.utils.get_alldebrid_apikey(core)
if not apikey or apikey == '':
core.kodi.notification('Missing AllDebrid service API key')
return
auth = core.utils.ad_auth_query_params(core, apikey)
if params.type == 'alldebrid_transfers':
request = core.debrid.alldebrid_transfers(auth)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
for transfer in parsed_response.get('data', parsed_response)['magnets']:
isfinished = transfer['status'] == 'Ready'
progress = transfer['downloaded'] / transfer['size'] * 100 if not isfinished else 100
label = '[%s] %s' % (('Completed' if isfinished else '%s%%' % progress), transfer['filename'])
items.append({
'label': label,
'action': 'cloud',
'type': 'alldebrid_files',
'info': '',
'subitems': isfinished,
'url': None if isfinished else '',
'params': {
'id': transfer['id'],
},
'contextmenu': {
'AllDebrid: Delete': 'RunPlugin(plugin://plugin.video.a4kstreaming/?action=cloud&type=alldebrid_delete&id=%s)' % transfer['id']
}
})
elif params.type == 'alldebrid_files':
request = core.debrid.alldebrid_files(auth, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
magnet = parsed_response.get('data', parsed_response)['magnets']
if isinstance(magnet, list):
magnet = magnet[0]
for file in magnet['links']:
items.append({
'label': file['filename'],
'action': 'cloud',
'type': 'alldebrid_file',
'info': '',
'subitems': False,
'params': {
'id': file['link'],
}
})
elif params.type == 'alldebrid_file':
request = core.debrid.alldebrid_resolve(auth, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return
parsed_response = core.json.loads(response.content)
status = parsed_response.get('status', None)
if status != 'success':
__handle_request_error(core, params, response)
return
link = parsed_response.get('data', parsed_response)['link']
item = core.kodi.xbmcgui.ListItem(path=link, offscreen=True)
item.setInfo('video', {'mediatype': 'video'})
core.utils.end_action(core, True, item)
return core.skip_end_of_dir
elif params.type == 'alldebrid_delete':
request = core.debrid.alldebrid_delete(auth, params.id)
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
else:
core.kodi.notification('AD transfer removed: %s' % params.id)
core.utils.end_action(core, True)
return core.skip_end_of_dir
else:
core.not_supported()
return
else:
core.not_supported()
return
list_items = core.utils.generic_list_items(core, items)
core.kodi.xbmcplugin.addDirectoryItems(core.handle, list_items, len(list_items))
return items
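# Run the IMDb GraphQL query/mutation selected by params.type, follow pagination
# recursively and either return the data or render it as Kodi directory items.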
def query(core, params):
no_auth_required_actions = ['popular', 'year', 'fan_picks', 'more_like_this', 'seasons', 'episodes', 'browse']
bool_response_actions = ['rate', 'unrate', 'add_to_list', 'remove_from_list', 'add_to_predefined_list', 'remove_from_predefined_list']
if params.type not in no_auth_required_actions and not __check_imdb_auth_config(core, params):
return
now = core.datetime.now()
releasedOnOrAfter = {}
if params.year:
releasedOnOrAfter['year'] = int(params.year)
if params.month:
releasedOnOrAfter['month'] = int(params.month)
if params.day:
releasedOnOrAfter['day'] = int(params.day)
releasedOnOrBefore = {}
if params.year_end:
releasedOnOrBefore['year'] = int(params.year_end)
elif params.year:
releasedOnOrBefore['year'] = int(params.year) + 1
if params.month_end:
releasedOnOrBefore['month'] = int(params.month_end)
if params.day_end:
releasedOnOrBefore['day'] = int(params.day_end)
page_size = core.kodi.get_int_setting('general.page_size')
lists_page_size = core.kodi.get_int_setting('general.lists_page_size')
requests = {
'popular': lambda: core.utils.get_graphql_query({
'query': '''
query fn($limit: Int!, $paginationToken: String, $popularTitlesQueryFilter: PopularTitlesQueryFilter!, $EXTRA_PARAMS) {
popularTitles(limit: $limit, paginationToken: $paginationToken, queryFilter: $popularTitlesQueryFilter) {
titles {
...Title
}
paginationToken
}
}
''',
'operationName': 'fn',
'variables': {
'limit': page_size,
'popularTitlesQueryFilter': {
'releaseDateRange': {'end': '%s-%s-%s' % (now.year, str(now.month).zfill(2), str(now.day).zfill(2))}
},
'paginationToken': params.paginationToken
}
}),
'year': lambda: core.utils.get_graphql_query({
'query': '''
query fn($limit: Int!, $paginationToken: String, $popularTitlesQueryFilter: PopularTitlesQueryFilter!, $EXTRA_PARAMS) {
popularTitles(limit: $limit, paginationToken: $paginationToken, queryFilter: $popularTitlesQueryFilter) {
titles {
...Title
}
paginationToken
}
}
''',
'operationName': 'fn',
'variables': {
'limit': page_size,
'popularTitlesQueryFilter': {
'releaseDateRange': {'start': '%s-01-01' % params.target_year, 'end': '%s-12-31' % params.target_year }
},
'paginationToken': params.paginationToken
}
}),
'fan_picks': lambda: core.utils.get_graphql_query({
'query': '''
query fn($first: Int!, $EXTRA_PARAMS) {
fanPicksTitles(first: $first) {
titles: edges {
node {
...Title
}
}
}
}
''',
'operationName': 'fn',
'variables': {
'first': 100,
}
}),
'more_like_this': lambda: core.utils.get_graphql_query({
'query': '''
query fn($id: ID!, $paginationToken: ID, $first: Int!, $EXTRA_PARAMS) {
title(id: $id) {
moreLikeThisTitles(first: $first, after: $paginationToken) {
titles: edges {
node {
...Title
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
''',
'operationName': 'fn',
'variables': {
'id': params.id,
'paginationToken': params.paginationToken,
'first': page_size,
}
}),
'seasons': lambda: core.utils.get_graphql_query({
'query': '''
query fn($id: ID!, $paginationToken: ID, $EXTRA_PARAMS) {
title(id: $id) {
%s
...Seasons
}
}
''' % ('...TitleFull' if not params.paginationToken else ''),
'operationName': 'fn',
'variables': {
'id': params.id,
'paginationToken': params.paginationToken,
}
}),
'episodes': lambda: core.utils.get_graphql_query({
'query': '''
query fn($id: ID!, $episodesFilter: EpisodesFilter!, $EXTRA_PARAMS) {
title(id: $id) {
...Episodes
}
}
''',
'operationName': 'fn',
'variables': {
'id': params.id,
'episodesFilter': {
'releasedOnOrAfter': releasedOnOrAfter if len(releasedOnOrAfter) > 0 else None,
'releasedOnOrBefore': releasedOnOrBefore if len(releasedOnOrBefore) > 0 else None
}
}
}),
'knownfor': lambda: core.utils.get_graphql_query({
'query': '''
query fn($id: ID!, $limit: Int!, $paginationToken: ID, $EXTRA_PARAMS) {
name(id: $id) {
credits(first: $limit, after: $paginationToken, filter: { categories: ["actor", "actress", "director", "writer"], credited: CREDITED_ONLY }) {
titles: edges {
node {
title {
...Title
series {
series {
id
titleType {
id
}
titleText {
text
}
primaryImage {
url
width
height
type
}
}
}
}
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
''',
'operationName': 'fn',
'variables': {
'id': params.id,
'limit': page_size,
'paginationToken': params.paginationToken,
}
}),
'browse': lambda: core.utils.get_graphql_query({
'query': '''
query fn($id: ID!, $EXTRA_PARAMS) {
title(id: $id) {
...TitleFull
}
}
''',
'operationName': 'fn',
'variables': {
'id': params.id,
}
}),
'top_picks': lambda: core.utils.get_graphql_query({
'query': '''
query fn($first: Int!, $paginationToken: ID, $EXTRA_PARAMS) {
topPicksTitles(first: $first, after: $paginationToken) {
titles: edges {
node {
...Title
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
''',
'operationName': 'fn',
'variables': {
'first': page_size,
'paginationToken': params.paginationToken,
}
}),
'ratings': lambda: core.utils.get_graphql_query({
'query': '''
query fn($ids: [ID!]!, $EXTRA_PARAMS) {
titles(ids: $ids) {
userRating {
value
}
}
}
''',
'operationName': 'fn',
'variables': {
'ids': params.ids
}
}),
'watchlist': lambda: core.utils.get_graphql_query({
'query': '''
query fn($first: Int!, $paginationToken: ID, $EXTRA_PARAMS) {
predefinedList(classType: WATCH_LIST) {
items(first: $first, after: $paginationToken, sort: { by: CREATED_DATE, order: DESC }) {
titles: edges {
node {
item {
...Title
}
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
''',
'operationName': 'fn',
'variables': {
'first': lists_page_size,
'paginationToken': params.paginationToken,
}
}),
'listid': lambda: core.utils.get_graphql_query({
'query': '''
query fn($classType: ListClassId!) {
predefinedList(classType: $classType) {
id
}
}
''',
'operationName': 'fn',
'variables': {
'classType': params.class_type,
}
}),
'lists': lambda: core.utils.get_graphql_query({
'query': '''
query fn($first: Int!, $paginationToken: ID, $EXTRA_PARAMS) {
lists(first: $first, after: $paginationToken, filter: { classTypes: [LIST] }) {
lists: edges {
node {
id
name {
originalText
}
listType {
id
}
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
''',
'operationName': 'fn',
'variables': {
'first': page_size,
'paginationToken': params.paginationToken,
}
}),
'list': lambda: core.utils.get_graphql_query({
'query': '''
query fn($id: ID!, $first: Int!, $paginationToken: ID, $EXTRA_PARAMS) {
list(id: $id) {
items(first: $first, after: $paginationToken, sort: { by: CREATED_DATE, order: DESC }) {
titles: edges {
node {
item {
...Title
}
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
''',
'operationName': 'fn',
'variables': {
'id': params.id,
'first': lists_page_size,
'paginationToken': params.paginationToken,
}
}),
'status': lambda: core.utils.get_graphql_query({
'query': '''
query fn($classTypes: [ListClassId!]!, $EXTRA_PARAMS) {
lists(first: 2, filter: { listElementType: TITLES, classTypes: $classTypes }) {
edges {
node {
%s
listClass {
id
}
}
}
}
}
''' % ('\n'.join(['%s: isElementInList(itemElementId: "%s")' % (id, id) for id in params.ids])),
'variables': {
'classTypes': params.class_types if params.class_types else ['WATCH_LIST']
}
}),
'rate': lambda: core.utils.get_graphql_query({
'query': __generate_mutation_query(
'%s: rateTitle(input: { rating: $rating, titleId: "%s" }) { rating { value } }',
params.titleids if params.titleids else [params.titleid],
vars='$rating: Int!'
),
'operationName': 'fn',
'variables': {
'rating': params.rating,
}
}),
'unrate': lambda: core.utils.get_graphql_query({
'query': __generate_mutation_query(
'%s: deleteTitleRating(input: { titleId: "%s" }) { date }',
params.titleids if params.titleids else [params.titleid]
),
'operationName': 'fn',
'variables': {}
}),
'add_to_list': lambda: core.utils.get_graphql_query({
'query': __generate_mutation_query(
'%s: addItemToList(input: { listId: $listId, item: { itemElementId: "%s" } }) { listId }',
params.titleids if params.titleids else [params.titleid],
vars='$listId: ID!'
),
'operationName': 'fn',
'variables': {
'listId': params.listid,
}
}),
'remove_from_list': lambda: core.utils.get_graphql_query({
'query': '''
mutation fn($listId: ID!, $titleId: ID!, $EXTRA_PARAMS) {
removeElementFromList(input: { listId: $listId, itemElementId: $titleId }) {
listId
}
}
''',
'operationName': 'fn',
'variables': {
'listId': params.listid,
'titleId': params.titleid,
}
}),
'add_to_predefined_list': lambda: core.utils.get_graphql_query({
'query': __generate_mutation_query(
'%s: addItemToPredefinedList(input: { classType: $classType, item: { itemElementId: "%s" } }) { listId }',
params.titleids if params.titleids else [params.titleid],
vars='$classType: ListClassId!'
),
'operationName': 'fn',
'variables': {
'classType': params.class_type,
}
}),
'remove_from_predefined_list': lambda: core.utils.get_graphql_query({
'query': __generate_mutation_query(
'%s: removeElementFromPredefinedList(input: { classType: $classType, itemElementId: "%s" }) { listId }',
params.titleids if params.titleids else [params.titleid],
vars='$classType: ListClassId!'
),
'operationName': 'fn',
'variables': {
'classType': params.class_type,
}
}),
}
if not requests.get(params.type, None):
core.not_supported()
return
request = requests[params.type]()
response = core.request.execute(core, request)
if response.status_code != 200:
if params.type in bool_response_actions:
return False
else:
__handle_request_error(core, params, response)
return []
parsed_response = core.json.loads(response.content)
if parsed_response.get('errors', None) is not None and isinstance(parsed_response['errors'], list):
errors = parsed_response['errors']
try: invalid_creds = params.type not in no_auth_required_actions and 'authenticat' in ' '.join(map(lambda v: v['message'].lower(), errors))
except: invalid_creds = False
if invalid_creds:
if params.type in bool_response_actions:
return False
else:
if not params.silent:
core.kodi.notification('Invalid IMDb authentication cookies')
core.utils.end_action(core, True)
return []
else:
if 'data' not in parsed_response:
if not params.retry and len(errors) > 0:
params.retry = True
return query(core, params)
else:
__handle_request_error(core, params, response)
return []
else:
core.logger.notice(errors)
if params.type in bool_response_actions:
return True
data = parsed_response['data']
typeKey = '%sTitles' % params.type
try:
if typeKey in data:
data = data[typeKey]
except:
__handle_request_error(core, params, response)
return []
data = core.utils.sanitize_response(data)
if data is None:
if not params.retry:
params.retry = True
return query(core, params)
else:
__handle_request_error(core, params)
return []
if params.type in ['status', 'listid', 'ratings']:
return data
elif params.type == 'lists':
if params.silent:
return data['lists']
core.viewType = core.kodi.get_setting('views.menu')
core.contentType = 'videos'
__add_lists(core, data)
elif params.type == 'seasons':
episodesData = data.get('episodes', data).get('episodes', data)
episodes = episodesData.get('edges', [])
pageInfo = episodesData.get('pageInfo', {})
hasNextPage = pageInfo.get('hasNextPage', None)
paginationToken = pageInfo.get('endCursor', None)
if hasNextPage:
params_copy = core.utils.DictAsObject(params.copy())
params_copy.paginationToken = paginationToken
nextEpisodes = query(core, params_copy)
episodes = episodes + nextEpisodes
if params.paginationToken:
return episodes
data['episodes']['episodes'] = episodes
if params.silent:
return data['episodes']['episodes']
core.contentType = 'seasons'
__add_seasons(core, data)
elif params.type == 'episodes':
core.contentType = 'episodes'
__add_episodes(core, data, int(params.season))
elif params.type == 'browse':
if data['titleType'] in ['tvEpisode']:
core.viewType = core.kodi.get_setting('views.episode')
if data['titleType'] in ['movie', 'tvMovie', 'tvEpisode', 'video']:
core.contentType = 'movies'
result = __add_title(core, data, params.silent)
if params.silent:
return result
else:
core.contentType = 'movies'
titles = data if isinstance(data, list) else data.get('titles', [])
if params.type == 'knownfor':
title_ids = {}
temp_titles = []
for title in titles:
if title['titleType'] == 'tvEpisode':
title = title['series']
if title_ids.get(title['id'], True):
title_ids[title['id']] = False
temp_titles.append(title)
titles = list(filter(lambda t: (t['titleType'] in ['movie', 'tvSeries']) and t.get('primaryImage', None), temp_titles))
pageInfo = data.get('pageInfo', {})
hasNextPage = pageInfo.get('hasNextPage', None)
paginationToken = pageInfo.get('endCursor', None)
if hasNextPage:
params_copy = core.utils.DictAsObject(params.copy())
params_copy.paginationToken = paginationToken
nextTitles = query(core, params_copy)
titles = titles + nextTitles
if params.paginationToken:
return titles
title_ids = {}
temp_titles = titles
titles = []
for title in temp_titles:
if title_ids.get(title['id'], True):
title_ids[title['id']] = False
titles.append(title)
data['pageInfo'] = None
__add_titles(core, titles, browse=None)
if isinstance(data, dict) and (data.get('paginationToken', None) or data.get('pageInfo', None) and data['pageInfo'].get('hasNextPage', False)):
next_list_item = core.kodi.xbmcgui.ListItem(label='Next', offscreen=True)
next_list_item.setInfo('video', {'mediatype': 'video'})
paginationToken = data.get('paginationToken', None)
if not paginationToken:
paginationToken = data['pageInfo']['endCursor']
url = '%s?action=query&type=%s&paginationToken=%s' % (core.url, params.type, paginationToken)
if params.id:
url += '&id=%s' % params.id
if params.target_year:
url += '&target_year=%s' % params.target_year
core.kodi.xbmcplugin.addDirectoryItem(core.handle, url, next_list_item, True)
return data if isinstance(data, list) else [data]
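# Profile actions against the authenticated IMDb account: watchlist and list membership,
# ratings, and watched/unwatched state (implemented via ratings).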
def profile(core, params):
if not __check_imdb_auth_config(core, params):
return
if params.type == 'check_imdb_auth':
request = {
'method': 'GET',
'url': 'https://www.imdb.com/registration/is-user-recognized',
}
request.update(core.utils.imdb_auth_request_props())
response = core.request.execute(core, request)
if response.status_code != 200:
core.utils.end_action(core, False)
core.kodi.notification('Failed to authenticate')
return
parsed_response = core.json.loads(response.content)
if not parsed_response.get('isUserRecognized', False):
core.utils.end_action(core, False)
core.kodi.notification('Failed to authenticate')
return
core.kodi.notification('Successfully authenticated')
elif params.type.startswith('watchlist_'):
params.ids = params.ids.split('__') if params.ids else None
if params.type == 'watchlist_add':
result = query(core, core.utils.DictAsObject({
'type': 'add_to_predefined_list',
'class_type': 'WATCH_LIST',
'titleid': params.id,
'titleids': params.ids,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('%s added to watchlist' % params.id)
else:
if not params.silent:
core.kodi.notification('Failed to add %s to watchlist' % params.id)
core.utils.end_action(core, True)
return
elif params.type == 'watchlist_remove':
result = query(core, core.utils.DictAsObject({
'type': 'remove_from_predefined_list',
'class_type': 'WATCH_LIST',
'titleid': params.id,
'titleids': params.ids,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('%s removed from watchlist' % params.id)
else:
if not params.silent:
core.kodi.notification('Failed to remove %s from watchlist' % params.id)
core.utils.end_action(core, True)
return
else:
core.not_supported()
return
elif params.type.startswith('mark_'):
params.ids = params.ids.split('__') if params.ids else None
if params.type == 'mark_as_watched':
mark_as_watched_rating = core.kodi.get_int_setting('general.mark_as_watched_rating')
result = query(core, core.utils.DictAsObject({
'type': 'rate',
'rating': mark_as_watched_rating,
'titleid': params.id,
'titleids': params.ids,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('%s marked as watched' % params.id)
else:
if not params.silent:
core.kodi.notification('Failed to mark %s as watched' % params.id)
core.utils.end_action(core, True)
return
elif params.type == 'mark_as_unwatched':
result = query(core, core.utils.DictAsObject({
'type': 'unrate',
'titleid': params.id,
'titleids': params.ids,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('%s marked as unwatched' % params.id)
else:
if not params.silent:
core.kodi.notification('Failed to mark %s as unwatched' % params.id)
core.utils.end_action(core, True)
return
else:
core.not_supported()
return
elif params.type == 'rate':
rating_selection = list(map(lambda v: str(v), range(1, 11)))
rating_selection.reverse()
selection = core.kodi.xbmcgui.Dialog().select(
'IMDb rating',
['Remove'] + rating_selection,
)
if selection == -1:
core.utils.end_action(core, True)
return
elif selection == 0:
result = query(core, core.utils.DictAsObject({
'type': 'unrate',
'titleid': params.id,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('Rating removed for %s' % params.id)
else:
if not params.silent:
core.kodi.notification('Failed to remove rating for %s' % params.id)
core.utils.end_action(core, True)
return
else:
result = query(core, core.utils.DictAsObject({
'type': 'rate',
'rating': int(rating_selection[selection - 1]),
'titleid': params.id,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('Rating set for %s' % params.id)
else:
if not params.silent:
core.kodi.notification('Failed to set rating for %s' % params.id)
core.utils.end_action(core, True)
return
elif params.type.startswith('list_'):
params.ids = params.ids.split('__') if params.ids else None
if params.imdb_list:
imdb_list = params.imdb_list
else:
lists = query(core, core.utils.DictAsObject({ 'type': 'lists', 'silent': True }))
lists.sort(key=lambda v: v['name'])
selection = core.kodi.xbmcgui.Dialog().select(
'IMDb lists',
[imdb_list['name'] for imdb_list in lists],
)
if selection == -1:
core.utils.end_action(core, True)
return
imdb_list = lists[selection]
if params.type == 'list_add':
result = query(core, core.utils.DictAsObject({
'type': 'add_to_list',
'listid': imdb_list['id'],
'titleid': params.id,
'titleids': params.ids,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('%s added to %s' % (params.id, imdb_list['name']))
else:
if not params.silent:
core.kodi.notification('Failed to add %s to %s' % (params.id, imdb_list['name']))
core.utils.end_action(core, True)
return
elif params.type == 'list_remove':
result = query(core, core.utils.DictAsObject({
'type': 'remove_from_list',
'listid': imdb_list['id'],
'titleid': params.id,
'silent': params.silent,
}))
if result is True:
if not params.silent:
core.kodi.notification('%s removed from %s' % (params.id, imdb_list['name']))
else:
if not params.silent:
core.kodi.notification('Failed to remove %s from %s' % (params.id, imdb_list['name']))
core.utils.end_action(core, True)
return
else:
core.not_supported()
return
else:
core.not_supported()
return
if not params.silent:
core.utils.end_action(core, True)
return True
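# Resolve a title's trailer stream URL from IMDb's VIDEO_PLAYBACK_DATA endpoint and
# either play it directly or return it as a playable list item.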
def trailer(core, params):
if not params.id:
core.kodi.notification('Trailer not found')
core.utils.end_action(core, False)
return
if params.play == 'true':
core.kodi.open_busy_dialog()
request = {
'method': 'GET',
'url': 'https://www.imdb.com/ve/data/VIDEO_PLAYBACK_DATA',
'params': {
'key': core.base64.b64encode(core.json.dumps({ 'type': 'VIDEO_PLAYER', 'subType': 'FORCE_LEGACY', 'id': params.id }).encode('ascii'))
},
'headers': {
'content-type': 'application/json',
},
}
response = core.request.execute(core, request)
if response.status_code != 200:
core.kodi.close_busy_dialog()
core.utils.end_action(core, False)
core.logger.notice(response.text)
core.kodi.notification('Trailer not found')
return
parsed_response = core.json.loads(response.content)
try:
all = parsed_response[0]['videoLegacyEncodings']
filtered = filter(lambda v: v['definition'] != 'AUTO', all)
        trailerUrl = next(iter(filtered), all[0])['url']
except:
core.kodi.close_busy_dialog()
core.utils.end_action(core, False)
core.kodi.notification('Trailer not found')
return []
item = core.kodi.xbmcgui.ListItem(path=trailerUrl, offscreen=True)
item.setInfo('video', {'mediatype': 'video'})
if params.play == 'true':
core.kodi.close_busy_dialog()
core.kodi.xbmc.Player().play(item=trailerUrl, listitem=item)
else:
core.utils.end_action(core, True, item)
return [trailerUrl]
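# Prompt the user to pick a scraped source and add it to the chosen debrid service's
# cloud (Premiumize, RealDebrid or AllDebrid).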
def cache_sources(core, params, results=None):
pm_apikey = core.utils.get_premiumize_apikey(core)
rd_apikey = core.utils.get_realdebrid_apikey(core)
ad_apikey = core.utils.get_alldebrid_apikey(core)
if (not pm_apikey or pm_apikey == '') and (not rd_apikey or rd_apikey == '') and (not ad_apikey or ad_apikey == ''):
core.kodi.notification('Missing debrid service API key')
core.utils.end_action(core, True)
return
if not results:
results = play(core, core.utils.DictAsObject({ 'id': params.id, 'cache_sources': True }))
if not results:
core.kodi.notification('Something went wrong. Check logs')
return
if len(results) == 0:
core.kodi.notification('No sources found')
return
debrid = []
debrid_map = {
'Premiumize': 'PM',
'RealDebrid': 'RD',
'AllDebrid': 'AD',
}
if pm_apikey:
debrid.append('Premiumize')
if rd_apikey:
debrid.append('RealDebrid')
if ad_apikey:
debrid.append('AllDebrid')
if len(debrid) == 1:
selection = 0
else:
selection = core.kodi.xbmcgui.Dialog().select(
'Choose Debrid',
debrid,
)
if selection == -1:
return
debrid = debrid_map[debrid[selection]]
selection = None
    while selection != -1:
results_keys = list(results.keys())
def sorter():
return lambda x: (
-int(results[x].get('seeds', 0)),
not results[x]['quality'] == '4K',
not results[x]['quality'] == '1080P',
not results[x]['quality'] == '720P',
not results[x]['quality'] == 'SD',
not results[x]['quality'] == 'CAM',
-results[x]['size'],
not results[x]['hdr'] == 'HDR',
not results[x]['videocodec'] == 'H265',
'TRUEHD' not in results[x]['audiocodec'],
'DTS' not in results[x]['audiocodec'],
'ATMOS' not in results[x]['audiocodec'],
'HD-MA' not in results[x]['audiocodec'],
results[x]['release_title'],
)
results_keys = sorted(results_keys, key=sorter())
selection = core.kodi.xbmcgui.Dialog().select(
'Choose source to cache',
['Seeds: %s | %s' % (results[key]['seeds'], results[key]['title']) for key in results_keys],
)
if selection > -1:
result = results[results_keys[selection]]
def cache_to_pm():
request = core.debrid.premiumize_cache(pm_apikey, result['magnet'])
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return False
parsed_response = core.json.loads(response.content)
status = parsed_response.get('status', None)
error = parsed_response.get('error', None)
if status != 'success' and (status != 'error' or error != 'duplicate'):
__handle_request_error(core, params, response)
return False
if error == 'duplicate':
core.kodi.notification('%s transfer is already added' % debrid)
else:
core.kodi.notification('%s transfer created: %s' % (debrid, result['hash']))
return True
def cache_to_rd():
auth = core.utils.rd_auth_query_params(core, rd_apikey)
request = core.debrid.realdebrid_cache(auth, result['magnet'])
response = core.request.execute(core, request)
if response.status_code != 201:
__handle_request_error(core, params, response)
return False
parsed_response = core.json.loads(response.content)
id = parsed_response['id']
request = core.debrid.realdebrid_select(auth, id)
response = core.request.execute(core, request)
if response.status_code != 204:
__handle_request_error(core, params, response)
return False
core.kodi.notification('%s transfer created: %s' % (debrid, result['hash']))
return True
def cache_to_ad():
auth = core.utils.ad_auth_query_params(core, ad_apikey)
request = core.debrid.alldebrid_cache(auth, result['hash'])
response = core.request.execute(core, request)
if response.status_code != 200:
__handle_request_error(core, params, response)
return False
parsed_response = core.json.loads(response.content)
status = parsed_response.get('status', None)
if status != 'success':
__handle_request_error(core, params, response)
return False
core.kodi.notification('%s transfer created: %s' % (debrid, result['hash']))
return True
def cache():
if debrid == 'PM':
return cache_to_pm()
elif debrid == 'RD':
return cache_to_rd()
elif debrid == 'AD':
return cache_to_ad()
if not cache():
continue
results.pop(results_keys[selection])
@with_goto
def play(core, params):
general = core.cache.get_general()
if not params.cache_sources and (general.last_action_time and (core.utils.time_ms() - general.last_action_time) < 2000):
general.last_action_time = core.utils.time_ms()
core.cache.save_general(general)
core.utils.end_action(core, True)
return
else:
general.last_action_time = core.utils.time_ms()
core.cache.save_general(general)
if params.type == 'tvSeries':
core.kodi.notification('Select season instead')
core.utils.end_action(core, True)
return
provider_meta = core.provider_meta(core)
if not provider_meta.name:
core.kodi.notification('Provider not installed')
core.utils.end_action(core, True)
return
pm_apikey = core.utils.get_premiumize_apikey(core)
rd_apikey = core.utils.get_realdebrid_apikey(core)
ad_apikey = core.utils.get_alldebrid_apikey(core)
if (not pm_apikey or pm_apikey == '') and (not rd_apikey or rd_apikey == '') and (not ad_apikey or ad_apikey == ''):
core.kodi.notification('Missing debrid service API key')
core.utils.end_action(core, True)
return
provider_params = core.utils.DictAsObject({})
provider_params.type = 'search'
if not params.type:
if not params.id:
core.kodi.notification('Missing title id')
core.utils.end_action(core, True)
return
last_title = core.cache.get_last_title()
if params.id in last_title and last_title[params.id]:
params.type = last_title[params.id]
else:
params.type = query(core, core.utils.DictAsObject({ 'type': 'browse', 'id': params.id, 'silent': True }))
last_title = {}
last_title[params.id] = params.type
core.cache.save_last_title(last_title)
provider_params.title = core.utils.DictAsObject(core.json.loads(core.base64.b64decode(params.type)))
if not provider_params.title:
core.kodi.notification('Something went wrong. Check logs')
core.utils.end_action(core, True)
return
if provider_params.title.tvshowid:
provider_params.title.tvshowseasonid = '%s_%s' % (provider_params.title.tvshowid, provider_params.title.season)
provider_params.start_time = core.utils.time_ms()
last_results = core.cache.get_last_results()
results = {}
if not params.cache_sources:
try:
if provider_params.title.imdbnumber in last_results:
results.update(last_results[provider_params.title.imdbnumber]['results'])
last_results[provider_params.title.imdbnumber]['time'] = core.time.time()
if provider_params.title.tvshowseasonid in last_results:
results.update(last_results[provider_params.title.tvshowseasonid]['results'])
last_results[provider_params.title.tvshowseasonid]['time'] = core.time.time()
if provider_params.title.tvshowid in last_results:
results.update(last_results[provider_params.title.tvshowid]['results'])
last_results[provider_params.title.tvshowid]['time'] = core.time.time()
core.cache.save_last_results(last_results)
except:
if provider_params.title.imdbnumber in last_results:
last_results.pop(provider_params.title.imdbnumber)
if provider_params.title.tvshowseasonid in last_results:
last_results.pop(provider_params.title.tvshowseasonid)
if provider_params.title.tvshowid in last_results:
last_results.pop(provider_params.title.tvshowid)
if len(results) > 0:
for key in results:
results[key]['ref'] = provider_params.title
else:
provider = core.provider(core, provider_params)
if params.cache_sources:
return provider.results
if len(provider.cached) == 0:
core.kodi.notification('No sources found')
if len(provider.results) > 0 and pm_apikey:
confirmed = core.kodi.xbmcgui.Dialog().yesno(
'Uncached sources found',
                    'Found %s uncached sources. Do you want to add some of them to debrid?' % len(provider.results),
nolabel='No',
yeslabel='Yes'
)
if confirmed:
cache_sources(core, params, provider.results)
general.last_action_time = core.utils.time_ms()
core.cache.save_general(general)
core.utils.end_action(core, True)
return
results = provider.cached
all_results = {}
season_results = {}
pack_results = {}
for key in results:
result = results[key].copy()
result.pop('ref')
all_results[key] = result
if result['package'] == 'season':
season_results[key] = result
elif result['package'] == 'show':
pack_results[key] = result
last_results[provider_params.title.imdbnumber] = {
'time': core.time.time(),
'results': all_results
}
if provider_params.title.tvshowid:
if len(season_results) > 0:
last_results[provider_params.title.tvshowseasonid] = {
'time': core.time.time(),
'results': season_results
}
if len(pack_results) > 0:
last_results[provider_params.title.tvshowid] = {
'time': core.time.time(),
'results': pack_results
}
all_results = None
season_results = None
pack_results = None
while len(last_results) > 10:
oldest_key = list(last_results.keys())[0]
for key in last_results:
if last_results[key]['time'] < last_results[oldest_key]['time']:
oldest_key = key
last_results.pop(oldest_key)
core.cache.save_last_results(last_results)
results_keys = list(results.keys())
def sorter():
return lambda x: (
not results[x]['quality'] == '4K',
not results[x]['quality'] == '1080P',
not results[x]['quality'] == '720P',
not results[x]['quality'] == 'SD',
not results[x]['quality'] == 'CAM',
-results[x]['size'],
not results[x]['hdr'] == 'HDR',
not results[x]['videocodec'] == 'H265',
'TRUEHD' not in results[x]['audiocodec'],
'DTS' not in results[x]['audiocodec'],
'ATMOS' not in results[x]['audiocodec'],
'HD-MA' not in results[x]['audiocodec'],
results[x]['release_title'],
)
results_keys = sorted(results_keys, key=sorter())
max_quality = core.kodi.get_int_setting('general.max_quality') + 1
quality_list = ['4K', '1080P', '720P', 'SD']
excluded_quality = quality_list[:len(quality_list) - max_quality]
if len(excluded_quality) > 0:
results_keys_filtered = [key for key in results_keys if results[key]['quality'] not in excluded_quality]
if len(results_keys_filtered) > 0:
results_keys = results_keys_filtered
else:
core.kodi.notification('No results for specified quality. Showing all results.')
if provider_params.title.mediatype == 'movie':
max_movie_size = core.kodi.get_int_setting('general.max_movie_size')
results_keys_filtered = [key for key in results_keys if results[key]['size'] <= max_movie_size]
if len(results_keys_filtered) > 0:
results_keys = results_keys_filtered
else:
core.kodi.notification('No results for specified movie size. Showing all results.')
result_style = '[LIGHT]%s[/LIGHT]'
autoplay = core.kodi.get_bool_setting('general.autoplay') and not params.force_sourceselect
selection = 1
label .selection # type: ignore # noqa: F821
if not autoplay:
selection = core.kodi.xbmcgui.Dialog().select(
'Choose source',
[__action_menu_style % 'New Search'] + [result_style % results[key].get('title_with_debrid', results[key]['title']) for key in results_keys],
preselect=selection
)
if selection == -1:
general.last_action_time = core.utils.time_ms()
core.cache.save_general(general)
core.utils.end_action(core, True)
return
elif selection == 0:
if provider_params.title.imdbnumber in last_results:
last_results.pop(provider_params.title.imdbnumber)
if provider_params.title.tvshowseasonid in last_results:
last_results.pop(provider_params.title.tvshowseasonid)
if provider_params.title.tvshowid in last_results:
last_results.pop(provider_params.title.tvshowid)
core.cache.save_last_results(last_results)
general.last_action_time = None
core.cache.save_general(general)
return play(core, params)
else:
selection -= 1
label .afterselection # type: ignore # noqa: F821
result = results[results_keys[selection]]
video_ext = list(map(lambda v: '.%s' % v.upper(), core.utils.video_containers()))
size = 1048576 * 100
def util_filter_episodes(files, propName):
season_zfill = str(result['ref'].season).zfill(2)
episode_zfill = str(result['ref'].episode).zfill(2)
episode_zfill_3 = episode_zfill.zfill(3)
season = 'S%s' % season_zfill
episode = 'E%s' % episode_zfill
episode_0 = 'E0%s' % episode_zfill
matches = [
' %s%s ' % (season, episode),
' %s%s ' % (season, episode_0),
' %s %s ' % (season, episode),
' %s %s ' % (season, episode_0),
' %sX%s ' % (season_zfill, episode_zfill),
' %sX%s ' % (season_zfill, episode_zfill_3),
' %sX%s ' % (season, episode_zfill),
' %sX%s ' % (season, episode_zfill_3),
' %s%s ' % (result['ref'].season, episode_zfill),
' %s%s ' % (result['ref'].season, episode_zfill_3),
' %s%s ' % (season_zfill, episode_zfill),
' %s%s ' % (season_zfill, episode_zfill_3),
' %s %s ' % (result['ref'].season, episode_zfill),
' %s %s ' % (result['ref'].season, episode_zfill_3),
' %s %s ' % (season_zfill, episode_zfill),
' %s %s ' % (season_zfill, episode_zfill_3),
]
return list(filter(lambda file: any(match in core.utils.clean_release_title(file[propName]) for match in matches), files))
def resolve_pm():
request = core.debrid.premiumize_resolve(pm_apikey, result['magnet'])
response = core.request.execute(core, request)
parsed_response = core.json.loads(response.content)
return parsed_response.get('content', [])
def resolve_rd(resolve_files='videos'):
auth = core.utils.rd_auth_query_params(core, rd_apikey)
request = core.debrid.realdebrid_cache(auth, result['magnet'])
response = core.request.execute(core, request)
parsed_response = core.json.loads(response.content)
if 'id' not in parsed_response:
if 'error' in parsed_response and parsed_response['error'] == 'permission_denied':
core.kodi.notification('RD subscription expired')
return []
id = parsed_response['id']
uri = parsed_response['uri']
files = []
try:
all_files = result['debrid_files'].keys()
file_ids = [] if resolve_files != 'all' else all_files
if resolve_files == 'videos':
title_name = provider_params.title.title.lower()
has_mt2s = False
for file_id in result['debrid_files'].keys():
file = result['debrid_files'][file_id]
ext = core.os.path.splitext(file['filename'])[1].upper()
is_video = ext in video_ext
if ext == '.M2TS':
has_mt2s = True
break
is_enough_size = int(file['filesize']) > size
is_sample = 'sample' not in title_name and 'sample' in file['filename'].lower()
if is_video and is_enough_size and not is_sample:
file_ids.append(file_id)
if has_mt2s:
fsize = None
fid = None
for file_id in result['debrid_files'].keys():
file = result['debrid_files'][file_id]
if fsize is None or fsize < int(file['filesize']):
fsize = int(file['filesize'])
fid = file_id
if fid:
file_ids = [fid]
if result['ref'].mediatype == 'episode' and (len(file_ids) == 0 or resolve_files == 'exact'):
resolve_files = 'exact'
episodes = []
for file_id in result['debrid_files'].keys():
episodes.append({
'id': file_id,
'filename': result['debrid_files'][file_id]['filename']
})
episodes = util_filter_episodes(episodes, 'filename')
for ep in episodes:
file_ids.append(ep['id'])
if len(file_ids) == 0:
return files
request = core.debrid.realdebrid_select(auth, id, files=','.join(file_ids))
response = core.request.execute(core, request)
request = {
'method': 'GET',
'url': '%s%s' % (uri, auth)
}
response = core.request.execute(core, request)
parsed_response = core.json.loads(response.content)
if len(parsed_response['links']) == 0:
if resolve_files == 'videos':
request = core.debrid.realdebrid_delete(auth, id)
core.request.execute(core, request)
if len(file_ids) < len(all_files):
return resolve_rd(resolve_files='all')
elif result['ref'].mediatype == 'episode':
return resolve_rd(resolve_files='exact')
else:
return files
elif resolve_files == 'all' and result['ref'].mediatype == 'episode':
request = core.debrid.realdebrid_delete(auth, id)
core.request.execute(core, request)
return resolve_rd(resolve_files='exact')
else:
return files
selected_files = []
for file in parsed_response['files']:
if file.get('selected', None):
selected_files.append(file)
for i, file in enumerate(selected_files):
if i < len(parsed_response['links']):
files.append({
'path': file['path'],
'size': file['bytes'],
'link': parsed_response['links'][i]
})
finally:
def delete_magnet():
request = core.debrid.realdebrid_delete(auth, id)
core.request.execute(core, request)
core.threading.Thread(target=delete_magnet).start()
return files
def resolve_ad():
auth = core.utils.ad_auth_query_params(core, ad_apikey)
request = core.debrid.alldebrid_cache(auth, result['hash'])
response = core.request.execute(core, request)
parsed_response = core.json.loads(response.content)
id = parsed_response.get('data', parsed_response)['magnets'][0]['id']
files = []
try:
request = core.debrid.alldebrid_files(auth, id)
response = core.request.execute(core, request)
parsed_response = core.json.loads(response.content)
magnet = parsed_response.get('data', parsed_response)['magnets']
if isinstance(magnet, list):
magnet = magnet[0]
links = magnet['links']
if not links:
links = []
for file in links:
files.append({
'path': file['filename'],
'size': file['size'],
'link': file['link']
})
finally:
def delete_magnet():
request = core.debrid.alldebrid_delete(auth, id)
core.request.execute(core, request)
core.threading.Thread(target=delete_magnet).start()
return files
link = result.get('url', None)
if link:
goto .play # type: ignore # noqa: F821
files = []
if result.get('debrid', 'PM') == 'PM':
try: files = resolve_pm()
except:
core.logger.notice(core.traceback.format_exc())
elif result['debrid'] == 'RD':
try: files = resolve_rd()
except:
core.logger.notice(core.traceback.format_exc())
elif result['debrid'] == 'AD':
try: files = resolve_ad()
except:
core.logger.notice(core.traceback.format_exc())
if len(files) > 0:
for file in files:
if file.get('path', None):
file['path'] = core.os.path.basename(file['path']).upper()
video_files = list(filter(lambda v: core.os.path.splitext(v['path'])[1] in video_ext and int(v['size']) > size, files))
if len(video_files) > 0:
files = video_files
filtered = False
try:
if len(files) > 1 and result['ref'].mediatype == 'episode':
episodes = util_filter_episodes(files, 'path')
if len(episodes) > 0:
files = episodes
filtered = True
except Exception as e:
core.logger.notice(e)
if len(files) > 1 or (len(files) == 1 and not filtered and result['package'] in ['show', 'season']):
file_results = {}
for file in files:
file_result = {
'ref': core.utils.DictAsObject(result['ref']),
'release_title': core.os.path.basename(file['path']),
'size': round(float(file['size']) / 1024 / 1024 / 1024, 1),
'link': file.get('link', file.get('stream_link', None)),
}
core.utils.cleanup_result(file_result, no_meta=True)
file_results[file_result['title']] = file_result
file_result_keys = list(file_results.keys())
file_result_keys.sort()
file_result_keys = sorted(file_result_keys, key=lambda v: file_results[v]['release_title'])
file_selection = core.kodi.xbmcgui.Dialog().select(
'Choose file',
[result_style % key for key in file_result_keys],
)
if file_selection > -1:
file = file_results[file_result_keys[file_selection]]
link = file['link']
elif file_selection == -1:
selection += 1
goto .selection # type: ignore # noqa: F821
elif len(files) == 1:
file = files[0]
link = file.get('link', file.get('stream_link', None))
if not link:
selection += 1
if selection < len(results_keys) and autoplay:
goto .afterselection # type: ignore # noqa: F821
else:
core.kodi.notification('Failed to resolve debrid')
goto .selection # type: ignore # noqa: F821
if result.get('debrid', 'PM') == 'RD':
try:
auth = core.utils.rd_auth_query_params(core, rd_apikey)
request = core.debrid.realdebrid_resolve(auth, link)
response = core.request.execute(core, request)
parsed_response = core.json.loads(response.content)
link = parsed_response['download']
except:
core.logger.notice(core.traceback.format_exc())
elif result.get('debrid', 'PM') == 'AD':
try:
auth = core.utils.ad_auth_query_params(core, ad_apikey)
request = core.debrid.alldebrid_resolve(auth, link)
response = core.request.execute(core, request)
parsed_response = core.json.loads(response.content)
link = parsed_response['data']['link']
except:
core.logger.notice(core.traceback.format_exc())
label .play # type: ignore # noqa: F821
item = core.kodi.xbmcgui.ListItem(path=link, offscreen=True)
item.setProperty('IsPlayable', 'true')
item.setContentLookup(False)
video_meta = provider_params.title.copy()
video_meta.pop('tvshowseasonid', None)
video_meta.pop('tvshowid', None)
video_meta.pop('seasons', None)
video_meta.pop('is_airing', None)
video_meta.pop('poster', None)
if provider_params.title.poster:
item.setArt({ 'poster': provider_params.title.poster })
item.setInfo('video', video_meta)
item.addStreamInfo('video', { 'codec': result['videocodec'], 'duration': result['ref'].duration })
core.utils.end_action(core, True, item)
return link
|
__init__.py
|
#!/usr/bin/python
import threading
import serial
import time
import os
if os.name == "posix":
import fcntl
class DMX_Serial:
def __init__(self, port="/dev/ttyUSB0"):
if isinstance(port, str):
self.ser = serial.Serial(port)
else:
self.ser = port
self.ser.baudrate = 250000
self.ser.bytesize = serial.EIGHTBITS
self.ser.parity = serial.PARITY_NONE
self.ser.stopbits = serial.STOPBITS_TWO
        self.ser.xonxoff = False
self.enabled = False
self.data = bytes((0,)*512)
self.nextdata = None
self.send_thread = threading.Thread(target=self.sender)
self.send_thread.daemon = True
self.send_thread.start()
def start(self):
self.enabled = True
def stop(self):
self.enabled = False
def sender(self):
while True:
            if not self.enabled:
                time.sleep(0.01)  # avoid spinning the CPU while output is disabled
                continue
if os.name == "posix":
# Linux does not have proper support for variable length breaks, as the behavior of TCSBRK is
# undefined for values other than 0. (http://man7.org/linux/man-pages/man2/ioctl_tty.2.html)
# Instead this controls the timing of the break directly.
                fcntl.ioctl(self.ser.fileno(), 0x5427)  # Yeah, it's magic. Start Break (TIOCSBRK)
                time.sleep(0.0001)
                fcntl.ioctl(self.ser.fileno(), 0x5428)  # Yeah, it's magic. End Break (TIOCCBRK)
else:
self.ser.send_break(0.0001)
self.ser.write(bytes((0,)))
self.ser.write(self.data)
self.ser.flush()
if self.nextdata:
self.data = self.nextdata
self.nextdata = None
def set_data(self, data):
self.nextdata = data
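# --- Usage sketch (added for illustration; not part of the original module). It assumes a
# --- DMX-capable RS-485/USB adapter on /dev/ttyUSB0 and that driving every channel at full
# --- intensity is safe for whatever fixtures are attached.
if __name__ == "__main__":
    dmx = DMX_Serial()                  # open the port and spawn the (idle) sender thread
    dmx.start()                         # begin sending DMX frames
    dmx.set_data(bytes((255,) * 512))   # queue a frame with all 512 channels at full
    time.sleep(5)
    dmx.stop()                          # stop sending; the daemon thread idles until start()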
|
utils.py
|
import wave
import sys
import struct
import time
import subprocess
import threading
import traceback
import shlex
import os
import string
import random
import datetime as dt
import numpy as np
import scipy as sp
import scipy.special
from contextlib import closing
from argparse import ArgumentParser
from pyoperant import Error
try:
import simplejson as json
except ImportError:
import json
class NumpyAwareJSONEncoder(json.JSONEncoder):
""" this json encoder converts numpy arrays to lists so that json can write them.
example usage:
>>> import numpy as np
>>> dict_to_save = {'array': np.zeros((5,))}
>>> json.dumps(dict_to_save,
cls=NumpyAwareJSONEncoder
)
'{"array": [0.0, 0.0, 0.0, 0.0, 0.0]}'
"""
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# consider importing this from python-neo
class Event(object):
"""docstring for Event"""
def __init__(self, time=None, duration=None, label='', name=None, description=None, file_origin=None, *args, **kwargs):
super(Event, self).__init__()
self.time = time
self.duration = duration
self.label = label
self.name = name
self.description = description
self.file_origin = file_origin
self.annotations = {}
self.annotate(**kwargs)
def annotate(self,**kwargs):
self.annotations.update(kwargs)
class Stimulus(Event):
"""docstring for Stimulus"""
def __init__(self, *args, **kwargs):
super(Stimulus, self).__init__(*args, **kwargs)
if self.label=='':
self.label = 'stimulus'
class AuditoryStimulus(Stimulus):
"""docstring for AuditoryStimulus"""
def __init__(self, *args, **kwargs):
super(AuditoryStimulus, self).__init__(*args, **kwargs)
if self.label=='':
self.label = 'auditory_stimulus'
def run_state_machine(start_in='pre', error_state=None, error_callback=None, **state_functions):
"""runs a state machine defined by the keyword arguments
>>> def run_start():
>>> print "in 'run_start'"
>>> return 'next'
>>> def run_next():
>>> print "in 'run_next'"
>>> return None
>>> run_state_machine(start_in='start',
>>> start=run_start,
>>> next=run_next)
in 'run_start'
in 'run_next'
None
"""
# make sure the start state has a function to run
assert (start_in in state_functions.keys())
# make sure all of the arguments passed in are callable
for func in state_functions.values():
assert hasattr(func, '__call__')
state = start_in
while state is not None:
try:
state = state_functions[state]()
        except Exception as e:
            if error_callback:
                error_callback(e)
            # the exception is re-raised here, so error_state is never actually entered
            raise
class Trial(Event):
"""docstring for Trial"""
def __init__(self,
index=None,
type_='normal',
class_=None,
*args, **kwargs):
super(Trial, self).__init__(*args, **kwargs)
self.label = 'trial'
self.session = None
self.index = index
self.type_ = type_
self.stimulus = None
self.class_ = class_
self.response = None
self.correct = None
self.rt = None
self.reward = False
self.punish = False
self.events = []
self.stim_event = None
class Command(object):
"""
Enables to run subprocess commands in a different thread with TIMEOUT option.
via https://gist.github.com/kirpit/1306188
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, basestring):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except:
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
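# --- Usage sketch (added for illustration; not part of the original module). The command
# --- string below is hypothetical; run() returns once the process exits or the timeout
# --- expires (in which case the process is terminated).
#
#     status, output, error = Command('ping -c 4 localhost').run(timeout=1)
#     if status != 0:
#         print 'command failed or timed out'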
def parse_commandline(arg_str=sys.argv[1:]):
""" parse command line arguments
    note: optparse is deprecated as of Python 2.7 in favor of argparse
"""
parser=ArgumentParser()
parser.add_argument('-B', '--box',
action='store', type=int, dest='box', required=False,
help='(int) box identifier')
parser.add_argument('-S', '--subject',
action='store', type=str, dest='subj', required=False,
help='subject ID and folder name')
parser.add_argument('-c','--config',
action='store', type=str, dest='config_file', default='config.json', required=True,
help='configuration file [default: %(default)s]')
args = parser.parse_args(arg_str)
return vars(args)
def check_cmdline_params(parameters, cmd_line):
    # if someone is using red bands they should amend the checks I perform here
allchars=string.maketrans('','')
nodigs=allchars.translate(allchars, string.digits)
if not ('box' not in cmd_line or cmd_line['box'] == int(parameters['panel_name'].encode('ascii','ignore').translate(allchars, nodigs))):
print "box number doesn't match config and command line"
return False
if not ('subj' not in cmd_line or int(cmd_line['subj'].encode('ascii','ignore').translate(allchars, nodigs)) == int(parameters['subject'].encode('ascii','ignore').translate(allchars, nodigs))):
print "subject number doesn't match config and command line"
return False
return True
def time_in_range(start, end, x):
"""Return true if x is in the range [start, end]"""
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
def is_day((latitude, longitude) = ('32.82', '-117.14')):
"""Is it daytime?
(lat,long) -- latitude and longitude of location to check (default is San Diego*)
Returns True if it is daytime
* Discovered by the Germans in 1904, they named it San Diego,
which of course in German means a whale's vagina. (Burgundy, 2004)
"""
import ephem
obs = ephem.Observer()
obs.lat = latitude # San Diego, CA
obs.long = longitude
sun = ephem.Sun()
sun.compute()
next_sunrise = ephem.localtime(obs.next_rising(sun))
next_sunset = ephem.localtime(obs.next_setting(sun))
return next_sunset < next_sunrise
def check_time(schedule,fmt="%H:%M"):
""" determine whether trials should be done given the current time and the light schedule
returns Boolean if current time meets schedule
schedule='sun' will change lights according to local sunrise and sunset
schedule=[('07:00','17:00')] will have lights on between 7am and 5pm
    schedule=[('06:00','12:00'),('18:00','24:00')] will have lights on between 6am and noon and between 6pm and midnight
"""
if schedule == 'sun':
if is_day():
return True
else:
for epoch in schedule:
            assert len(epoch) == 2
now = dt.datetime.time(dt.datetime.now())
start = dt.datetime.time(dt.datetime.strptime(epoch[0],fmt))
end = dt.datetime.time(dt.datetime.strptime(epoch[1],fmt))
if time_in_range(start,end,now):
return True
return False
def wait(secs=1.0, final_countdown=0.0,waitfunc=None):
"""Smartly wait for a given time period.
secs -- total time to wait in seconds
final_countdown -- time at end of secs to wait and constantly poll the clock
    waitfunc -- optional function to run in a loop during the final_countdown period
    If secs=1.0 and final_countdown=0.2 then for 0.8s python's time.sleep function will be used,
    which is not especially precise, but allows the cpu to perform housekeeping. In
    the final final_countdown seconds the more precise method of constantly polling the clock
    is used for greater precision.
"""
#initial relaxed period, using sleep (better for system resources etc)
if secs > final_countdown:
time.sleep(secs-final_countdown)
secs = final_countdown # only this much is now left
#It's the Final Countdown!!
#hog the cpu, checking time
t0 = time.time()
while (time.time()-t0) < secs:
#let's see if any events were collected in meantime
try:
waitfunc()
except:
pass
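# --- Usage sketch (added for illustration; not part of the original module). Sleeps for the
# --- first 0.8 s, then busy-polls the clock for the final 0.2 s, calling a (hypothetical)
# --- poll_events() function on every pass of the polling loop.
#
#     wait(secs=1.0, final_countdown=0.2, waitfunc=poll_events)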
def auditory_stim_from_wav(wav):
with closing(wave.open(wav,'rb')) as wf:
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wf.getparams()
        # duration in seconds is frame count divided by the sampling rate
        duration = float(nframes) / framerate
stim = AuditoryStimulus(time=0.0,
duration=duration,
name=wav,
label='wav',
description='',
file_origin=wav,
annotations={'nchannels': nchannels,
'sampwidth': sampwidth,
'framerate': framerate,
'nframes': nframes,
'comptype': comptype,
'compname': compname,
}
)
return stim
def concat_wav(input_file_list, output_filename='concat.wav'):
""" concat a set of wav files into a single wav file and return the output filename
takes in a tuple list of files and duration of pause after the file
input_file_list = [
('a.wav', 0.1),
('b.wav', 0.09),
('c.wav', 0.0),
]
    returns a tuple of the concatenated AuditoryStimulus and a list of per-file AuditoryStimulus epochs
TODO: add checks for sampling rate, number of channels, etc.
"""
cursor = 0
epochs = [] # list of file epochs
audio_data = ''
with closing(wave.open(output_filename, 'wb')) as output:
for input_filename, isi in input_file_list:
# read in the wav file
with closing(wave.open(input_filename,'rb')) as wav_part:
try:
params = wav_part.getparams()
output.setparams(params)
fs = output.getframerate()
except: # TODO: what was I trying to except here? be more specific
pass
audio_frames = wav_part.readframes(wav_part.getnframes())
# append the audio data
audio_data += audio_frames
part_start = cursor
part_dur = len(audio_frames)/params[1]
epochs.append(AuditoryStimulus(time=float(part_start)/fs,
duration=float(part_dur)/fs,
name=input_filename,
file_origin=input_filename,
annotations=params,
label='motif'
))
cursor += part_dur # move cursor length of the duration
# add isi
if isi > 0.0:
isi_frames = ''.join([struct.pack('h', fr) for fr in [0]*int(fs*isi)])
audio_data += isi_frames
cursor += len(isi_frames)/params[1]
# concat all of the audio together and write to file
output.writeframes(audio_data)
description = 'concatenated on-the-fly'
concat_wav = AuditoryStimulus(time=0.0,
duration=epochs[-1].time+epochs[-1].duration,
name=output_filename,
label='wav',
description=description,
file_origin=output_filename,
annotations=output.getparams(),
)
return (concat_wav,epochs)
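# --- Usage sketch (added for illustration; not part of the original module). The wav filenames
# --- are hypothetical; each tuple pairs a file with the silence (in seconds) appended after it.
#
#     stimulus, motifs = concat_wav([('a.wav', 0.1), ('b.wav', 0.0)], output_filename='concat.wav')
#     print stimulus.duration, len(motifs)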
def get_num_open_fds():
'''
return the number of open file descriptors for current process
.. warning: will only work on UNIX-like os-es.
'''
pid = os.getpid()
procs = subprocess.check_output(
[ "lsof", '-w', '-Ff', "-p", str( pid ) ] )
nprocs = len(
filter(
lambda s: s and s[ 0 ] == 'f' and s[1: ].isdigit(),
procs.split( '\n' ) )
)
return nprocs
def rand_from_log_shape_dist(alpha=10):
"""
randomly samples from a distribution between 0 and 1 with pdf shaped like the log function
low probability of getting close to zero, increasing probability going towards 1
alpha determines how sharp the curve is, higher alpha, sharper curve.
"""
beta = (alpha + 1) * np.log(alpha + 1) - alpha
t = random.random()
ret = ((beta * t-1)/(sp.special.lambertw((beta*t-1)/np.e)) - 1) / alpha
return max(min(np.real(ret), 1), 0)
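# --- Usage sketch (added for illustration; not part of the original module). Larger alpha
# --- concentrates the samples closer to 1; every sample falls in [0, 1].
#
#     samples = [rand_from_log_shape_dist(alpha=10) for _ in range(5)]
#     print samples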
|
test_connection.py
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import gc
import os
from signal import SIGHUP
import threading
import time
import signal
from amqpy.login import login_response_plain
import pytest
from .. import Channel, NotFound, FrameError, spec, Connection
from ..proto import Method
class TestConnection:
def test_create_channel(self, conn):
ch = conn.channel(1)
assert isinstance(ch, Channel)
assert ch.channel_id == 1
ch2 = conn.channel()
assert ch2.channel_id != 1
ch.close()
ch2.close()
def test_close(self, conn):
"""Make sure we've broken various references when closing channels and connections, to help
with GC
"""
# create a channel and make sure it's linked as we'd expect
ch = conn.channel()
assert 1 in conn.channels
assert ch.connection == conn
assert ch.is_open is True
# close the channel and make sure the references are broken that we expect
ch.close()
assert ch.connection is None
assert 1 not in conn.channels
assert ch.callbacks == {}
assert ch.is_open is False
# close the connection and make sure the references we expect are gone
conn.close()
assert conn.connection is None
assert conn.channels is not None
def test_open_close_open(self):
# create a new connection
conn = Connection()
# close the connection
conn.close()
# reopen the connection
conn.connect()
def test_is_alive(self, conn):
assert conn.is_alive()
def test_is_alive_after_close(self, conn):
conn.close()
assert conn.is_alive() is False
def test_is_alive_chan_exception(self, conn, ch, rand_queue):
"""Make sure connection is still alive after a channel exception
"""
with pytest.raises(NotFound):
ch.queue_declare(rand_queue, passive=True)
assert conn.is_alive()
def test_is_alive_conn_exception(self, conn, rand_queue):
"""Make sure is_alive() returns False after a connection exception
"""
ch = Channel(conn, 10)
with pytest.raises(NotFound):
ch.queue_declare(rand_queue, passive=True)
with pytest.raises(FrameError):
conn.method_writer.write_method(Method(spec.Queue.Declare, channel_id=10))
conn.wait()
assert conn.is_alive() is False
def test_gc_closed(self, conn):
"""Make sure we've broken various references when closing channels and connections, to help
with GC
"""
unreachable_before = len(gc.garbage)
# create a channel and make sure it's linked as we'd expect
conn.channel()
assert 1 in conn.channels
# close the connection and make sure the references we expect are gone.
conn.close()
gc.collect()
gc.collect()
gc.collect()
assert unreachable_before == len(gc.garbage)
def test_gc_forget(self, conn):
"""Make sure the connection gets gc'ed when there is no more references to it
"""
unreachable_before = len(gc.garbage)
ch = conn.channel()
assert 1 in conn.channels
del ch
gc.collect()
gc.collect()
gc.collect()
assert unreachable_before == len(gc.garbage)
@pytest.mark.skipif('sys.version_info >= (3, 5) or sys.version_info[0] == 2')
def test_interrupted(self, conn):
"""Make sure to get InterruptedError if a read was interrupted
"""
def sig_handler(sig, frame):
pass
signal.signal(SIGHUP, sig_handler)
def interrupt_it():
time.sleep(1)
os.kill(os.getpid(), signal.SIGHUP)
th = threading.Thread(target=interrupt_it)
th.start()
with pytest.raises(InterruptedError):
conn.drain_events(2)
class TestLogin:
def test_login_response_plain(self):
b = login_response_plain('blah', 'blah')
assert isinstance(b, bytes)
|
tcpserver.py
|
#!/usr/bin/env python3
import errno
import os
import signal
import socket
import struct
import sys
import threading
import time
from optparse import OptionParser
from fprime.constants import DATA_ENCODING
try:
import socketserver
except ImportError:
import SocketServer as socketserver
__version__ = 0.1
__date__ = "2015-04-03"
__updated__ = "2016-04-07"
# Universal server id global
SERVER = None
LOCK = None
shutdown_event = threading.Event()
FSW_clients = []
GUI_clients = []
FSW_ids = []
GUI_ids = []
def signal_handler(*_):
print("Ctrl-C received, server shutting down.")
shutdown_event.set()
def now():
return time.ctime(time.time())
class ThreadedTCPRequestHandler(socketserver.StreamRequestHandler):
"""
Derived from original Stable demo during R&TD and adapted
    for use in new FSW gse.py application.
TCP socket server for commands, log events, and telemetry data.
Later this will handle other things such as sequence files and parameters.
Handle is instanced in own thread for each client.
Registration is done by sending the string "Register <name>".
Sending a message to destination <name> is done as
"A5A5 <name> <data>" Note only <data> is sent.
    Any client that sends a "List" command makes the server display all
registered clients.
"""
socketserver.StreamRequestHandler.allow_reuse_address = True
socketserver.StreamRequestHandler.timeout = 1
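    # Wire-format sketch (comments added for illustration; the byte layouts below are inferred
    # from handle(), readHeader() and readData() rather than from separate documentation):
    #
    #   registration (13 bytes, read once per connection):  b"Register GUI\n"  or  b"Register FSW\n"
    #   command to FSW:   b"A5A5 FSW " + 4-byte descriptor + 4-byte big-endian length + <length> bytes
    #   telemetry to GUI: b"A5A5 GUI " + 4-byte big-endian length + <length> bytes
    #   list clients:     b"List\n"          shut down the server: b"Quit\n"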
def handle(self): # on each client connect
"""
The function that is invoked upon a new client. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.partial = b""
self.cmdQueue = []
self.registered = False
self.name = b""
self.id = 0
# print self.client_address, now() # show this client's address
# Read the data from the socket
data = self.recv(13)
# Connection was closed by the client
if not data:
print("Client exited.")
return
else:
# Process the data into the cmdQueue
self.getCmds(data)
# Process the cmdQueue
self.processQueue()
if self.registered:
print("Registration complete waiting for message.")
self.getNewMsg()
else:
print("Unable to register client.")
return
LOCK.acquire()
del SERVER.dest_obj[self.name]
if self.name in FSW_clients:
FSW_clients.remove(self.name)
FSW_ids.remove(self.id)
elif self.name in GUI_clients:
GUI_clients.remove(self.name)
GUI_ids.remove(self.id)
LOCK.release()
print("Closed %s connection." % self.name.decode(DATA_ENCODING))
self.registered = False
self.request.close()
def getCmds(self, inputString, end_of_command=b"\n"):
"""
Build a command from partial or full socket input
"""
commands = inputString.split(end_of_command)
if len(self.partial):
commands[0] = self.partial + commands[0]
self.partial = b""
if len(commands[-1]):
self.partial = commands[-1]
self.cmdQueue.extend(commands[:-1])
else:
self.cmdQueue.extend(commands[:-1])
def processQueue(self):
for cmd in self.cmdQueue:
self.processRegistration(cmd)
self.cmdQueue = []
def processRegistration(self, cmd):
params = cmd.split()
process_id = 0
if params[0] == b"Register":
LOCK.acquire()
name = params[1]
if b"FSW" in name:
if FSW_clients:
process_id = sorted(FSW_ids)[-1] + 1
                    name = params[1] + b"_" + str(process_id).encode(DATA_ENCODING)
FSW_clients.append(name)
FSW_ids.append(process_id)
elif b"GUI" in name:
if GUI_clients:
process_id = sorted(GUI_ids)[-1] + 1
                    name = params[1] + b"_" + str(process_id).encode(DATA_ENCODING)
GUI_clients.append(name)
GUI_ids.append(process_id)
SERVER.dest_obj[name] = DestObj(name, self.request)
LOCK.release()
self.registered = True
self.name = name
self.id = process_id
print("Registered client " + self.name.decode(DATA_ENCODING))
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Loop while the connected client has packets to send/receive
while not shutdown_event.is_set():
# Read the header data from the socket either A5A5 or List
header = self.readHeader()
# If the received header is an empty string, connection closed, exit loop
if not header:
break
elif header == b"Quit":
LOCK.acquire()
print("Quit received!")
SERVER.dest_obj[self.name].put(struct.pack(">I", 0xA5A5A5A5))
shutdown_event.set()
time.sleep(1)
print("Quit processed!")
SERVER.shutdown()
SERVER.server_close()
LOCK.release()
break
# Got the header data so read the data of the message here...
data = self.readData(header)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def recv(self, l):
"""
Read l bytes from socket.
"""
chunk = b""
msg = b""
n = 0
while l > n:
try:
chunk = self.request.recv(l - n)
if chunk == b"":
print("read data from socket is empty!")
return b""
msg = msg + chunk
n = len(msg)
except socket.timeout:
if shutdown_event.is_set():
print("socket timed out and shutdown is requested")
return b"Quit\n"
continue
except OSError as err:
if err.errno == errno.ECONNRESET:
print(
"Socket error "
+ str(err.errno)
+ " (Connection reset by peer) occurred on recv()."
)
else:
print("Socket error " + str(err.errno) + " occurred on recv().")
return msg
def readHeader(self):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = self.recv(5)
if len(header) == 0:
print(
"Header information is empty, client "
+ self.name.decode(DATA_ENCODING)
+ " exiting."
)
return header
if header == b"List\n":
return b"List"
elif header == b"Quit\n":
return b"Quit"
elif header[:-1] == b"A5A5":
header2 = self.recv(4)
return header + header2
else:
return
def readData(self, header):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = b""
if header == b"List":
return b""
elif header == b"Quit":
return b""
dst = header.split(b" ")[1].strip(b" ")
if dst == b"FSW":
# Read variable length command data here...
desc = self.recv(4)
sizeb = self.recv(4)
size = struct.unpack(">I", sizeb)[0]
data = desc + sizeb + self.recv(size)
elif dst == b"GUI":
# Read telemetry data here...
tlm_packet_size = self.recv(4)
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + self.recv(size)
else:
raise RuntimeError("unrecognized client %s" % dst.decode(DATA_ENCODING))
return data
def processNewPkt(self, header, data):
"""
Process a single command here header and data here.
The command must always start with A5A5 except if it is a List.
        Once the entire header string is processed send it on queue.
If something goes wrong report and shutdown server.
"""
dest_list = []
if header == b"List":
print("List of registered clients: ")
LOCK.acquire()
for d in list(SERVER.dest_obj.keys()):
print("\t" + SERVER.dest_obj[d].name.decode(DATA_ENCODING))
reg_client_str = b"List " + SERVER.dest_obj[d].name
l = len(reg_client_str)
reg_client_str = struct.pack("i%ds" % l, l, reg_client_str)
self.request.send(reg_client_str)
LOCK.release()
return 0
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == b"":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
elif b"FSW" in dst:
dest_list = FSW_clients
for dest_elem in dest_list:
# print "Locking TCP"
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending TCP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Packet missing A5A5 header")
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
"""
Derived from original Stable demo during R&TD and adapted
    for use in new FSW gse.py application.
    UDP socket server for telemetry data.
    A handler is instanced for each received datagram. Each datagram is
    expected to carry an "A5A5 <name> " header followed by the packet
    data; packets addressed to GUI clients are forwarded to every
    registered GUI destination.
"""
socketserver.BaseRequestHandler.allow_reuse_address = True
def handle(self): # on each packet
"""
The function that is invoked when a packet is received. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.getNewMsg(self.request[0])
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self, packet):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Read the header data from the socket either A5A5 or List
(header, packet) = self.readHeader(packet)
# If the received header is an empty string, connection closed, exit loop
if not header:
return
# Got the header data so read the data of the message here...
data = self.readData(header, packet)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def readHeader(self, packet):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = packet[:4]
header2 = packet[4:9]
packet = packet[9:]
return (header + header2, packet)
def readData(self, header, packet):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = ""
header.split(b" ")[1].strip(b" ")
# Read telemetry data here...
tlm_packet_size = packet[:4]
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + packet[4 : 4 + size]
return data
def processNewPkt(self, header, data):
"""
Process a single command here header and data here.
The command must always start with A5A5 except if it is a List.
Once the entire header string is processed send it on queue.
If something goes wrong report and shutdown server.
"""
dest_list = []
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == "":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
else:
print("dest? %s" % dst.decode(DATA_ENCODING))
for dest_elem in dest_list:
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending UDP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Telemetry missing A5A5 header")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""
TCP Socket server.
Keep a dictionary of destination objects containing queues and
    socket ids for writing to destinations.
"""
dest_obj = dict()
lock_obj = threading.Lock()
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""
UDP Socket server.
"""
class DestObj:
"""
Destination object for all clients registered.
"""
def __init__(self, name, request):
"""
Constructor
"""
self.name = name
self.socket = request
self.packet = b""
def put(self, msg):
"""
Write out the message to the destination socket
"""
try:
# print "about to send data to " + self.name
self.socket.send(msg)
except OSError as err:
print("Socket error " + str(err.errno) + " occurred on send().")
    def fileno(self):
        """
        Return the socket registered for this destination.
        """
        return self.socket
def main(argv=None):
global SERVER, LOCK
program_name = os.path.basename(sys.argv[0])
program_license = "Copyright 2015 user_name (California Institute of Technology) \
ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged."
program_version = "v0.1"
program_build_date = "%s" % __updated__
program_version_string = "%prog {} ({})".format(program_version, program_build_date)
program_longdesc = (
"""""" # optional - give further explanation about what the program does
)
if argv is None:
argv = sys.argv[1:]
try:
parser = OptionParser(
version=program_version_string,
epilog=program_longdesc,
description=program_license,
)
parser.add_option(
"-p",
"--port",
dest="port",
action="store",
type="int",
help="Set threaded tcp socket server port [default: %default]",
default=50007,
)
parser.add_option(
"-i",
"--host",
dest="host",
action="store",
type="string",
help="Set threaded tcp socket server ip [default: %default]",
default="127.0.0.1",
)
# process options
(opts, args) = parser.parse_args(argv)
HOST = opts.host
PORT = opts.port
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
udp_server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)
# Hopefully this will allow address reuse and server to restart immediately
server.allow_reuse_address = True
SERVER = server
LOCK = server.lock_obj
ip, port = server.server_address
print("TCP Socket Server listening on host addr {}, port {}".format(HOST, PORT))
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
udp_server_thread = threading.Thread(target=udp_server.serve_forever)
signal.signal(signal.SIGINT, signal_handler)
server_thread.daemon = False
server_thread.start()
udp_server_thread.daemon = False
udp_server_thread.start()
while not shutdown_event.is_set():
server_thread.join(timeout=5.0)
udp_server_thread.join(timeout=5.0)
print("shutdown from main thread")
SERVER.shutdown()
SERVER.server_close()
udp_server.shutdown()
udp_server.server_close()
time.sleep(1)
except Exception as e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help\n")
return 2
if __name__ == "__main__":
sys.exit(main())
|
database_heartbeat.py
|
import datetime
import threading
from galaxy.model import WorkerProcess
from galaxy.model.orm.now import now
class DatabaseHeartbeat(object):
def __init__(self, application_stack, heartbeat_interval=60):
self.application_stack = application_stack
self.heartbeat_interval = heartbeat_interval
self.exit = threading.Event()
self.thread = None
self.active = False
@property
def sa_session(self):
return self.application_stack.app.model.context
@property
def server_name(self):
# Application stack manipulates server name after forking
return self.application_stack.app.config.server_name
def start(self):
if not self.active:
            self.thread = threading.Thread(target=self.send_database_heartbeat, name="database_heartbeat_%s.thread" % self.server_name)
self.thread.daemon = True
self.active = True
self.thread.start()
def shutdown(self):
self.active = False
self.exit.set()
if self.thread:
self.thread.join()
def get_active_processes(self, last_seen_seconds=None):
"""Return all processes seen in ``last_seen_seconds`` seconds."""
if last_seen_seconds is None:
last_seen_seconds = self.heartbeat_interval
seconds_ago = now() - datetime.timedelta(seconds=last_seen_seconds)
return self.sa_session.query(WorkerProcess).filter(WorkerProcess.table.c.update_time > seconds_ago).all()
def send_database_heartbeat(self):
if self.active:
            while not self.exit.is_set():
worker_process = self.sa_session.query(WorkerProcess).filter_by(
server_name=self.server_name).first()
if not worker_process:
worker_process = WorkerProcess(server_name=self.server_name)
worker_process.update_time = now()
self.sa_session.add(worker_process)
self.sa_session.flush()
self.exit.wait(self.heartbeat_interval)
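# --- Usage sketch (added for illustration; not part of the original module). ``app_stack`` is
# --- assumed to be a Galaxy application stack whose app exposes ``model.context`` and
# --- ``config.server_name``.
#
#     heartbeat = DatabaseHeartbeat(application_stack=app_stack, heartbeat_interval=30)
#     heartbeat.start()                          # background thread upserts this worker's row
#     active = heartbeat.get_active_processes()  # WorkerProcess rows seen within the interval
#     heartbeat.shutdown()                       # stop and join the heartbeat thread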
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2019_11_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2019_11_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2019_11_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2019_11_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2019_11_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2019_11_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2019_11_01.models import ManagedCluster
from azure.mgmt.containerservice.v2019_11_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2019_11_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2019_11_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_11_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2019_11_01.models import AgentPool
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._helpers import _populate_api_server_access_profile, _set_vm_set_type
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to an SSH key to use; only applies to DC/OS clusters
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Cannot find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
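# Minimal usage sketch (URL and path are placeholders): _urlretrieve(file_url, install_location).
# Note the response is read fully into memory via req.read() before being written to disk.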
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError("Unsupported platform '{}' for installing the DC/OS client".format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError("Unsupported platform '{}' for installing kubectl".format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update the system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do this once.'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
# Validate the location against the available ACI regions
_validate_aci_location(norm_location)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = 'Helm not detected, please verify if it is installed.'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
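# Note on the retry cadence above: attempt x sleeps 2 + 2*x seconds, i.e. roughly 2, 4, ... 20
# seconds between the ten tries (about 110 seconds of waiting in total) before giving up and
# returning False.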
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
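# Illustrative behaviour (values are made up): the helper papers over dict-shaped vs
# SDK-model-shaped role assignments.
#   _get_role_property({'scope': '/subscriptions/xxx'}, 'scope')  -> '/subscriptions/xxx'
#   _get_role_property(assignment_model, 'scope')                 -> assignment_model.scope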
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
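# Worked example (hypothetical inputs): name='my_cluster', resource_group_name='my-rg!',
# subscription_id='0123456789abcdef' yields 'mycluster-my-rg-012345'. Characters other than
# letters, digits and hyphens are stripped, the name part is capped at 10 chars (prefixed with
# 'a' if it does not start with a letter), the resource group part at 16 chars, and only the
# first 6 chars of the subscription id are used.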
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
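# Illustrative return value when windows=True (credentials are placeholders):
#   {'adminUsername': 'azureuser', 'adminPassword': 'Str0ngPass!'}
# When windows is falsy the function returns None and no windowsProfile is emitted.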
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible values: StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible values: StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
:param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType, get_sdk
DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n, NoTTYException
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if addition is None:
    raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if context_name is not None:
    addition['contexts'][0]['name'] = context_name
    addition['contexts'][0]['context']['cluster'] = context_name
    addition['clusters'][0]['name'] = context_name
    addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
    try:
        if ctx['context']['user'].startswith('clusterAdmin'):
            admin_name = ctx['name'] + '-admin'
            addition['current-context'] = ctx['name'] = admin_name
            break
    except (KeyError, TypeError):
        continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
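# Illustrative usage (the shown value is made up; output is random):
#   _rand_str(16)  -> e.g. 'f3k9x0q2m7ab1cde'
# SystemRandom draws from os.urandom, so the proxy id generated for octarine above is not
# predictable from the process's PRNG seed.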
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
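# Note: on Python 3.2+ this helper is equivalent to os.makedirs(path, exist_ok=True); the
# explicit EEXIST check mirrors the classic recipe referenced above.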
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has a service principal, resolve the service principal client id to get the object id;
# otherwise use the MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
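# Worked examples (subscription id is a placeholder):
#   _build_role_scope(None, None, 'sub-0000')     -> '/subscriptions/sub-0000'
#   _build_role_scope('my-rg', None, 'sub-0000')  -> '/subscriptions/sub-0000/resourceGroups/my-rg'
# Passing both a resource group and an explicit scope raises CLIError (the group is redundant).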
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
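# Illustrative behaviour: the second dict wins on key collisions and neither input is mutated.
#   _update_dict({'count': 1, 'vmSize': 'A'}, {'count': 3})  -> {'count': 3, 'vmSize': 'A'}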
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
if not which('kubectl'):
raise CLIError('Cannot find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
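# Illustrative behaviour:
#   _trim_nodepoolname(None)                -> 'nodepool1'
#   _trim_nodepoolname('longnodepoolname')  -> 'longnodepool'  (truncated to 12 characters)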
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
('omsagent' in result.addon_profiles) and
(hasattr(result.addon_profiles['omsagent'], 'identity')) and
(hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
logger.warning('Could not find service principal or user assigned MSI for role '
'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
enable_managed_identity=False,
attach_acr=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
availability_zones=zones,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if enable_managed_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
api_server_access_profile=api_server_access_profile,
identity=identity
)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
if monitoring:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name, parameters=mc)
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
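# Note (illustrative only): ADDONS maps the addon names accepted on the command line (for example
# a hypothetical "az aks enable-addons -a monitoring,kube-dashboard") to the addon-profile keys
# used by the resource provider, i.e. the 'omsagent' and 'kubeDashboard' profiles in that case.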
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name, or use the "az aks nodepool" command to scale a node pool.')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if (update_autoscaler != 1 and not update_lb_profile and
not attach_acr and
not detach_acr and
api_server_authorized_ip_ranges is None):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--load-balancer-outbound-ports" or '
                       '"--load-balancer-idle-timeout" or '
                       '"--attach-acr" or "--detach-acr" or '
                       '"--api-server-authorized-ip-ranges"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
'to update per node pool auto scaler settings')
node_count = instance.agent_pool_profiles[0].count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
    # an empty string is valid: it disables IP whitelisting (authorized IP ranges)
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
no_wait=False, **kwargs):
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
from knack.prompting import prompt_y_n
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.'.format(resource_group_name=resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
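# Example (illustrative only, hypothetical argument values such as 'sub', 'rg' and '/ws'):
#   profiles = _handle_addons_args(cmd, 'monitoring,kube-dashboard', 'sub', 'rg', {}, '/ws')
#   # profiles['omsagent']      -> ManagedClusterAddonProfile(enabled=True,
#   #                                  config={'logAnalyticsWorkspaceResourceID': '/ws'})
#   # profiles['kubeDashboard'] -> ManagedClusterAddonProfile(enabled=True)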
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
    # currently Log Analytics is supported only in the China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
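# Note (illustrative only, hypothetical values): the default workspace created above follows the
# naming convention 'DefaultResourceGroup-<regionCode>' / 'DefaultWorkspace-<subscriptionId>-<regionCode>',
# e.g. a resource group located in eastus on subscription 0000 maps to the resource group
# 'DefaultResourceGroup-EUS' and the workspace 'DefaultWorkspace-0000-EUS'.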
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
tags=None,
labels=None,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
                raise CLIError('Taint does not match allowed values. Expected a value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
raise CLIError('Windows nodepool is not supported')
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
node_taints=taints_array
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
no_wait=False):
update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_flags != 1:
if update_flags != 0 or tags is None:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate that --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
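# Example (illustrative only, hypothetical values): _ensure_aks_service_principal returns a plain
# dict such as {'service_principal': '<appId>', 'client_secret': '<secret>'}, which aks_create then
# feeds into ManagedClusterServicePrincipalProfile.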
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is set temporarily since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add role first before save it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate that --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
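# Example (illustrative only, hypothetical value): _create_client_secret returns 20 hex characters
# followed by '$', e.g. something like '1f2e3d4c5b6a7988aabb$' (21 characters in total).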
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are only allowed when --enable-cluster-autoscaler is specified; please add that flag')
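# Example (illustrative only, hypothetical arguments; 'profile' is any agent pool profile object):
#   _check_cluster_autoscaler_flag(True, 1, 5, 3, profile)   # sets min_count=1, max_count=5,
#                                                            # enable_auto_scaling=True on profile
#   _check_cluster_autoscaler_flag(True, 5, 1, 3, profile)   # raises CLIError (min-count > max-count)
#   _check_cluster_autoscaler_flag(False, 1, 5, 3, profile)  # raises CLIError (counts without the flag)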
def _validate_autoscaler_update_counts(min_count, max_count, node_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("Current node count '{}' is not in the range of min-count and max-count.".format(node_count))
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
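# Note (illustrative only): _print_or_merge_credentials('-', kubeconfig, ...) prints the kubeconfig
# to stdout, while a real path (the default is ~/.kube/config) is created with 0o600 permissions if
# missing and then merged via merge_kubernetes_configurations.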
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
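# Example (illustrative only, hypothetical workspace ID):
#   _format_workspace_id(' subscriptions/00/resourcegroups/rg/providers/'
#                        'microsoft.operationalinsights/workspaces/ws/ ')
#   # -> '/subscriptions/00/resourcegroups/rg/providers/microsoft.operationalinsights/workspaces/ws'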
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Check that the cluster does not already exist, since AAD rotation is not yet supported on OSA
try:
client.get(resource_group_name, name)
except CloudError:
        # Check whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
# Null out the AAD profile and manually set the master agent pool profile name, because validation complains otherwise
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
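# Note (illustrative, not part of the original source): openshift_scale,
# openshift_monitor_enable and openshift_monitor_disable all follow the same
# read-modify-write pattern against the managed cluster resource, roughly:
#
#   instance = client.get(resource_group_name, name)    # read the current state
#   # ...mutate one field of `instance` in place...
#   sdk_no_wait(no_wait, client.create_or_update,        # write it back
#               resource_group_name, name, instance)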
|
ssh.py
|
from __future__ import absolute_import
from __future__ import division
import inspect
import logging
import os
import re
import shutil
import six
import string
import sys
import tarfile
import tempfile
import threading
import time
import types
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.log import getLogger
from pwnlib.term import text
from pwnlib.timeout import Timeout
from pwnlib.tubes.sock import sock
from pwnlib.util import hashes
from pwnlib.util import misc
from pwnlib.util import safeeval
from pwnlib.util.sh_string import sh_string
# Kill the warning line:
# No handlers could be found for logger "paramiko.transport"
paramiko_log = logging.getLogger("paramiko.transport")
h = logging.StreamHandler(open(os.devnull,'w+'))
h.setFormatter(logging.Formatter())
paramiko_log.addHandler(h)
class ssh_channel(sock):
#: Parent :class:`ssh` object
parent = None
#: Remote host
host = None
#: Return code, or :const:`None` if the process has not returned
#: Use :meth:`poll` to check.
returncode = None
#: :const:`True` if a tty was allocated for this channel
tty = False
#: Environment specified for the remote process, or :const:`None`
#: if the default environment was used
env = None
#: Command specified for the constructor
process = None
def __init__(self, parent, process = None, tty = False, wd = None, env = None, raw = True, *args, **kwargs):
super(ssh_channel, self).__init__(*args, **kwargs)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.returncode = None
self.host = parent.host
self.tty = tty
self.env = env
self.process = process
self.cwd = wd or '.'
if isinstance(wd, six.text_type):
wd = wd.encode('utf-8')
env = env or {}
msg = 'Opening new channel: %r' % (process or 'shell')
if isinstance(process, (list, tuple)):
process = b' '.join((lambda x:x.encode('utf-8') if isinstance(x, six.text_type) else x)(sh_string(s)) for s in process)
if isinstance(process, six.text_type):
process = process.encode('utf-8')
if process and wd:
process = b'cd ' + sh_string(wd) + b' >/dev/null 2>&1;' + process
if process and env:
for name, value in env.items():
if not re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', name):
self.error('run(): Invalid environment key %r' % name)
export = 'export %s=%s;' % (name, sh_string(value))
if isinstance(export, six.text_type):
export = export.encode('utf-8')
process = export + process
if process and tty:
if raw:
process = b'stty raw -ctlecho -echo; ' + process
else:
process = b'stty -ctlecho -echo; ' + process
# If this object is enabled for DEBUG-level logging, don't hide
# anything about the command that's actually executed.
if process and self.isEnabledFor(logging.DEBUG):
msg = 'Opening new channel: %r' % (process,)
with self.waitfor(msg) as h:
import paramiko
try:
self.sock = parent.transport.open_session()
except paramiko.ChannelException as e:
if e.args == (1, 'Administratively prohibited'):
self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
raise e
if self.tty:
self.sock.get_pty('xterm', term.width, term.height)
def resizer():
if self.sock:
try:
self.sock.resize_pty(term.width, term.height)
except paramiko.ssh_exception.SSHException:
pass
self.resizer = resizer
term.term.on_winch.append(self.resizer)
else:
self.resizer = None
# Put stderr on stdout. This might not always be desirable,
# but our API does not support multiple streams
self.sock.set_combine_stderr(True)
self.settimeout(self.timeout)
if process:
self.sock.exec_command(process)
else:
self.sock.invoke_shell()
h.success()
def kill(self):
"""kill()
Kills the process.
"""
self.close()
def recvall(self, timeout = sock.forever):
# We subclass tubes.sock which sets self.sock to None.
#
# However, we need to wait for the return value to propagate,
# which may not happen by the time .close() is called by tube.recvall()
tmp_sock = self.sock
tmp_close = self.close
self.close = lambda: None
timeout = self.maximum if timeout is self.forever else timeout
data = super(ssh_channel, self).recvall(timeout)
# Restore self.sock to be able to call wait()
self.close = tmp_close
self.sock = tmp_sock
self.wait()
self.close()
# Again set self.sock to None
self.sock = None
return data
def wait(self, timeout=sock.default):
# TODO: deal with timeouts
return self.poll(block=True)
def poll(self, block=False):
"""poll() -> int
Poll the exit code of the process. Will return None, if the
process has not yet finished and the exit code otherwise.
"""
if self.returncode is None and self.sock \
and (block or self.sock.exit_status_ready()):
while not self.sock.status_event.is_set():
self.sock.status_event.wait(0.05)
self.returncode = self.sock.recv_exit_status()
return self.returncode
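# Usage sketch (illustrative, assuming an already-connected `ssh` object `s`):
#
#   io = s.process('true')
#   io.wait()    # blocks via poll(block=True) until the remote process exits
#   io.poll()    # -> 0 once the exit status has been received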
def can_recv_raw(self, timeout):
with self.countdown(timeout):
while self.countdown_active():
if self.sock.recv_ready():
return True
time.sleep(min(self.timeout, 0.05))
return False
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
If not in TTY-mode, this does exactly the same as
:meth:`pwnlib.tubes.tube.tube.interactive`, otherwise
it does mostly the same.
An SSH connection in TTY-mode will typically supply its own prompt,
thus the prompt argument is ignored in this case.
We also have a few SSH-specific hacks that will ideally be removed
once the :mod:`pwnlib.term` is more mature.
"""
# If we are only executing a regular old shell, we need to handle
# control codes (specifically Ctrl+C).
#
# Otherwise, we can just punt to the default implementation of interactive()
if self.process is not None:
return super(ssh_channel, self).interactive(prompt)
self.info('Switching to interactive mode')
# We would like a cursor, please!
term.term.show_cursor()
event = threading.Event()
def recv_thread(event):
while not event.is_set():
try:
cur = self.recv(timeout = 0.05)
cur = cur.replace(b'\r\n',b'\n')
cur = cur.replace(b'\r',b'')
if cur is None:
continue
elif cur == b'\a':
# Ugly hack until term understands bell characters
continue
stdout = sys.stdout
if not term.term_mode:
stdout = getattr(stdout, 'buffer', stdout)
stdout.write(cur)
stdout.flush()
except EOFError:
self.info('Got EOF while reading in interactive')
event.set()
break
t = context.Thread(target = recv_thread, args = (event,))
t.daemon = True
t.start()
while not event.is_set():
if term.term_mode:
try:
data = term.key.getraw(0.1)
except KeyboardInterrupt:
data = [3] # This is ctrl-c
except IOError:
if not event.is_set():
raise
else:
stdin = getattr(sys.stdin, 'buffer', sys.stdin)
data = stdin.read(1)
if not data:
event.set()
else:
data = [six.byte2int(data)]
if data:
try:
self.send(b''.join(six.int2byte(c) for c in data))
except EOFError:
event.set()
self.info('Got EOF while sending in interactive')
while t.is_alive():
t.join(timeout = 0.1)
# Restore
term.term.hide_cursor()
def close(self):
self.poll()
while self.resizer in term.term.on_winch:
term.term.on_winch.remove(self.resizer)
super(ssh_channel, self).close()
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def _close_msg(self):
self.info('Closed SSH channel with %s' % self.host)
class ssh_process(ssh_channel):
#: Working directory
cwd = None
#: PID of the process
#: Only valid when instantiated through :meth:`ssh.process`
pid = None
#: Executable of the process
#: Only valid when instantiated through :meth:`ssh.process`
executable = None
#: Arguments passed to the process
#: Only valid when instantiated through :meth:`ssh.process`
argv = None
def libs(self):
"""libs() -> dict
Returns a dictionary mapping the address of each loaded library in the
process's address space.
If ``/proc/$PID/maps`` cannot be opened, the output of ldd is used
verbatim, which may be different than the actual addresses if ASLR
is enabled.
"""
maps = self.parent.libs(self.executable)
maps_raw = self.parent.cat('/proc/%d/maps' % self.pid)
for lib in maps:
remote_path = lib.split(self.parent.host)[-1]
for line in maps_raw.splitlines():
if line.endswith(remote_path):
address = line.split('-')[0]
maps[lib] = int(address, 16)
break
return maps
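# Illustrative shape of the return value (paths and addresses are hypothetical):
# keys are the locally cached copies of the remote libraries (the cache path
# embeds the remote host name), values are their load addresses in the process:
#
#   {'/work/example.pwnme/lib/x86_64-linux-gnu/libc.so.6': 0x7ffff79e4000,
#    '/work/example.pwnme/home/user/target':               0x555555554000}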
@property
def libc(self):
"""libc() -> ELF
Returns an ELF for the libc for the current process.
If possible, it is adjusted to the correct address
automatically.
"""
from pwnlib.elf import ELF
for lib, address in self.libs().items():
if 'libc.so' in lib:
e = ELF(lib)
e.address = address
return e
@property
def elf(self):
"""elf() -> pwnlib.elf.elf.ELF
Returns an ELF file for the executable that launched the process.
"""
import pwnlib.elf.elf
libs = self.parent.libs(self.executable)
for lib in libs:
# Cannot just check "executable in lib", see issue #1047
if lib.endswith(self.executable):
return pwnlib.elf.elf.ELF(lib)
@property
def corefile(self):
import pwnlib.elf.corefile
finder = pwnlib.elf.corefile.CorefileFinder(self)
if not finder.core_path:
self.error("Could not find core file for pid %i" % self.pid)
return pwnlib.elf.corefile.Corefile(finder.core_path)
def getenv(self, variable, **kwargs):
"""Retrieve the address of an environment variable in the remote process.
"""
argv0 = self.argv[0]
script = ';'.join(('from ctypes import *',
'import os',
'libc = CDLL("libc.so.6")',
'print(os.path.realpath(%r))' % self.executable,
'print(libc.getenv(%r))' % variable,))
try:
with context.local(log_level='error'):
python = self.parent.which('python')
if not python:
self.error("Python is not installed on the remote system.")
io = self.parent.process([argv0,'-c', script.strip()],
executable=python,
env=self.env,
**kwargs)
path = io.recvline()
address = int(io.recvline())
address -= len(python)
address += len(path)
return int(address) & context.mask
except:
self.exception("Could not look up environment variable %r" % variable)
def _close_msg(self):
# If we never completely started up, just use the parent implementation
if self.executable is None:
return super(ssh_process, self)._close_msg()
self.info('Stopped remote process %r on %s (pid %i)' \
% (os.path.basename(self.executable),
self.host,
self.pid))
class ssh_connecter(sock):
def __init__(self, parent, host, port, *a, **kw):
super(ssh_connecter, self).__init__(*a, **kw)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.host = parent.host
self.rhost = host
self.rport = port
msg = 'Connecting to %s:%d via SSH to %s' % (self.rhost, self.rport, self.host)
with self.waitfor(msg) as h:
try:
self.sock = parent.transport.open_channel('direct-tcpip', (host, port), ('127.0.0.1', 0))
except Exception as e:
self.exception(str(e))
raise
try:
# Iterate all layers of proxying to get to base-level Socket object
curr = self.sock.get_transport().sock
while getattr(curr, "get_transport", None):
curr = curr.get_transport().sock
sockname = curr.getsockname()
self.lhost = sockname[0]
self.lport = sockname[1]
except Exception as e:
self.exception("Could not find base-level Socket object.")
raise e
h.success()
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def _close_msg(self):
self.info("Closed remote connection to %s:%d via SSH connection to %s" % (self.rhost, self.rport, self.host))
class ssh_listener(sock):
def __init__(self, parent, bind_address, port, *a, **kw):
super(ssh_listener, self).__init__(*a, **kw)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.host = parent.host
try:
self.port = parent.transport.request_port_forward(bind_address, port)
except Exception:
self.exception('Failed to create a port forwarding')
raise
def accepter():
msg = 'Waiting on port %d via SSH to %s' % (self.port, self.host)
h = self.waitfor(msg)
try:
self.sock = parent.transport.accept()
parent.transport.cancel_port_forward(bind_address, self.port)
except Exception:
self.sock = None
h.failure()
self.exception('Failed to get a connection')
return
self.rhost, self.rport = self.sock.origin_addr
h.success('Got connection from %s:%d' % (self.rhost, self.rport))
self._accepter = context.Thread(target = accepter)
self._accepter.daemon = True
self._accepter.start()
def _close_msg(self):
self.info("Closed remote connection to %s:%d via SSH listener on port %d via %s" % (self.rhost, self.rport, self.port, self.host))
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def wait_for_connection(self):
"""Blocks until a connection has been established."""
_ = self.sock
return self
def __getattr__(self, key):
if key == 'sock':
while self._accepter.is_alive():
self._accepter.join(timeout = 0.1)
return self.sock
else:
return getattr(super(ssh_listener, self), key)
class ssh(Timeout, Logger):
#: Remote host name (``str``)
host = None
#: Remote port (``int``)
port = None
#: Working directory (``str``)
cwd = None
#: Enable caching of SSH downloads (``bool``)
cache = True
#: Paramiko SSHClient which backs this object
client = None
#: Paramiko SFTPClient object which is used for file transfers.
#: Set to :const:`None` to disable ``sftp``.
sftp = None
#: PID of the remote ``sshd`` process servicing this connection.
pid = None
def __init__(self, user=None, host=None, port=22, password=None, key=None,
keyfile=None, proxy_command=None, proxy_sock=None,
level=None, cache=True, ssh_agent=False, *a, **kw):
"""Creates a new ssh connection.
Arguments:
user(str): The username to log in with
host(str): The hostname to connect to
port(int): The port to connect to
password(str): Try to authenticate using this password
key(str): Try to authenticate using this private key. The string should be the actual private key.
keyfile(str): Try to authenticate using this private key. The string should be a filename.
proxy_command(str): Use this as a proxy command. It has approximately the same semantics as ProxyCommand from ssh(1).
proxy_sock(str): Use this socket instead of connecting to the host.
timeout: Timeout, in seconds
level: Log level
cache: Cache downloaded files (by hash/size/timestamp)
ssh_agent: If :const:`True`, enable usage of keys via ssh-agent
NOTE: The proxy_command and proxy_sock arguments are only available if a
fairly new version of paramiko is used.
Example proxying:
>>> s1 = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> r1 = s1.remote('localhost', 22)
>>> s2 = ssh(host='example.pwnme',
... user='travis',
... password='demopass',
... proxy_sock=r1.sock)
>>> r2 = s2.remote('localhost', 22) # and so on...
>>> for x in r2, s2, r1, s1: x.close()
"""
super(ssh, self).__init__(*a, **kw)
Logger.__init__(self)
if level is not None:
self.setLevel(level)
self.host = host
self.port = port
self.user = user
self.password = password
self.key = key
self.keyfile = keyfile
self._cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-ssh-cache')
self.cwd = '.'
self.cache = cache
# Deferred attributes
self._platform_info = {}
self._aslr = None
self._aslr_ulimit = None
misc.mkdir_p(self._cachedir)
# This is a dirty hack to make my Yubikey shut up.
# If anybody has a problem with this, please open a bug and I'll
# figure out a better workaround.
if not ssh_agent:
os.environ.pop('SSH_AUTH_SOCK', None)
import paramiko
# Make a basic attempt to parse the ssh_config file
try:
config_file = os.path.expanduser('~/.ssh/config')
if os.path.exists(config_file):
ssh_config = paramiko.SSHConfig()
ssh_config.parse(open(config_file))
host_config = ssh_config.lookup(host)
if 'hostname' in host_config:
self.host = host = host_config['hostname']
if not keyfile and 'identityfile' in host_config:
keyfile = host_config['identityfile'][0]
if keyfile.lower() == 'none':
keyfile = None
except Exception as e:
self.debug("An error occurred while parsing ~/.ssh/config:\n%s" % e)
keyfiles = [os.path.expanduser(keyfile)] if keyfile else []
msg = 'Connecting to %s on port %d' % (host, port)
with self.waitfor(msg) as h:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
known_hosts = os.path.expanduser('~/.ssh/known_hosts')
if os.path.exists(known_hosts):
self.client.load_host_keys(known_hosts)
has_proxy = (proxy_sock or proxy_command) and True
if has_proxy:
if 'ProxyCommand' not in dir(paramiko):
self.error('This version of paramiko does not support proxies.')
if proxy_sock and proxy_command:
self.error('Cannot have both a proxy command and a proxy sock')
if proxy_command:
proxy_sock = paramiko.ProxyCommand(proxy_command)
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True, sock = proxy_sock)
else:
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True)
self.transport = self.client.get_transport()
self.transport.use_compression(True)
h.success()
self._tried_sftp = False
with context.local(log_level='error'):
def getppid():
print(os.getppid())
try:
self.pid = int(self.process('false', preexec_fn=getppid).recvall())
except Exception:
self.pid = None
try:
self.info_once(self.checksec())
except Exception:
self.warn_once("Couldn't check security settings on %r" % self.host)
@property
def sftp(self):
if not self._tried_sftp:
try:
self._sftp = self.transport.open_sftp_client()
except Exception:
self._sftp = None
self._tried_sftp = True
return self._sftp
@sftp.setter
def sftp(self, value):
self._sftp = value
self._tried_sftp = True
def __enter__(self, *a):
return self
def __exit__(self, *a, **kw):
self.close()
def shell(self, shell = None, tty = True, timeout = Timeout.default):
"""shell(shell = None, tty = True, timeout = Timeout.default) -> ssh_channel
Open a new channel with a shell inside.
Arguments:
shell(str): Path to the shell program to run.
If :const:`None`, uses the default shell for the logged in user.
tty(bool): If :const:`True`, then a TTY is requested on the remote server.
Returns:
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> sh = s.shell('/bin/sh')
>>> sh.sendline(b'echo Hello; exit')
>>> print(b'Hello' in sh.recvall())
True
"""
return self.run(shell, tty, timeout = timeout)
def process(self, argv=None, executable=None, tty=True, cwd=None, env=None, timeout=Timeout.default, run=True,
stdin=0, stdout=1, stderr=2, preexec_fn=None, preexec_args=(), raw=True, aslr=None, setuid=None,
shell=False):
r"""
Executes a process on the remote server, in the same fashion
as pwnlib.tubes.process.process.
To achieve this, a Python script is created to call ``os.execve``
with the appropriate arguments.
As an added bonus, the ``ssh_channel`` object returned has a
``pid`` property for the process pid.
Arguments:
argv(list):
List of arguments to pass into the process
executable(str):
Path to the executable to run.
If :const:`None`, ``argv[0]`` is used.
tty(bool):
Request a `tty` from the server. This usually fixes buffering problems
by causing `libc` to write data immediately rather than buffering it.
However, this disables interpretation of control codes (e.g. Ctrl+C)
and breaks `.shutdown`.
cwd(str):
Working directory. If :const:`None`, uses the working directory specified
on :attr:`cwd` or set via :meth:`set_working_directory`.
env(dict):
Environment variables to set in the child. If :const:`None`, inherits the
default environment.
timeout(int):
Timeout to set on the `tube` created to interact with the process.
run(bool):
Set to :const:`True` to run the program (default).
If :const:`False`, returns the path to an executable Python script on the
remote server which, when executed, will do it.
stdin(int, str):
If an integer, replace stdin with the numbered file descriptor.
If a string, open a file with the specified path and replace
stdin with its file descriptor. May also be one of ``sys.stdin``,
``sys.stdout``, ``sys.stderr``. If :const:`None`, the file descriptor is closed.
stdout(int, str):
See ``stdin``.
stderr(int, str):
See ``stdin``.
preexec_fn(callable):
Function which is executed on the remote side before execve().
This **MUST** be a self-contained function -- it must perform
all of its own imports, and cannot refer to variables outside
its scope.
preexec_args(object):
Argument passed to ``preexec_fn``.
This **MUST** only consist of native Python objects.
raw(bool):
If :const:`True`, disable TTY control code interpretation.
aslr(bool):
See :class:`pwnlib.tubes.process.process` for more information.
setuid(bool):
See :class:`pwnlib.tubes.process.process` for more information.
shell(bool):
Pass the command-line arguments to the shell.
Returns:
A new SSH channel, or a path to a script if ``run=False``.
Notes:
Requires Python on the remote server.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> sh = s.process('/bin/sh', env={'PS1':''})
>>> sh.sendline(b'echo Hello; exit')
>>> sh.recvall()
b'Hello\n'
>>> s.process(['/bin/echo', b'\xff']).recvall()
b'\xff\n'
>>> s.process(['readlink', '/proc/self/exe']).recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
b'LOLOLOL\x00/proc/self/cmdline\x00'
>>> sh = s.process(executable='/bin/sh')
>>> str(sh.pid).encode() in s.pidof('sh') # doctest: +SKIP
True
>>> s.process(['pwd'], cwd='/tmp').recvall()
b'/tmp\n'
>>> p = s.process(['python','-c','import os; os.write(1, os.read(2, 1024))'], stderr=0)
>>> p.send(b'hello')
>>> p.recv()
b'hello'
>>> s.process(['/bin/echo', 'hello']).recvall()
b'hello\n'
>>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
b''
>>> s.process(['/usr/bin/env'], env={}).recvall()
b''
>>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
b'A=B\n'
>>> s.process('false', preexec_fn=1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn must be a function
>>> s.process('false', preexec_fn=lambda: 1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn cannot be a lambda
>>> def uses_globals():
... foo = bar
>>> print(s.process('false', preexec_fn=uses_globals).recvall().strip().decode()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ... name 'bar' is not defined
>>> s.process('echo hello', shell=True).recvall()
b'hello\n'
"""
if not argv and not executable:
self.error("Must specify argv or executable")
argv = argv or []
aslr = aslr if aslr is not None else context.aslr
if isinstance(argv, (six.text_type, six.binary_type)):
argv = [argv]
if not isinstance(argv, (list, tuple)):
self.error('argv must be a list or tuple')
if not all(isinstance(arg, (six.text_type, six.binary_type)) for arg in argv):
self.error("argv must be strings or bytes: %r" % argv)
if shell:
if len(argv) != 1:
self.error('Cannot provide more than 1 argument if shell=True')
argv = ['/bin/sh', '-c'] + argv
# Create a duplicate so we can modify it
argv = list(argv or [])
# Python doesn't like when an arg in argv contains '\x00'
# -> execve() arg 2 must contain only strings
for i, oarg in enumerate(argv):
if isinstance(oarg, six.text_type):
arg = oarg.encode('utf-8')
else:
arg = oarg
if b'\x00' in arg[:-1]:
self.error('Inappropriate nulls in argv[%i]: %r' % (i, oarg))
argv[i] = bytearray(arg.rstrip(b'\x00'))
if env is not None and not isinstance(env, dict) and env != os.environ:
self.error("env must be a dict: %r" % env)
# Converts the environment variables to a list of tuples to retain order.
env2 = []
# Python also doesn't like when envp contains '\x00'
if env and hasattr(env, 'items'):
for k, v in env.items():
if isinstance(k, six.text_type):
k = k.encode('utf-8')
if isinstance(v, six.text_type):
v = v.encode('utf-8')
if b'\x00' in k[:-1]:
self.error('Inappropriate nulls in environment key %r' % k)
if b'\x00' in v[:-1]:
self.error('Inappropriate nulls in environment value %r=%r' % (k, v))
env2.append((bytearray(k.rstrip(b'\x00')), bytearray(v.rstrip(b'\x00'))))
env = env2 or env
executable = executable or argv[0]
cwd = cwd or self.cwd
# Validate, since failures on the remote side will suck.
if not isinstance(executable, (six.text_type, six.binary_type, bytearray)):
self.error("executable / argv[0] must be a string: %r" % executable)
executable = context._decode(executable)
# Allow passing in sys.stdin/stdout/stderr objects
handles = {sys.stdin: 0, sys.stdout:1, sys.stderr:2}
stdin = handles.get(stdin, stdin)
stdout = handles.get(stdout, stdout)
stderr = handles.get(stderr, stderr)
# Allow the user to provide a self-contained function to run
def func(): pass
func = preexec_fn or func
func_args = preexec_args
if not isinstance(func, types.FunctionType):
self.error("preexec_fn must be a function")
func_name = func.__name__
if func_name == (lambda: 0).__name__:
self.error("preexec_fn cannot be a lambda")
func_src = inspect.getsource(func).strip()
setuid = True if setuid is None else bool(setuid)
script = r"""
#!/usr/bin/env python
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
try:
integer_types = int, long
except NameError:
integer_types = int,
exe = %(executable)r
argv = [bytes(a) for a in %(argv)r]
env = %(env)r
os.chdir(%(cwd)r)
if env is not None:
env = OrderedDict((bytes(k), bytes(v)) for k,v in env)
os.environ.clear()
getattr(os, 'environb', os.environ).update(env)
else:
env = os.environ
def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = os.environ.get('PATH','').split(os.pathsep)
if os.path.sep not in exe and not is_exe(exe):
for path in PATH:
test_path = os.path.join(path, exe)
if is_exe(test_path):
exe = test_path
break
if not is_exe(exe):
sys.stderr.write('3\n')
sys.stderr.write("{} is not executable or does not exist in $PATH: {}".format(exe,PATH))
sys.exit(-1)
if not %(setuid)r:
PR_SET_NO_NEW_PRIVS = 38
result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
if result != 0:
sys.stdout.write('3\n')
sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
sys.exit(-1)
try:
PR_SET_PTRACER = 0x59616d61
PR_SET_PTRACER_ANY = -1
ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
if (st.st_mode & stat.S_ISUID):
suid = st.st_uid
if (st.st_mode & stat.S_ISGID):
sgid = st.st_gid
if sys.argv[-1] == 'check':
sys.stdout.write("1\n")
sys.stdout.write(str(os.getpid()) + "\n")
sys.stdout.write(str(os.getuid()) + "\n")
sys.stdout.write(str(os.getgid()) + "\n")
sys.stdout.write(str(suid) + "\n")
sys.stdout.write(str(sgid) + "\n")
sys.stdout.write(os.path.realpath(exe) + '\x00')
sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
if newfd is None:
os.close(fd)
elif isinstance(newfd, (str, bytes)):
newfd = os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
os.dup2(newfd, fd)
os.close(newfd)
elif isinstance(newfd, integer_types) and newfd != fd:
os.dup2(newfd, fd)
if not %(aslr)r:
if platform.system().lower() == 'linux' and %(setuid)r is not True:
ADDR_NO_RANDOMIZE = 0x0040000
ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
with open('/proc/self/coredump_filter', 'w') as core_filter:
core_filter.write('0x3f\n')
except Exception:
pass
# Assume that the user would prefer to have core dumps.
try:
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
pass
%(func_src)s
%(func_name)s(*%(func_args)r)
os.execve(exe, argv, env)
""" % locals()
script = script.strip()
self.debug("Created execve script:\n" + script)
if not run:
with context.local(log_level='error'):
tmpfile = self.mktemp('-t', 'pwnlib-execve-XXXXXXXXXX')
self.chmod('+x', tmpfile)
self.info("Uploading execve script to %r" % tmpfile)
self.upload_data(script, tmpfile)
return tmpfile
if self.isEnabledFor(logging.DEBUG):
execve_repr = "execve(%r, %s, %s)" % (executable,
argv,
'os.environ'
if (env in (None, os.environ))
else env)
# Avoid spamming the screen
if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
execve_repr = execve_repr[:512] + '...'
else:
execve_repr = repr(executable)
msg = 'Starting remote process %s on %s' % (execve_repr, self.host)
with self.progress(msg) as h:
script = 'for py in python2.7 python2 python; do test -x "$(which $py 2>&1)" && exec $py -c %s check; done; echo 2' % sh_string(script)
with context.quiet:
python = ssh_process(self, script, tty=True, raw=True, level=self.level, timeout=self.timeout)
try:
result = safeeval.const(python.recvline())
except (EOFError, ValueError):
h.failure("Process creation failed")
self.warn_once('Could not find a Python interpreter on %s\n' % self.host \
+ "Use ssh.run() instead of ssh.process()")
return None
# If an error occurred, try to grab as much output
# as we can.
if result != 1:
error_message = python.recvrepeat(timeout=1)
if result == 0:
self.error("%r does not exist or is not executable" % executable)
elif result == 3:
self.error(error_message)
elif result == 2:
self.error("python is not installed on the remote system %r" % self.host)
elif result != 1:
h.failure("something bad happened:\n%s" % error_message)
python.pid = safeeval.const(python.recvline())
python.uid = safeeval.const(python.recvline())
python.gid = safeeval.const(python.recvline())
python.suid = safeeval.const(python.recvline())
python.sgid = safeeval.const(python.recvline())
python.argv = argv
python.executable = context._decode(python.recvuntil(b'\x00')[:-1])
h.success('pid %i' % python.pid)
if not aslr and setuid and (python.uid != python.suid or python.gid != python.sgid):
effect = "partial" if self.aslr_ulimit else "no"
message = "Specfied aslr=False on setuid binary %s\n" % python.executable
message += "This will have %s effect. Add setuid=False to disable ASLR for debugging.\n" % effect
if self.aslr_ulimit:
message += "Unlimited stack size should de-randomize shared libraries."
self.warn_once(message)
elif not aslr:
self.warn_once("ASLR is disabled for %r!" % python.executable)
return python
def which(self, program):
"""which(program) -> str
A thin wrapper around directly invoking ``which`` on the remote
system, which also adds the current working directory to the end of ``$PATH``.
"""
# If name is a path, do not attempt to resolve it.
if os.path.sep in program:
return program
result = self.run('export PATH=$PATH:$PWD; which %s' % program).recvall().strip().decode()
if ('/%s' % program) not in result:
return None
return result
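# Usage sketch (illustrative; the resolved path depends on the remote system):
#
#   s.which('sh')           # -> '/bin/sh'
#   s.which('./local-bin')  # returned as-is, since it contains a path separator
#   s.which('nonexistent')  # -> None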
def system(self, process, tty = True, wd = None, env = None, timeout = None, raw = True):
r"""system(process, tty = True, wd = None, env = None, timeout = Timeout.default, raw = True) -> ssh_channel
Open a new channel with a specific process inside. If `tty` is True,
then a TTY is requested on the remote server.
If `raw` is True, terminal control codes are ignored and input is not
echoed back.
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> py = s.run('python -i')
>>> _ = py.recvuntil(b'>>> ')
>>> py.sendline(b'print(2+2)')
>>> py.sendline(b'exit')
>>> print(repr(py.recvline()))
b'4\n'
"""
if wd is None:
wd = self.cwd
if timeout is None:
timeout = self.timeout
return ssh_channel(self, process, tty, wd, env, timeout = timeout, level = self.level, raw = raw)
#: Backward compatibility. Use :meth:`system`
run = system
def getenv(self, variable, **kwargs):
"""Retrieve the address of an environment variable on the remote
system.
Note:
The exact address will differ based on what other environment
variables are set, as well as argv[0]. In order to ensure that
the path is *exactly* the same, it is recommended to invoke the
process with ``argv=[]``.
"""
script = '''
from ctypes import *; libc = CDLL('libc.so.6'); print(libc.getenv(%r))
''' % variable
with context.local(log_level='error'):
python = self.which('python')
if not python:
self.error("Python is not installed on the remote system.")
io = self.process(['','-c', script.strip()], executable=python, **kwargs)
result = io.recvall()
try:
return int(result) & context.mask
except ValueError:
self.exception("Could not look up environment variable %r" % variable)
def run_to_end(self, process, tty = False, wd = None, env = None):
r"""run_to_end(process, tty = False, timeout = Timeout.default, env = None) -> str
Run a command on the remote server and return a tuple with
(data, exit_status). If `tty` is True, then the command is run inside
a TTY on the remote server.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> print(s.run_to_end('echo Hello; exit 17'))
(b'Hello\n', 17)
"""
with context.local(log_level = 'ERROR'):
c = self.run(process, tty, wd = wd, timeout = Timeout.default)
data = c.recvall()
retcode = c.wait()
c.close()
return data, retcode
def connect_remote(self, host, port, timeout = Timeout.default):
r"""connect_remote(host, port, timeout = Timeout.default) -> ssh_connecter
Connects to a host through an SSH connection. This is equivalent to
using the ``-L`` flag on ``ssh``.
Returns a :class:`pwnlib.tubes.ssh.ssh_connecter` object.
Examples:
>>> from pwn import *
>>> l = listen()
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> a = s.connect_remote(s.host, l.lport)
>>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
>>> a.sendline(b'Hello')
>>> print(repr(b.recvline()))
b'Hello\n'
"""
return ssh_connecter(self, host, port, timeout, level=self.level)
remote = connect_remote
def listen_remote(self, port = 0, bind_address = '', timeout = Timeout.default):
r"""listen_remote(port = 0, bind_address = '', timeout = Timeout.default) -> ssh_connecter
Listens remotely through an SSH connection. This is equivalent to
using the ``-R`` flag on ``ssh``.
Returns a :class:`pwnlib.tubes.ssh.ssh_listener` object.
Examples:
>>> from pwn import *
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> l = s.listen_remote()
>>> a = remote(s.host, l.port)
>>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
>>> a.sendline(b'Hello')
>>> print(repr(b.recvline()))
b'Hello\n'
"""
return ssh_listener(self, bind_address, port, timeout, level=self.level)
listen = listen_remote
def __getitem__(self, attr):
"""Permits indexed access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> print(repr(s['echo hello']))
b'hello'
"""
return self.__getattr__(attr)()
def __call__(self, attr):
"""Permits function-style access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> print(repr(s('echo hello')))
b'hello'
"""
return self.__getattr__(attr)()
def __getattr__(self, attr):
"""Permits member access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.echo('hello')
b'hello'
>>> s.whoami()
b'travis'
>>> s.echo(['huh','yay','args'])
b'huh yay args'
"""
bad_attrs = [
'trait_names', # ipython tab-complete
]
if attr in self.__dict__ \
or attr in bad_attrs \
or attr.startswith('_'):
raise AttributeError
def runner(*args):
if len(args) == 1 and isinstance(args[0], (list, tuple)):
command = [attr] + args[0]
else:
command = ' '.join((attr,) + args)
return self.run(command).recvall().strip()
return runner
def connected(self):
"""Returns True if we are connected.
Example:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.connected()
True
>>> s.close()
>>> s.connected()
False
"""
return bool(self.client and self.client.get_transport().is_active())
def close(self):
"""Close the connection."""
if self.client:
self.client.close()
self.client = None
self.info("Closed connection to %r" % self.host)
def _libs_remote(self, remote):
"""Return a dictionary of the libraries used by a remote file."""
escaped_remote = sh_string(remote)
cmd = ''.join([
'(',
'ulimit -s unlimited;',
'ldd %s > /dev/null &&' % escaped_remote,
'(',
'LD_TRACE_LOADED_OBJECTS=1 %s||' % escaped_remote,
'ldd %s' % escaped_remote,
'))',
' 2>/dev/null'
])
data, status = self.run_to_end(cmd)
if status != 0:
self.error('Unable to find libraries for %r' % remote)
return {}
return misc.parse_ldd_output(context._decode(data))
def _get_fingerprint(self, remote):
cmd = '(sha256 || sha256sum || openssl sha256) 2>/dev/null < '
cmd = cmd + sh_string(remote)
data, status = self.run_to_end(cmd)
if status != 0:
return None
# OpenSSL outputs in the format of...
# (stdin)= e3b0c4429...
data = data.replace(b'(stdin)= ',b'')
# sha256 and sha256sum outputs in the format of...
# e3b0c442... -
data = data.replace(b'-',b'').strip()
if not isinstance(data, str):
data = data.decode('ascii')
return data
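# Illustrative normalization (the hash shown is the well-known SHA-256 of empty input):
#
#   b'(stdin)= e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n'
#   b'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855  -\n'
#
# both become 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'.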
def _get_cachefile(self, fingerprint):
return os.path.join(self._cachedir, fingerprint)
def _verify_local_fingerprint(self, fingerprint):
if not set(fingerprint).issubset(string.hexdigits) or \
len(fingerprint) != 64:
self.error('Invalid fingerprint %r' % fingerprint)
return False
local = self._get_cachefile(fingerprint)
if not os.path.isfile(local):
return False
if hashes.sha256filehex(local) == fingerprint:
return True
else:
os.unlink(local)
return False
def _download_raw(self, remote, local, h):
def update(has, total):
h.status("%s/%s" % (misc.size(has), misc.size(total)))
if self.sftp:
try:
self.sftp.get(remote, local, update)
return
except IOError:
pass
cmd = 'wc -c < ' + sh_string(remote)
total, exitcode = self.run_to_end(cmd)
if exitcode != 0:
h.failure("%r does not exist or is not accessible" % remote)
return
total = int(total)
with context.local(log_level = 'ERROR'):
cmd = 'cat < ' + sh_string(remote)
c = self.run(cmd)
data = b''
while True:
try:
data += c.recv()
except EOFError:
break
update(len(data), total)
result = c.wait()
if result != 0:
h.failure('Could not download file %r (%r)' % (remote, result))
return
with open(local, 'wb') as fd:
fd.write(data)
def _download_to_cache(self, remote, p):
with context.local(log_level='error'):
remote = self.readlink('-f',remote)
if not hasattr(remote, 'encode'):
remote = remote.decode('utf-8')
fingerprint = self._get_fingerprint(remote)
if fingerprint is None:
local = os.path.normpath(remote)
local = os.path.basename(local)
local += time.strftime('-%Y-%m-%d-%H:%M:%S')
local = os.path.join(self._cachedir, local)
self._download_raw(remote, local, p)
return local
local = self._get_cachefile(fingerprint)
if self.cache and self._verify_local_fingerprint(fingerprint):
p.success('Found %r in ssh cache' % remote)
else:
self._download_raw(remote, local, p)
if not self._verify_local_fingerprint(fingerprint):
p.failure('Could not download file %r' % remote)
return local
def download_data(self, remote):
"""Downloads a file from the remote server and returns it as a string.
Arguments:
remote(str): The remote filename to download.
Examples:
>>> with open('/tmp/bar','w+') as f:
... _ = f.write('Hello, world')
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass',
... cache=False)
>>> s.download_data('/tmp/bar')
b'Hello, world'
>>> s._sftp = None
>>> s._tried_sftp = True
>>> s.download_data('/tmp/bar')
b'Hello, world'
"""
with self.progress('Downloading %r' % remote) as p:
with open(self._download_to_cache(remote, p), 'rb') as fd:
return fd.read()
def download_file(self, remote, local = None):
"""Downloads a file from the remote server.
The file is cached in /tmp/pwntools-ssh-cache using a hash of the file, so
calling the function twice has little overhead.
Arguments:
remote(str): The remote filename to download
local(str): The local filename to save it to. Default is to infer it from the remote filename.
"""
if not local:
local = os.path.basename(os.path.normpath(remote))
if os.path.basename(remote) == remote:
remote = os.path.join(self.cwd, remote)
with self.progress('Downloading %r to %r' % (remote, local)) as p:
local_tmp = self._download_to_cache(remote, p)
# Check to see if an identical copy of the file already exists
if not os.path.exists(local) or hashes.sha256filehex(local_tmp) != hashes.sha256filehex(local):
shutil.copy2(local_tmp, local)
def download_dir(self, remote=None, local=None):
"""Recursively downloads a directory from the remote server
Arguments:
local: Local directory
remote: Remote directory
"""
remote = remote or self.cwd
if self.sftp:
remote = str(self.sftp.normalize(remote))
else:
with context.local(log_level='error'):
remote = self.system('readlink -f ' + sh_string(remote))
basename = os.path.basename(remote)
local = local or '.'
local = os.path.expanduser(local)
self.info("Downloading %r to %r" % (basename,local))
with context.local(log_level='error'):
remote_tar = self.mktemp()
cmd = 'tar -C %s -czf %s %s' % \
(sh_string(remote),
sh_string(remote_tar),
sh_string(basename))
tar = self.system(cmd)
if 0 != tar.wait():
self.error("Could not create remote tar")
local_tar = tempfile.NamedTemporaryFile(suffix='.tar.gz')
self.download_file(remote_tar, local_tar.name)
tar = tarfile.open(local_tar.name)
tar.extractall(local)
def upload_data(self, data, remote):
"""Uploads some data into a file on the remote server.
Arguments:
data(str): The data to upload.
remote(str): The filename to upload it to.
Example:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.upload_data(b'Hello, world', '/tmp/upload_foo')
>>> print(open('/tmp/upload_foo').read())
Hello, world
>>> s._sftp = False
>>> s._tried_sftp = True
>>> s.upload_data(b'Hello, world', '/tmp/upload_bar')
>>> print(open('/tmp/upload_bar').read())
Hello, world
"""
data = context._encode(data)
# If a relative path was provided, prepend the cwd
if os.path.normpath(remote) == os.path.basename(remote):
remote = os.path.join(self.cwd, remote)
if self.sftp:
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
self.sftp.put(f.name, remote)
return
with context.local(log_level = 'ERROR'):
cmd = 'cat > ' + sh_string(remote)
s = self.run(cmd, tty=False)
s.send(data)
s.shutdown('send')
data = s.recvall()
result = s.wait()
if result != 0:
self.error("Could not upload file %r (%r)\n%s" % (remote, result, data))
def upload_file(self, filename, remote = None):
"""Uploads a file to the remote server. Returns the remote filename.
Arguments:
filename(str): The local filename to download
remote(str): The remote filename to save it to. Default is to infer it from the local filename."""
if remote is None:
remote = os.path.normpath(filename)
remote = os.path.basename(remote)
remote = os.path.join(self.cwd, remote)
with open(filename, 'rb') as fd:
data = fd.read()
self.info("Uploading %r to %r" % (filename,remote))
self.upload_data(data, remote)
return remote
def upload_dir(self, local, remote=None):
"""Recursively uploads a directory onto the remote server
Arguments:
local: Local directory
remote: Remote directory
"""
remote = remote or self.cwd
local = os.path.expanduser(local)
dirname = os.path.dirname(local)
basename = os.path.basename(local)
if not os.path.isdir(local):
self.error("%r is not a directory" % local)
msg = "Uploading %r to %r" % (basename,remote)
with self.waitfor(msg):
# Generate a tarfile with everything inside of it
local_tar = tempfile.mktemp()
with tarfile.open(local_tar, 'w:gz') as tar:
tar.add(local, basename)
# Upload and extract it
with context.local(log_level='error'):
remote_tar = self.mktemp('--suffix=.tar.gz')
self.upload_file(local_tar, remote_tar)
untar = self.run('cd %s && tar -xzf %s' % (remote, remote_tar))
message = untar.recvrepeat(2)
if untar.wait() != 0:
self.error("Could not untar %r on the remote end\n%s" % (remote_tar, message))
def upload(self, file_or_directory, remote=None):
"""upload(file_or_directory, remote=None)
Upload a file or directory to the remote host.
Arguments:
file_or_directory(str): Path to the file or directory to upload.
remote(str): Remote path to store the data.
By default, uses the working directory.
"""
if isinstance(file_or_directory, str):
file_or_directory = os.path.expanduser(file_or_directory)
file_or_directory = os.path.expandvars(file_or_directory)
if os.path.isfile(file_or_directory):
return self.upload_file(file_or_directory, remote)
if os.path.isdir(file_or_directory):
return self.upload_dir(file_or_directory, remote)
self.error('%r does not exist' % file_or_directory)
def download(self, file_or_directory, local=None):
"""download(file_or_directory, local=None)
Download a file or directory from the remote host.
Arguments:
file_or_directory(str): Path to the file or directory to download.
local(str): Local path to store the data.
By default, uses the current directory.
"""
if not self.sftp:
self.error("Cannot determine remote file type without SFTP")
with self.system('test -d ' + sh_string(file_or_directory)) as io:
is_dir = io.wait()
if 0 == is_dir:
self.download_dir(file_or_directory, local)
else:
self.download_file(file_or_directory, local)
put = upload
get = download
def unlink(self, file):
"""unlink(file)
Delete the file on the remote host
Arguments:
file(str): Path to the file
"""
if not self.sftp:
self.error("unlink() is only supported if SFTP is supported")
return self.sftp.unlink(file)
def libs(self, remote, directory = None):
"""Downloads the libraries referred to by a file.
This is done by running ldd on the remote server, parsing the output
and downloading the relevant files.
The directory argument specified where to download the files. This defaults
to './$HOSTNAME' where $HOSTNAME is the hostname of the remote server."""
libs = self._libs_remote(remote)
remote = context._decode(self.readlink('-f',remote).strip())
libs[remote] = 0
if directory is None:
directory = self.host
directory = os.path.realpath(directory)
res = {}
seen = set()
for lib, addr in libs.items():
local = os.path.realpath(os.path.join(directory, '.' + os.path.sep + lib))
if not local.startswith(directory):
self.warning('This seems fishy: %r' % lib)
continue
misc.mkdir_p(os.path.dirname(local))
if lib not in seen:
self.download_file(lib, local)
seen.add(lib)
res[local] = addr
return res
def interactive(self, shell=None):
"""Create an interactive session.
This is a simple wrapper for creating a new
:class:`pwnlib.tubes.ssh.ssh_channel` object and calling
:meth:`pwnlib.tubes.ssh.ssh_channel.interactive` on it."""
s = self.shell(shell)
if self.cwd != '.':
cmd = 'cd ' + sh_string(self.cwd)
s.sendline(cmd)
s.interactive()
s.close()
def set_working_directory(self, wd = None, symlink = False):
"""Sets the working directory in which future commands will
be run (via ssh.run) and to which files will be uploaded/downloaded
from if no path is provided
Note:
This uses ``mktemp -d`` under the covers, sets permissions
on the directory to ``0700``. This means that setuid binaries
will **not** be able to access files created in this directory.
In order to work around this, we also ``chmod +x`` the directory.
Arguments:
wd(string): Working directory. Default is to auto-generate a directory
based on the result of running 'mktemp -d' on the remote machine.
symlink(bool,str): Create symlinks in the new directory.
The default value, ``False``, implies that no symlinks should be
created.
A string value is treated as a path that should be symlinked.
It is passed directly to the shell on the remote end for expansion,
so wildcards work.
Any other value is treated as a boolean, where ``True`` indicates
that all files in the "old" working directory should be symlinked.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> cwd = s.set_working_directory()
>>> s.ls()
b''
>>> s.pwd() == cwd
True
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> homedir = s.pwd()
>>> _=s.touch('foo')
>>> _=s.set_working_directory()
>>> assert s.ls() == b''
>>> _=s.set_working_directory(homedir)
>>> assert b'foo' in s.ls().split()
>>> _=s.set_working_directory(symlink=True)
>>> assert b'foo' in s.ls().split()
>>> assert homedir != s.pwd()
>>> symlink=os.path.join(homedir,b'*')
>>> _=s.set_working_directory(symlink=symlink)
>>> assert b'foo' in s.ls().split()
>>> assert homedir != s.pwd()
"""
status = 0
if symlink and not isinstance(symlink, (six.binary_type, six.text_type)):
symlink = os.path.join(self.pwd(), b'*')
if not hasattr(symlink, 'encode') and hasattr(symlink, 'decode'):
symlink = symlink.decode('utf-8')
if not wd:
wd, status = self.run_to_end('x=$(mktemp -d) && cd $x && chmod +x . && echo $PWD', wd='.')
wd = wd.strip()
if status:
self.error("Could not generate a temporary directory (%i)\n%s" % (status, wd))
else:
cmd = b'ls ' + sh_string(wd)
_, status = self.run_to_end(cmd, wd = '.')
if status:
self.error("%r does not appear to exist" % wd)
self.cwd = wd
if not isinstance(wd, str):
self.cwd = wd.decode('utf-8')
self.info("Working directory: %r" % self.cwd)
if symlink:
self.ln('-s', symlink, '.')
return wd
def write(self, path, data):
"""Wrapper around upload_data to match :func:`pwnlib.util.misc.write`"""
return self.upload_data(data, path)
def read(self, path):
"""Wrapper around download_data to match :func:`pwnlib.util.misc.read`"""
return self.download_data(path)
def _init_remote_platform_info(self):
r"""Fills _platform_info, e.g.:
::
{'distro': 'Ubuntu\n',
'distro_ver': '14.04\n',
'machine': 'x86_64',
'node': 'pwnable.kr',
'processor': 'x86_64',
'release': '3.11.0-12-generic',
'system': 'linux',
'version': '#19-ubuntu smp wed oct 9 16:20:46 utc 2013'}
"""
if self._platform_info:
return
def preexec():
import platform
print('\n'.join(platform.uname()))
with context.quiet:
with self.process('true', preexec_fn=preexec) as io:
self._platform_info = {
'system': io.recvline().lower().strip().decode(),
'node': io.recvline().lower().strip().decode(),
'release': io.recvline().lower().strip().decode(),
'version': io.recvline().lower().strip().decode(),
'machine': io.recvline().lower().strip().decode(),
'processor': io.recvline().lower().strip().decode(),
'distro': 'Unknown',
'distro_ver': ''
}
try:
if not self.which('lsb_release'):
return
with self.process(['lsb_release', '-irs']) as io:
self._platform_info.update({
'distro': io.recvline().strip().decode(),
'distro_ver': io.recvline().strip().decode()
})
except Exception:
pass
@property
def os(self):
""":class:`str`: Operating System of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(os=self._platform_info['system']):
return context.os
except Exception:
return "Unknown"
@property
def arch(self):
""":class:`str`: CPU Architecture of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(arch=self._platform_info['machine']):
return context.arch
except Exception:
return "Unknown"
@property
def bits(self):
""":class:`str`: Pointer size of the remote machine."""
try:
with context.local():
context.clear()
context.arch = self.arch
return context.bits
except Exception:
return context.bits
@property
def version(self):
""":class:`tuple`: Kernel version of the remote machine."""
try:
self._init_remote_platform_info()
vers = self._platform_info['release']
# 3.11.0-12-generic
expr = r'([0-9]+\.?)+'
vers = re.search(expr, vers).group()
return tuple(map(int, vers.split('.')))
except Exception:
return (0,0,0)
@property
def distro(self):
""":class:`tuple`: Linux distribution name and release."""
try:
self._init_remote_platform_info()
return (self._platform_info['distro'], self._platform_info['distro_ver'])
except Exception:
return ("Unknown", "Unknown")
@property
def aslr(self):
""":class:`bool`: Whether ASLR is enabled on the system.
Example:
>>> s = ssh("travis", "example.pwnme")
>>> s.aslr
True
"""
if self._aslr is None:
if self.os != 'linux':
self.warn_once("Only Linux is supported for ASLR checks.")
self._aslr = False
else:
with context.quiet:
rvs = self.read('/proc/sys/kernel/randomize_va_space')
self._aslr = not rvs.startswith(b'0')
return self._aslr
@property
def aslr_ulimit(self):
""":class:`bool`: Whether the entropy of 32-bit processes can be reduced with ulimit."""
import pwnlib.elf.elf
import pwnlib.shellcraft
if self._aslr_ulimit is not None:
return self._aslr_ulimit
# This test must run a 32-bit binary, fix the architecture
arch = {
'amd64': 'i386',
'aarch64': 'arm'
}.get(self.arch, self.arch)
with context.local(arch=arch, bits=32, os=self.os, aslr=True):
with context.quiet:
try:
sc = pwnlib.shellcraft.cat('/proc/self/maps') \
+ pwnlib.shellcraft.exit(0)
elf = pwnlib.elf.elf.ELF.from_assembly(sc, shared=True)
except Exception:
self.warn_once("Can't determine ulimit ASLR status")
self._aslr_ulimit = False
return self._aslr_ulimit
def preexec():
import resource
try:
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
except Exception:
pass
# Move to a new temporary directory
cwd = self.cwd
tmp = self.set_working_directory()
try:
self.upload(elf.path, './aslr-test')
except IOError:
self.warn_once("Couldn't check ASLR ulimit trick")
self._aslr_ulimit = False
return False
self.process(['chmod', '+x', './aslr-test']).wait()
maps = self.process(['./aslr-test'], preexec_fn=preexec).recvall()
# Move back to the old directory
self.cwd = cwd
# Clean up the files
self.process(['rm', '-rf', tmp]).wait()
# Check for 555555000 (1/3 of the address space for PAE)
# and for 40000000 (1/3 of the address space with 3GB barrier)
self._aslr_ulimit = bool(b'55555000' in maps or b'40000000' in maps)
return self._aslr_ulimit
def _checksec_cache(self, value=None):
path = self._get_cachefile('%s-%s' % (self.host, self.port))
if value is not None:
with open(path, 'w+') as f:
f.write(value)
elif os.path.exists(path):
with open(path, 'r+') as f:
return f.read()
def checksec(self, banner=True):
"""checksec()
Prints a helpful message about the remote system.
Arguments:
banner(bool): Whether to print the path to the ELF binary.
"""
cached = self._checksec_cache()
if cached:
return cached
red = text.red
green = text.green
yellow = text.yellow
res = [
"%s@%s:" % (self.user, self.host),
"Distro".ljust(10) + ' '.join(self.distro),
"OS:".ljust(10) + self.os,
"Arch:".ljust(10) + self.arch,
"Version:".ljust(10) + '.'.join(map(str, self.version)),
"ASLR:".ljust(10) + {
True: green("Enabled"),
False: red("Disabled")
}[self.aslr]
]
if self.aslr_ulimit:
res += [ "Note:".ljust(10) + red("Susceptible to ASLR ulimit trick (CVE-2016-3672)")]
cached = '\n'.join(res)
self._checksec_cache(cached)
return cached
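# --- Illustrative sketch (module-level helpers, not methods of the class above) ---
# A minimal, standalone mirror of the logic used by the `version` and `aslr`
# properties. The sample release string and the `read_file` callable are
# assumptions for demonstration only.
def _demo_parse_kernel_release(release="3.11.0-12-generic"):
    import re
    # Same regex as `version`: take the leading dotted version numbers.
    match = re.search(r"([0-9]+\.?)+", release)
    return tuple(map(int, match.group().split("."))) if match else (0, 0, 0)

def _demo_aslr_enabled(read_file=lambda path: b"2\n"):
    # Mirrors `aslr`: ASLR counts as enabled unless the sysctl value starts with b'0'.
    return not read_file("/proc/sys/kernel/randomize_va_space").startswith(b"0")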
|
system_info.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: system_info
:platform: Unix
:synopsis: the top-level submodule of T_System's remote_ui that contains the functions for getting system information about T_System.
.. moduleauthor:: Cem Baybars GÜÇLÜ <cem.baybars@gmail.com>
"""
import multiprocessing
from t_system.foundation import *
from t_system.administration import is_admin
from t_system import emotion_manager
def i_am_ready():
"""Method to make feel like 'I am ready'.
"""
emotion_manager.make_feel("i_am_ready", "scenario")
emotion_manager.revert_the_expand_actor()
def get_system_info(admin_id):
"""Method to get system info.
Args:
admin_id (str): Admin privileges flag.
"""
multiprocessing.Process(target=i_am_ready).start()
root = is_admin(admin_id)
result = {}
result.update(get_ram_usage(root))
result.update(get_cpu_usage(root))
result.update(get_cpu_temperature(root))
result.update(get_disk_usage(root))
result.update(get_versions(root))
return result
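# --- Illustrative sketch (not part of the original module) ----------------
# Shows the aggregation pattern used by get_system_info above: each helper
# returns a small dict and the pieces are merged into one result. The helper
# names and values below are hypothetical stand-ins, not the real
# t_system.foundation functions.
def _demo_aggregate_info(root=False):
    def fake_ram_usage(root):
        return {"ram_usage_percent": 42.0}

    def fake_cpu_temperature(root):
        return {"cpu_temperature": 48.2}

    result = {}
    for getter in (fake_ram_usage, fake_cpu_temperature):
        result.update(getter(root))
    return result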
|
launcher.py
|
#
# CV is a framework for continuous verification.
#
# Copyright (c) 2018-2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import multiprocessing
import subprocess
import tempfile
from aux.common import *
from components.component import Component
from components.coverage_processor import Coverage
from models.verification_result import *
DEFAULT_CIL_DIR = "cil"
DEFAULT_MAIN_DIR = "main"
DEFAULT_LAUNCHES_DIR = "launches"
DEFAULT_SOURCE_PATCHES_DIR = "patches/sources"
DEFAULT_PREPARATION_PATCHES_DIR = "patches/preparation"
DEFAULT_ENTRYPOINTS_DIR = "entrypoints"
DEFAULT_RULES_DIR = "rules"
DEFAULT_PLUGIN_DIR = "plugin"
DEFAULT_WORK_DIR = "work_dir"
DEFAULT_RESULTS_DIR = "results"
DEFAULT_BACKUP_PREFIX = "backup_"
TAG_LIMIT_MEMORY = "memory size"
TAG_LIMIT_CPU_TIME = "CPU time"
TAG_LIMIT_CPU_CORES = "number of cores"
TAG_CACHED = "cached"
TAG_BRANCH = "branch"
TAG_PATCH = "patches"
TAG_BUILD_PATCH = "build patch"
TAG_MAX_COVERAGE = "max"
TAG_CALLERS = "callers"
TAG_COMMITS = "commits"
TAG_BACKUP_WRITE = "backup write"
TAG_BACKUP_READ = "backup read"
TAG_BENCHMARK_ARGS = "benchmark args"
TAG_PARALLEL_LAUNCHES = "parallel launches"
TAG_RESOURCE_LIMITATIONS = "resource limits"
TAG_PROCESSES = "processes"
TAG_SCHEDULER = "scheduler"
TAG_CLOUD = "cloud"
TAG_CLOUD_MASTER = "master"
TAG_CLOUD_PRIORITY = "priority"
TAG_UPLOADER_UPLOAD_RESULTS = "upload results"
TAG_UPLOADER_IDENTIFIER = "identifier"
TAG_UPLOADER_SERVER = "server"
TAG_UPLOADER_USER = "user"
TAG_UPLOADER_PASSWORD = "password"
TAG_UPLOADER_PARENT_ID = "parent id"
TAG_UPLOADER_REQUEST_SLEEP = "request sleep"
TAG_SKIP = "skip"
TAG_STATISTICS_TIME = "statistics time"
TAG_BUILD_CONFIG = "build config"
TAG_ID = "id"
TAG_REPOSITORY = "repository"
TAG_NAME = "name"
TAG_VERIFIER_OPTIONS = "verifier options"
TAG_EXPORT_HTML_ERROR_TRACES = "standalone error traces"
TIMESTAMP_PATTERN = "<timestamp>"
RUNDEFINITION_PATTERN = "<rundefinition>"
COMMIT_PATTERN = "<commit>"
SCHEDULER_CLOUD = "cloud"
SCHEDULER_LOCAL = "local"
SCHEDULERS = [SCHEDULER_CLOUD, SCHEDULER_LOCAL]
CLOUD_PRIORITIES = ["IDLE", "LOW", "HIGH", "URGENT"]
DEFAULT_CLOUD_PRIORITY = "LOW"
CLOUD_BENCHMARK_LOG = "benchmark_log.txt"
HARDCODED_RACES_OUTPUT_DIR = "output"
ROUND_DIGITS = 9 # nanoseconds.
DEFAULT_TIME_FOR_STATISTICS = 0 # By default we do not allocate time for printing statistics.
VERIFIER_OPTIONS_NOT_OPTIMIZED = [
"cpa.functionpointer.ignoreUnknownFunctionPointerCalls=false",
]
VERIFIER_FILES_DIR = "verifier_files"
VERIFIER_OPTIONS_DIR = "options"
VERIFIER_PROPERTIES_DIR = "properties"
VERIFIER_OPTIONS_COMMON = "common"
SOURCE_QUEUE_BUILDER_RESOURCES = "builder resources"
SOURCE_QUEUE_QUALIFIER_RESOURCES = "qualifier resources"
SOURCE_QUEUE_FILES = "files"
SOURCE_QUEUE_FUNCTIONS = "functions"
SOURCE_QUEUE_RESULTS = "results"
TAG_ENTRYPOINTS_DESC = "entrypoints desc"
TAG_PREPARATION_CONFIG = "preparation config"
DEFAULT_PREPARATION_CONFIG = "conf.json"
DEFAULT_PROPERTY_MEMSAFETY = "properties/memsafety.spc"
DEFAULT_PROPERTY_UNREACHABILITY = "properties/unreachability.spc"
TAG_CONFIG_MEMORY_LIMIT = "Memory limit"
TAG_CONFIG_CPU_TIME_LIMIT = "CPU time limit"
TAG_CONFIG_CPU_CORES_LIMIT = "CPU cores limit"
TAG_CONFIG_OPTIONS = "Options"
class Launcher(Component):
"""
Main component, which creates verification tasks for the given system, launches them and processes results.
"""
def __init__(self, name: str, config_file: str):
self.config_file = os.path.basename(config_file).replace(JSON_EXTENSION, "")
if os.path.exists(config_file):
with open(config_file, errors='ignore') as data_file:
config = json.load(data_file)
else:
config = {
TAG_DIRS: {
TAG_DIRS_RESULTS: DEFAULT_RESULTS_DIR,
TAG_DIRS_WORK: DEFAULT_WORK_DIR
}
}
super(Launcher, self).__init__(name, config)
# Since Launcher does not produce a lot of output and any of its failures is fatal, we can put it on stdout.
self.debug = self.config.get(TAG_DEBUG, False)
if self.debug:
self.output_desc = sys.stdout
else:
self.output_desc = subprocess.DEVNULL
# Remember some useful directories.
self.root_dir = os.getcwd() # By default tool-set is run from this directory.
self.work_dir = os.path.abspath(self.config.get(TAG_DIRS, {}).get(TAG_DIRS_WORK, DEFAULT_WORK_DIR))
self.results_dir = os.path.abspath(self.config.get(TAG_DIRS, {}).get(TAG_DIRS_RESULTS, DEFAULT_RESULTS_DIR))
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir, exist_ok=True)
if self.config.get(TAG_EXPORT_HTML_ERROR_TRACES, False):
self.result_dir_et = os.path.abspath(os.path.join(self.config[TAG_DIRS][TAG_DIRS_RESULTS],
self._get_result_file_prefix()))
else:
self.result_dir_et = None
self.install_dir = os.path.join(self.root_dir, DEFAULT_INSTALL_DIR)
self.cpu_cores = multiprocessing.cpu_count()
self.backup = None # File, in which backup copy will be placed during verification.
# Defines type of scheduler.
self.scheduler = self.component_config.get(TAG_SCHEDULER)
self.benchmark_args = self.component_config.get(TAG_BENCHMARK_ARGS, "")
if self.scheduler == SCHEDULER_CLOUD:
cloud_master = self.config.get(TAG_CLOUD, {}).get(TAG_CLOUD_MASTER)
cloud_priority = self.config.get(TAG_CLOUD, {}).get(TAG_CLOUD_PRIORITY, DEFAULT_CLOUD_PRIORITY)
self.benchmark_args = "{} --cloud --cloudMaster {} --cloudPriority {}".\
format(self.benchmark_args, cloud_master, cloud_priority)
self.job_name_suffix = ""
self.export_safes = self.config.get(COMPONENT_EXPORTER, {}).get(TAG_ADD_VERIFIER_PROOFS, True)
def __check_result_files(self, file: str, launch_dir: str):
if file.endswith(".log"):
dst = LOG_FILE
else:
dst = os.path.basename(file)
if not self.export_safes and WITNESS_CORRECTNESS in file:
return
shutil.copy(file, os.path.join(launch_dir, dst))
def _copy_result_files(self, files: list, group_directory: str) -> str:
launch_dir = os.path.abspath(tempfile.mkdtemp(dir=group_directory))
for file in files:
if os.path.isfile(file):
self.__check_result_files(file, launch_dir)
for root, dirs, files_in in os.walk(file):
for name in files_in:
file = os.path.join(root, name)
self.__check_result_files(file, launch_dir)
return launch_dir
def _process_coverage(self, result, launch_directory, source_dirs: list, default_source_file=None):
cov = Coverage(self, default_source_file=default_source_file)
cov_queue = multiprocessing.Queue()
cov_process = multiprocessing.Process(target=cov.compute_coverage, name="coverage_{}".format(result.get_name()),
args=(source_dirs, launch_directory, cov_queue))
cov_process.start()
cov_process.join() # Wait since we are already in parallel threads for each launch.
if not cov_process.exitcode:
if cov_queue.qsize():
data = cov_queue.get()
result.cov_funcs = data.get(TAG_COVERAGE_FUNCS, 0.0)
result.cov_lines = data.get(TAG_COVERAGE_LINES, 0.0)
result.coverage_resources[TAG_CPU_TIME] = data.get(TAG_CPU_TIME, 0.0)
result.coverage_resources[TAG_WALL_TIME] = data.get(TAG_WALL_TIME, 0.0)
result.coverage_resources[TAG_MEMORY_USAGE] = data.get(TAG_MEMORY_USAGE, 0)
else:
self.logger.warning("Coverage was not computed for {} and entry-point {}".
format(result.id, result.entrypoint))
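    # --- Illustrative helper (not from the original Launcher) --------------
    # A minimal version of the hand-off used by _process_coverage above: run
    # a worker in a separate process, let it put a result dict on a Queue,
    # then join and read the result only if the worker exited cleanly. The
    # worker and its payload are hypothetical; the sketch assumes a
    # fork-based start method (Linux), which is what the launcher targets.
    @staticmethod
    def _demo_queue_handoff():
        def worker(queue):
            queue.put({"cov_lines": 12.5, "cov_funcs": 40.0})

        queue = multiprocessing.Queue()
        proc = multiprocessing.Process(target=worker, args=(queue,))
        proc.start()
        proc.join()  # wait, as _process_coverage does, since each launch already runs in its own thread
        if not proc.exitcode and not queue.empty():
            return queue.get()
        return None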
def _get_from_queue_into_list(self, queue, result_list):
while not queue.empty():
launch = queue.get()
result_list.append(launch)
if self.backup:
with open(self.backup, "a") as f_report:
f_report.write(str(launch) + "\n")
return result_list
def _get_result_file_prefix(self):
return self.config_file + "_" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S')
def _upload_results(self, uploader_config, result_file):
server = uploader_config.get(TAG_UPLOADER_SERVER)
identifier = uploader_config.get(TAG_UPLOADER_IDENTIFIER)
user = uploader_config.get(TAG_UPLOADER_USER)
password = uploader_config.get(TAG_UPLOADER_PASSWORD)
request_sleep = uploader_config.get(TAG_UPLOADER_REQUEST_SLEEP)
is_parent = uploader_config.get(TAG_UPLOADER_PARENT_ID, False)
predefined_name = uploader_config.get(TAG_NAME, None)
if not server:
self.logger.error("Server was not provided for uploading results, skipping it.")
return
if not identifier:
self.logger.error("Job identifier was not provided for uploading results, skipping it.")
return
if not user:
self.logger.error("User name was not provided for uploading results, skipping it.")
return
self.logger.info("Uploading results into server {} with identifier {}".format(server, identifier))
uploader = self.get_tool_path(DEFAULT_TOOL_PATH[UPLOADER])
uploader_python_path = os.path.abspath(os.path.join(os.path.dirname(uploader), os.path.pardir))
commits = self.config.get(TAG_COMMITS)
if commits:
commit = commits[0]
res = re.search(r'(\w+)\.\.(\w+)', commit)
if res:
commit = res.group(2)
commits = commit[:7]
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S')
if predefined_name:
job_name = predefined_name.replace(TIMESTAMP_PATTERN, timestamp)
job_name = job_name.replace(RUNDEFINITION_PATTERN, self.job_name_suffix)
job_name = job_name.replace(COMMIT_PATTERN, str(commits))
elif commits:
job_name = "{}: {} ({})".format(self.config_file, commits, timestamp)
else:
job_name = "{} ({})".format(self.config_file, timestamp)
self.logger.debug("Using name '{}' for uploaded report".format(job_name))
command = "PYTHONPATH={} {} '{}' --host='{}' --username='{}' --password='{}' --archive='{}' --name='{}'". \
format(uploader_python_path, uploader, identifier, server, user, password, result_file, job_name)
if request_sleep:
command = "{} --request-sleep {}".format(command, request_sleep)
if is_parent:
command = "{} --copy".format(command)
try:
subprocess.check_call(command, shell=True)
self.logger.info("Results were successfully uploaded into the server: {}/jobs".format(server))
except Exception:
self.logger.warning("Error on uploading of report archive '{}' via command '{}':\n".
format(result_file, command), exc_info=True)
def _get_none_rule_key(self, verification_result: VerificationResults):
return "{0}_{1}".format(verification_result.id, verification_result.entrypoint)
def _print_launches_report(self, file_name: str, report_resources: str, results: list, cov_lines: dict = None,
cov_funcs: dict = None):
self.logger.info("Preparing report on launches into file: '{}'".format(file_name))
with open(file_name, "w") as f_report, open(report_resources, "w") as f_resources:
# Write headers.
f_report.write("Subsystem;Rule;Entrypoint;Verdict;Termination;CPU;Wall;Memory;Relevancy;"
"Traces;Filtered traces;Work dir;Cov lines;Cov funcs;MEA time\n")
f_resources.write("Counter;" + ";".join(ADDITIONAL_RESOURCES) + "\n")
counter = 1
for result in results:
# Add coverage information.
if result.verdict == VERDICT_SAFE and not result.rule == RULE_COVERAGE:
key = self._get_none_rule_key(result)
if not result.cov_lines and cov_lines:
result.cov_lines = cov_lines.get(key, 0.0)
if not result.cov_funcs and cov_funcs:
result.cov_funcs = cov_funcs.get(key, 0.0)
f_report.write(str(result) + "\n")
f_resources.write("{};".format(counter) + result.print_resources() + "\n")
counter += 1
def _get_results_names(self) -> tuple:
reports_prefix = self._get_result_file_prefix()
report_launches = os.path.join(self.results_dir, "report_launches_{0}.csv".format(reports_prefix))
result_archive = os.path.join(self.results_dir, "results_{0}.zip".format(reports_prefix))
report_components = os.path.join(self.results_dir, "report_components_{0}.csv".format(reports_prefix))
short_report = os.path.join(self.results_dir, "short_report_{0}.csv".format(reports_prefix))
report_resources = os.path.join(self.results_dir, "report_resources_{0}.csv".format(reports_prefix))
return report_launches, result_archive, report_components, short_report, report_resources
|
miner.py
|
# Time running
import time
# Long string
import hashlib as hasher
# Data
import json
# Get, post, ...
import requests
# base 64
import base64
# Web framework
from flask import Flask
# Http request
from flask import request
# Running and |
from multiprocessing import Process, Pipe
# Crypto algorithm
# ecd sa
import ecdsa
# Miner address, miner dot url, peer point
from miner_config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
# Web framework instance
node = Flask(__name__)
# Block class
class Block:
# Start
# Self, block index, when block created
# Block data
# Prev block hash
# Own hash
def __init__(self, index, timestamp, data, previous_hash):
# Return a new block object
# Chained by prev hash
"""Return a new Block object. Each block is "chained" to its previous
by calling its unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
# block id
self.index = index
# When block created
self.timestamp = timestamp
# Data in block
self.data = data
# Prev block
self.previous_hash = previous_hash
# It is block hash
self.hash = self.hash_block()
def hash_block(self):
"""Creates the unique hash for the block. It uses sha256."""
sha = hasher.sha256()
#
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
"""To create each block, it needs the hash of the previous one. First
block has no previous, so it must be created manually (with index zero
and arbitrary previous hash)"""
# block index: 0
# created time: now
# data is json
# proof of work: 9
# trans: none
# hash zero
return Block(0, time.time(),
{"proof-of-work": 9,"transactions": None},
"0")
# Insert first block to list
# Node's blockchain copy
BLOCKCHAIN = []
BLOCKCHAIN.append(create_genesis_block())
# Store the transactions that this node has received, in a list.
# If the node you sent the transaction to mines a block, it will get accepted,
# but there is a chance it gets discarded and your transaction goes back
# as if it was never processed.
NODE_PENDING_TRANSACTIONS = []
def proof_of_work(last_proof, blockchain):
    # Create a variable that we will use to find our next proof of work
    incrementor = last_proof + 1
    # Remember when we last asked the other nodes for their chains
    last_consensus_check = time.time()
    # Keep incrementing the incrementor until it is divisible both by 7919
    # and by the proof of work of the previous block in the chain
    while not (incrementor % 7919 == 0 and incrementor % last_proof == 0):
        incrementor += 1
        # Ask the other nodes whether anyone found a solution, at most once every 60 seconds
        if time.time() - last_consensus_check > 60:
            last_consensus_check = time.time()
            # If any other node got the proof, stop searching
            new_blockchain = consensus(blockchain)
            if new_blockchain:
                # (False: another node got the proof first, new blockchain)
                return (False, new_blockchain)
    # Once that number is found, we can return it as a proof of our work
    return (incrementor, blockchain)
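# --- Illustrative sketch (not part of the original miner) -----------------
# Makes the acceptance condition of proof_of_work explicit: a candidate is a
# valid proof when it is divisible both by 7919 and by the previous proof.
# For example, with last_proof = 9, 9 * 7919 = 71271 is a valid proof.
def _demo_is_valid_proof(candidate, last_proof):
    return candidate % 7919 == 0 and candidate % last_proof == 0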
# a: one end of a Pipe, used to send the updated blockchain to the server process
# blockchain: this node's current copy of the chain
# node_pending_transactions: transactions waiting to be mined
def mine(a,blockchain,node_pending_transactions):
BLOCKCHAIN = blockchain
NODE_PENDING_TRANSACTIONS = node_pending_transactions
while True:
"""Mining is the only way that new coins can be created.
In order to prevent to many coins to be created, the process
is slowed down by a proof of work algorithm.
"""
# Get last block
# Get the last proof of work
last_block = BLOCKCHAIN[len(BLOCKCHAIN) - 1]
# Get last proof of work
# proof of work is number
last_proof = last_block.data['proof-of-work']
# Is incrementor same as proof of work.....?
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
# If we didn't guess the proof, start mining again
if proof[0] == False:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
# Send the updated chain to the server process through the pipe
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
# we reward the miner by adding a transaction
#First we load all pending transactions sent to the node server
# miner url
# miner address
# url access
# Get content
NODE_PENDING_TRANSACTIONS = requests.get(MINER_NODE_URL + "/txion?update=" + MINER_ADDRESS).content
# load data from url
NODE_PENDING_TRANSACTIONS = json.loads(NODE_PENDING_TRANSACTIONS)
# Append json
# from, to, amount
#Then we add the mining reward
NODE_PENDING_TRANSACTIONS.append(
{ "from": "network",
"to": MINER_ADDRESS,
"amount": 1 }
)
#
# Now we can gather the data needed to create the new block
new_block_data = {
"proof-of-work": proof[0],
"transactions": list(NODE_PENDING_TRANSACTIONS)
}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty transaction list
NODE_PENDING_TRANSACTIONS = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({
"index": new_block_index,
"timestamp": str(new_block_timestamp),
"data": new_block_data,
"hash": last_block_hash
}) + "\n")
a.send(BLOCKCHAIN)
requests.get(MINER_NODE_URL + "/blocks?update=" + MINER_ADDRESS)
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated == True:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
#
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
return BLOCKCHAIN
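# --- Illustrative sketch (not part of the original miner) -----------------
# The consensus rule above is "adopt the longest valid chain". This sketch
# applies the same selection to plain lists and, like consensus(), returns
# False when our own chain is already the longest.
def _demo_longest_chain(ours, others):
    longest = ours
    for chain in others:
        if len(chain) > len(longest):
            longest = chain
    return False if longest is ours else longest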
def validate_blockchain(block):
"""Validate the submited chain. If hashes are not correct, return false
block(str): json
"""
return True
@node.route('/blocks', methods=['GET'])
def get_blocks():
# Load current blockchain. Only you, should update your blockchain
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
else:
# Any other node trying to connect to your node will use this
chain_to_send = BLOCKCHAIN
# Convert our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": str(block.index),
"timestamp": str(block.timestamp),
"data": str(block.data),
"hash": block.hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json)
return chain_to_send
@node.route('/txion', methods=['GET','POST'])
def transaction():
"""Each transaction sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. Transactions only move
coins, they don't create them.
"""
if request.method == 'POST':
# On each new POST request, we extract the transaction data
new_txion = request.get_json()
# Then we add the transaction to our list
if validate_signature(new_txion['from'],new_txion['signature'],new_txion['message']):
NODE_PENDING_TRANSACTIONS.append(new_txion)
# Because the transaction was successfully
# submitted, we log it to our console
print("New transaction")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
return "Transaction submission successful\n"
else:
return "Transaction submission failed. Wrong signature\n"
#Send pending transactions to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_TRANSACTIONS)
# Empty transaction list
NODE_PENDING_TRANSACTIONS[:] = []
return pending
def validate_signature(public_key,signature,message):
"""Verify if the signature is correct. This is used to prove if
it's you (and not someon else) trying to do a transaction with your
address. Called when a user try to submit a new transaction.
"""
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
try:
return(vk.verify(signature, message.encode()))
except:
return False
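# --- Illustrative sketch (not part of the original miner) -----------------
# Shows how a wallet could produce the (public_key, signature, message)
# triple that validate_signature above expects: raw SECP256k1 key and
# signature bytes, base64-encoded for transport. The key generated here is
# throwaway and for demonstration only.
def _demo_sign_and_verify(message="send 1 coin"):
    sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
    public_key = base64.b64encode(sk.get_verifying_key().to_string()).decode()
    signature = base64.b64encode(sk.sign(message.encode())).decode()
    return validate_signature(public_key, signature, message)  # expected: True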
# Welcome msg
def welcome_msg():
print(""" =========================================\n
SIMPLE COIN v1.0.0 - BLOCKCHAIN SYSTEM\n
=========================================\n\n
You can find more help at: https://github.com/cosme12/SimpleCoin\n
Make sure you are using the latest version or you may end up
in a parallel chain.\n\n\n""")
if __name__ == '__main__':
# Welcome
welcome_msg()
    # Run the miner and the server as two separate processes,
    # connected by a Pipe so the miner can push updated chains to the server
    # Start mining
    a, b = Pipe()
    p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
    p1.start()
    # Start the server to receive transactions
    p2 = Process(target=node.run, args=())
    p2.start()
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas._typing import FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
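# --- Illustrative usage sketch (not part of pandas itself) ----------------
# Minimal check that a DataFrame survives the pickle round trip performed by
# round_trip_pickle above; the frame contents are arbitrary sample data.
def _demo_round_trip_pickle():
    df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
    result = round_trip_pickle(df)
    assert_frame_equal(result, df)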
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
import zipfile
compress_method = zipfile.ZipFile
elif compression == "gzip":
import gzip
compress_method = gzip.GzipFile
elif compression == "bz2":
import bz2
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
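# --- Illustrative usage sketch (not part of pandas itself) ----------------
# Writes a small payload with write_to_compressed and reads it back through
# decompress_file, exercising the gzip code path with sample bytes.
def _demo_compression_round_trip():
    payload = b"col1,col2\n1,2\n"
    with ensure_clean("__demo__.gz") as path:
        write_to_compressed("gzip", path, payload)
        with decompress_file(path, compression="gzip") as handle:
            assert handle.read() == payload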
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = False,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left,
right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
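# --- Illustrative usage sketch (not part of pandas itself) ----------------
# assert_almost_equal dispatches on the input type; here it compares two
# Series that differ only beyond the default 5-digit precision, so the call
# returns without raising.
def _demo_assert_almost_equal():
    left = Series([1.0, 2.0])
    right = Series([1.0000004, 2.0000004])
    assert_almost_equal(left, right)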
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return "".join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
:meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = False,
check_exact: bool = True,
check_categorical: bool = True,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string",):
    assert r.inferred_type in ("string",)
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return type(x).__name__
except AttributeError:
return repr(type(x))
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
"""
checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def isiterable(obj):
return hasattr(obj, "__iter__")
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
)
else:
assert_index_equal(
left.categories.sort_values(),
right.categories.sort_values(),
obj=f"{obj}.categories",
)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}.values")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = f"""{obj} are different
{message}
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape,
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if hasattr(left, "asi8") and type(right) == type(left):
# Avoid slow object-dtype comparisons
assert_numpy_array_equal(left.asi8, right.asi8)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj="ExtensionArray",
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals
.. versionadded:: 1.0.2
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left)
and is_categorical_dtype(right)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
obj=str(obj),
)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if needs_i8_conversion(left) or needs_i8_conversion(right):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = (
f"[datetimelike_compat=True] {left.values} "
f"is not equal to {right.values}."
)
raise AssertionError(msg)
else:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
# .values is an ndarray, but ._values is the ExtensionArray.
# TODO: Use .array
assert is_extension_array_dtype(right.dtype)
assert_extension_array_equal(left._values, right._values)
elif (
is_extension_array_dtype(left)
and not is_categorical_dtype(left)
and is_extension_array_dtype(right)
and not is_categorical_dtype(right)
):
assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(
left._internal_get_values(),
right._internal_get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj=str(obj),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(
left.values,
right.values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
left,
right,
check_dtype=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == "block":
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # indices are equal; nothing further to check
pass
if check_fill_value:
assert_attr_equal("fill_value", left, right)
if check_dtype:
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
        The random state seed.
    Returns
    -------
    DataFrame
        A DataFrame with a DatetimeIndex and the following columns:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed integer values
        * x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def all_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [
makeIntIndex,
makeFloatIndex,
makeStringIndex,
makeUnicodeIndex,
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeBoolIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(N)
data = Index(data, dtype=object)
index = makeStringIndex(N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/MultiIndex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces a MultiIndex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. If True, will use default
       names; if False, will use no names; if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will be repeated at the corresponding level. You can specify just
       the first few; the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index,
       "dt" creates a datetime index,
       "p" creates a period index,
       "td" creates a timedelta index.
       If unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
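# A minimal usage sketch for makeCustomIndex (illustrative only): a two-level
# MultiIndex with four entries where each label on the first level is repeated
# twice, e.g. ("#_l0_g0", "#_l1_g0"), ("#_l0_g0", "#_l1_g1"), ...
def _example_custom_multiindex():
    idx = makeCustomIndex(nentries=4, nlevels=2, names=True, ndupe_l=[2])
    assert idx.nlevels == 2 and len(idx) == 4
    return idx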
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index,
       "dt" creates a datetime index,
       "p" creates a period index,
       "td" creates a timedelta index.
       If unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level MultiIndex on rows with names provided, 2-level MultiIndex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(
nrows,
ncols,
density=0.9,
random_state=None,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Parameters
----------
    density : float, optional
        Float in (0, 1) giving the fraction of non-missing numbers in
        the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(
nrows,
ncols,
c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l,
r_ndupe_l=r_ndupe_l,
dtype=dtype,
c_idx_type=c_idx_type,
r_idx_type=r_idx_type,
)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
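# A minimal usage sketch for optional_args, assuming a toy decorator that
# appends a suffix to a function's return value; `tag_result` and `suffix`
# are illustrative names, not part of the module.
def _example_optional_args():
    @optional_args
    def tag_result(f, suffix=""):
        @wraps(f)
        def wrapped(*args, **kwargs):
            return f"{f(*args, **kwargs)}{suffix}"
        return wrapped
    @tag_result
    def plain():
        return "plain"
    @tag_result(suffix="!")
    def loud():
        return "loud"
    return plain(), loud()  # ("plain", "loud!")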
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
    Try to connect to the given url. Returns True if it succeeds, False if an
    IOError is raised.
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
    Traceback (most recent call last):
       ...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
clear=None,
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
        The expected warning category. ``Warning`` is the base
        class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
clear : str, default None
If not ``None`` then remove any previously raised warnings from
the ``__warningsregistry__`` to ensure that no warning messages are
suppressed by this context manager. If ``None`` is specified,
the ``__warningsregistry__`` keeps track of which warnings have been
shown, and does not show them again.
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
if clear is not None:
# make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except AttributeError:
# module may not have __warningregistry__
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
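# A minimal usage sketch for with_csv_dialect, assuming a pipe-delimited
# dialect named "pipes" (an illustrative name): the dialect is registered for
# the duration of the block and unregistered afterwards.
def _example_with_csv_dialect():
    from io import StringIO
    data = "a|b\n1|2\n"
    with with_csv_dialect("pipes", delimiter="|"):
        return pd.read_csv(StringIO(data), dialect="pipes")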
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
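# A minimal usage sketch for test_parallel: the decorated function runs once
# per thread, and results are collected through a shared list because return
# values are not propagated.
def _example_test_parallel():
    results = []
    @test_parallel(num_threads=4)
    def append_one():
        results.append(1)
    append_one()
    assert len(results) == 4
    return results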
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
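# A minimal usage sketch for _make_skipna_wrapper: build a NaN-dropping sum
# and apply it to a Series with one missing value.
def _example_skipna_wrapper():
    skipna_sum = _make_skipna_wrapper(np.sum)
    return skipna_sum(Series([1.0, np.nan, 2.0]))  # 3.0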
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
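# A minimal usage sketch for convert_rows_list_to_csv_str: two rows joined
# with the OS line separator, with a trailing separator appended.
def _example_convert_rows_list_to_csv_str():
    return convert_rows_list_to_csv_str(["a,b", "1,2"])  # "a,b" + os.linesep + "1,2" + os.linesep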
def external_error_raised(
expected_exception: Type[Exception],
) -> Callable[[Type[Exception], None], None]:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
|
monitor.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import numpy as np
import Queue
import threading
import time
import datetime
from serial import Serial
debug=False
## Serial port simulation ##
from Rat import Rat
## Define some Serial read functions
queue = Queue.Queue(0)
def get_all_queue_result(queue):
result_list = []
while not queue.empty():
result_list.append(queue.get())
return result_list
def serial_read(s, q, sid):
while True:
line = s.readline().strip()
if debug==True:
print time.strftime("%Y-%m-%d %H:%M:%S"), sid, line
else:
            out = ' '.join([time.strftime("%Y-%m-%d %H:%M:%S"), rats.get(line, line), sid])
q.put(out)
print out
ports = {'P1': '/dev/ttyUSB0',
'P2': '/dev/ttyUSB1'}
## For testing
ports = {'P1': 'COM11',
'P2': 'COM22'}
## Testing ends
## RFID tag number and some name
rats = {'001': 'Steve',
'002': 'Julia',
'003': 'Mario'}
## Open all ports and start reading
for dooraddr in ports.itervalues():
reader = Serial(dooraddr, baudrate=9600)
    thread = threading.Thread(target=serial_read, args=(reader, queue, dooraddr))
    thread.start()
outfile = 'log_rats.dat'
o = open(outfile, 'a', 0)
try:
    while True:
        result = get_all_queue_result(queue)
        for aa in result:
            o.write(aa+'\n')
        time.sleep(1)  # avoid busy-waiting on an empty queue
finally:
    o.close()
|
default.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import re
import stat
import sys
import threading
from time import sleep
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.conf as conf
from azurelinuxagent.common.event import add_event, WALAEventOperation
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.exception import ResourceDiskError
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.version import AGENT_NAME
DATALOSS_WARNING_FILE_NAME = "DATALOSS_WARNING_README.txt"
DATA_LOSS_WARNING = """\
WARNING: THIS IS A TEMPORARY DISK.
Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT.
Please do not use this disk for storing any personal or application data.
For additional details, please refer to the MSDN documentation at:
http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx
"""
class ResourceDiskHandler(object):
def __init__(self):
self.osutil = get_osutil()
self.fs = conf.get_resourcedisk_filesystem()
def start_activate_resource_disk(self):
disk_thread = threading.Thread(target=self.run)
disk_thread.start()
def run(self):
mount_point = None
if conf.get_resourcedisk_format():
mount_point = self.activate_resource_disk()
if mount_point is not None and \
conf.get_resourcedisk_enable_swap():
self.enable_swap(mount_point)
def activate_resource_disk(self):
logger.info("Activate resource disk")
try:
mount_point = conf.get_resourcedisk_mountpoint()
mount_point = self.mount_resource_disk(mount_point)
warning_file = os.path.join(mount_point,
DATALOSS_WARNING_FILE_NAME)
try:
fileutil.write_file(warning_file, DATA_LOSS_WARNING)
except IOError as e:
logger.warn("Failed to write data loss warning:{0}", e)
return mount_point
except ResourceDiskError as e:
logger.error("Failed to mount resource disk {0}", e)
add_event(name=AGENT_NAME, is_success=False, message=ustr(e),
op=WALAEventOperation.ActivateResourceDisk)
return None
def enable_swap(self, mount_point):
logger.info("Enable swap")
try:
size_mb = conf.get_resourcedisk_swap_size_mb()
self.create_swap_space(mount_point, size_mb)
except ResourceDiskError as e:
logger.error("Failed to enable swap {0}", e)
def reread_partition_table(self, device):
if shellutil.run("sfdisk -R {0}".format(device), chk_err=False):
shellutil.run("blockdev --rereadpt {0}".format(device),
chk_err=False)
def mount_resource_disk(self, mount_point):
device = self.osutil.device_for_ide_port(1)
if device is None:
raise ResourceDiskError("unable to detect disk topology")
device = "/dev/{0}".format(device)
partition = device + "1"
mount_list = shellutil.run_get_output("mount")[1]
existing = self.osutil.get_mount_point(mount_list, device)
if existing:
logger.info("Resource disk [{0}] is already mounted [{1}]",
partition,
existing)
return existing
try:
fileutil.mkdir(mount_point, mode=0o755)
except OSError as ose:
msg = "Failed to create mount point " \
"directory [{0}]: {1}".format(mount_point, ose)
logger.error(msg)
raise ResourceDiskError(msg=msg, inner=ose)
logger.info("Examining partition table")
ret = shellutil.run_get_output("parted {0} print".format(device))
if ret[0]:
raise ResourceDiskError("Could not determine partition info for "
"{0}: {1}".format(device, ret[1]))
force_option = 'F'
if self.fs == 'xfs':
force_option = 'f'
mkfs_string = "mkfs.{0} -{2} {1}".format(
self.fs, partition, force_option)
if "gpt" in ret[1]:
logger.info("GPT detected, finding partitions")
parts = [x for x in ret[1].split("\n") if
re.match(r"^\s*[0-9]+", x)]
logger.info("Found {0} GPT partition(s).", len(parts))
if len(parts) > 1:
logger.info("Removing old GPT partitions")
for i in range(1, len(parts) + 1):
logger.info("Remove partition {0}", i)
shellutil.run("parted {0} rm {1}".format(device, i))
logger.info("Creating new GPT partition")
shellutil.run(
"parted {0} mkpart primary 0% 100%".format(device))
logger.info("Format partition [{0}]", mkfs_string)
shellutil.run(mkfs_string)
else:
logger.info("GPT not detected, determining filesystem")
ret = self.change_partition_type(
suppress_message=True,
option_str="{0} 1 -n".format(device))
ptype = ret[1].strip()
if ptype == "7" and self.fs != "ntfs":
logger.info("The partition is formatted with ntfs, updating "
"partition type to 83")
self.change_partition_type(
suppress_message=False,
option_str="{0} 1 83".format(device))
self.reread_partition_table(device)
logger.info("Format partition [{0}]", mkfs_string)
shellutil.run(mkfs_string)
else:
logger.info("The partition type is {0}", ptype)
mount_options = conf.get_resourcedisk_mountoptions()
mount_string = self.get_mount_string(mount_options,
partition,
mount_point)
attempts = 5
while not os.path.exists(partition) and attempts > 0:
logger.info("Waiting for partition [{0}], {1} attempts remaining",
partition,
attempts)
sleep(5)
attempts -= 1
if not os.path.exists(partition):
raise ResourceDiskError(
"Partition was not created [{0}]".format(partition))
logger.info("Mount resource disk [{0}]", mount_string)
ret, output = shellutil.run_get_output(mount_string, chk_err=False)
# if the exit code is 32, then the resource disk can be already mounted
if ret == 32 and output.find("is already mounted") != -1:
logger.warn("Could not mount resource disk: {0}", output)
elif ret != 0:
# Some kernels seem to issue an async partition re-read after a
# 'parted' command invocation. This causes mount to fail if the
# partition re-read is not complete by the time mount is
# attempted. Seen in CentOS 7.2. Force a sequential re-read of
# the partition and try mounting.
logger.warn("Failed to mount resource disk. "
"Retry mounting after re-reading partition info.")
self.reread_partition_table(device)
ret, output = shellutil.run_get_output(mount_string, chk_err=False)
if ret:
logger.warn("Failed to mount resource disk. "
"Attempting to format and retry mount. [{0}]",
output)
shellutil.run(mkfs_string)
ret, output = shellutil.run_get_output(mount_string)
if ret:
raise ResourceDiskError("Could not mount {0} "
"after syncing partition table: "
"[{1}] {2}".format(partition,
ret,
output))
logger.info("Resource disk {0} is mounted at {1} with {2}",
device,
mount_point,
self.fs)
return mount_point
def change_partition_type(self, suppress_message, option_str):
"""
use sfdisk to change partition type.
First try with --part-type; if fails, fall back to -c
"""
option_to_use = '--part-type'
command = "sfdisk {0} {1} {2}".format(
option_to_use, '-f' if suppress_message else '', option_str)
err_code, output = shellutil.run_get_output(
command, chk_err=False, log_cmd=True)
# fall back to -c
if err_code != 0:
logger.info(
"sfdisk with --part-type failed [{0}], retrying with -c",
err_code)
option_to_use = '-c'
command = "sfdisk {0} {1} {2}".format(
option_to_use, '-f' if suppress_message else '', option_str)
err_code, output = shellutil.run_get_output(command, log_cmd=True)
if err_code == 0:
logger.info('{0} succeeded',
command)
else:
logger.error('{0} failed [{1}: {2}]',
command,
err_code,
output)
return err_code, output
def get_mount_string(self, mount_options, partition, mount_point):
if mount_options is not None:
return 'mount -t {0} -o {1} {2} {3}'.format(
self.fs,
mount_options,
partition,
mount_point
)
else:
return 'mount -t {0} {1} {2}'.format(
self.fs,
partition,
mount_point
)
@staticmethod
def check_existing_swap_file(swapfile, swaplist, size):
if swapfile in swaplist and os.path.isfile(
swapfile) and os.path.getsize(swapfile) == size:
logger.info("Swap already enabled")
# restrict access to owner (remove all access from group, others)
swapfile_mode = os.stat(swapfile).st_mode
if swapfile_mode & (stat.S_IRWXG | stat.S_IRWXO):
swapfile_mode = swapfile_mode & ~(stat.S_IRWXG | stat.S_IRWXO)
logger.info(
"Changing mode of {0} to {1:o}".format(
swapfile, swapfile_mode))
os.chmod(swapfile, swapfile_mode)
return True
return False
def create_swap_space(self, mount_point, size_mb):
size_kb = size_mb * 1024
size = size_kb * 1024
swapfile = os.path.join(mount_point, 'swapfile')
swaplist = shellutil.run_get_output("swapon -s")[1]
if self.check_existing_swap_file(swapfile, swaplist, size):
return
if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size:
logger.info("Remove old swap file")
shellutil.run("swapoff {0}".format(swapfile), chk_err=False)
os.remove(swapfile)
if not os.path.isfile(swapfile):
logger.info("Create swap file")
self.mkfile(swapfile, size_kb * 1024)
shellutil.run("mkswap {0}".format(swapfile))
if shellutil.run("swapon {0}".format(swapfile)):
raise ResourceDiskError("{0}".format(swapfile))
logger.info("Enabled {0}KB of swap at {1}".format(size_kb, swapfile))
def mkfile(self, filename, nbytes):
"""
        Create a non-sparse file of that size. Deletes and replaces existing
        file.
        To allow efficient execution, fallocate will be tried first. This
        includes ``os.posix_fallocate`` on Python 3.3+ (unix) and the
        ``fallocate`` command in the popular ``util-linux{,-ng}`` package.
        A dd fallback will be tried too. When size < 64M, perform
        single-pass dd. Otherwise do two-pass dd.
"""
if not isinstance(nbytes, int):
nbytes = int(nbytes)
if nbytes <= 0:
raise ResourceDiskError("Invalid swap size [{0}]".format(nbytes))
if os.path.isfile(filename):
os.remove(filename)
# If file system is xfs, use dd right away as we have been reported that
# swap enabling fails in xfs fs when disk space is allocated with
# fallocate
ret = 0
fn_sh = shellutil.quote((filename,))
if self.fs not in ['xfs', 'ext4']:
# os.posix_fallocate
if sys.version_info >= (3, 3):
# Probable errors:
# - OSError: Seen on Cygwin, libc notimpl?
# - AttributeError: What if someone runs this under...
fd = None
try:
fd = os.open(
filename,
os.O_CREAT | os.O_WRONLY | os.O_EXCL,
stat.S_IRUSR | stat.S_IWUSR)
os.posix_fallocate(fd, 0, nbytes) # pylint: disable=no-member
return 0
except BaseException:
# Not confident with this thing, just keep trying...
pass
finally:
if fd is not None:
os.close(fd)
# fallocate command
ret = shellutil.run(
u"umask 0077 && fallocate -l {0} {1}".format(nbytes, fn_sh))
if ret == 0:
return ret
logger.info("fallocate unsuccessful, falling back to dd")
# dd fallback
dd_maxbs = 64 * 1024 ** 2
dd_cmd = "umask 0077 && dd if=/dev/zero bs={0} count={1} " \
"conv=notrunc of={2}"
blocks = int(nbytes / dd_maxbs)
if blocks > 0:
ret = shellutil.run(dd_cmd.format(dd_maxbs, blocks, fn_sh)) << 8
remains = int(nbytes % dd_maxbs)
if remains > 0:
ret += shellutil.run(dd_cmd.format(remains, 1, fn_sh))
if ret == 0:
logger.info("dd successful")
else:
logger.error("dd unsuccessful")
return ret
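# A minimal sketch of how mkfile splits the dd fallback (illustrative sizes):
# for a 200 MiB swap file, three full 64 MiB blocks are written in the first
# pass and an 8 MiB remainder in the second pass.
def _example_dd_split(nbytes=200 * 1024 ** 2):
    dd_maxbs = 64 * 1024 ** 2
    blocks = int(nbytes / dd_maxbs)   # 3 full blocks
    remains = int(nbytes % dd_maxbs)  # 8 MiB remainder
    return blocks, remains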
|
reservation.py
|
# Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""This module contains client/server methods to manage node reservations during TFCluster startup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import os
import pickle
import select
import socket
import struct
import sys
import threading
import time
from . import util
logger = logging.getLogger(__name__)
TFOS_SERVER_PORT = "TFOS_SERVER_PORT"
TFOS_SERVER_HOST = "TFOS_SERVER_HOST"
BUFSIZE = 1024
MAX_RETRIES = 3
class Reservations:
"""Thread-safe store for node reservations.
Args:
:required: expected number of nodes in the cluster.
"""
def __init__(self, required):
self.required = required
self.lock = threading.RLock()
self.reservations = []
def add(self, meta):
"""Add a reservation.
Args:
      :meta: a dictionary of metadata about a node
"""
with self.lock:
self.reservations.append(meta)
def done(self):
"""Returns True if the ``required`` number of reservations have been fulfilled."""
with self.lock:
return len(self.reservations) >= self.required
def get(self):
"""Get the list of current reservations."""
with self.lock:
return self.reservations
def remaining(self):
"""Get a count of remaining/unfulfilled reservations."""
with self.lock:
return self.required - len(self.reservations)
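# A minimal usage sketch for Reservations (illustrative metadata): three nodes
# register, after which done() is True and remaining() drops to zero.
def _example_reservations():
  r = Reservations(3)
  for host in ('node1', 'node2', 'node3'):
    r.add({'host': host})
  assert r.done() and r.remaining() == 0
  return r.get()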
class MessageSocket(object):
"""Abstract class w/ length-prefixed socket send/receive functions."""
def receive(self, sock):
"""Receive a message on ``sock``."""
msg = None
data = b''
recv_done = False
recv_len = -1
while not recv_done:
buf = sock.recv(BUFSIZE)
if buf is None or len(buf) == 0:
raise Exception("socket closed")
if recv_len == -1:
recv_len = struct.unpack('>I', buf[:4])[0]
data += buf[4:]
recv_len -= len(data)
else:
data += buf
recv_len -= len(buf)
recv_done = (recv_len == 0)
msg = pickle.loads(data)
return msg
def send(self, sock, msg):
"""Send ``msg`` to destination ``sock``."""
data = pickle.dumps(msg)
buf = struct.pack('>I', len(data)) + data
sock.sendall(buf)
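# A minimal sketch of the wire format used by MessageSocket: a 4-byte
# big-endian length prefix followed by the pickled payload. No socket is
# involved here; the frame is built and parsed directly.
def _example_message_framing():
  payload = pickle.dumps({'type': 'QUERY'})
  frame = struct.pack('>I', len(payload)) + payload
  assert struct.unpack('>I', frame[:4])[0] == len(payload)
  return pickle.loads(frame[4:])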
class Server(MessageSocket):
"""Simple socket server with length-prefixed pickle messages.
Args:
:count: expected number of nodes in the cluster.
"""
reservations = None #: List of reservations managed by this server.
done = False #: boolean indicating if server should be shutdown.
def __init__(self, count):
assert count > 0, "Expected number of reservations should be greater than zero"
self.reservations = Reservations(count)
def await_reservations(self, sc, status={}, timeout=600):
"""Block until all reservations are received."""
timespent = 0
while not self.reservations.done():
logger.info("waiting for {0} reservations".format(self.reservations.remaining()))
# check status flags for any errors
if 'error' in status:
sc.cancelAllJobs()
sc.stop()
sys.exit(1)
time.sleep(1)
timespent += 1
if (timespent > timeout):
raise Exception("timed out waiting for reservations to complete")
logger.info("all reservations completed")
return self.reservations.get()
def _handle_message(self, sock, msg):
logger.debug("received: {0}".format(msg))
msg_type = msg['type']
if msg_type == 'REG':
self.reservations.add(msg['data'])
MessageSocket.send(self, sock, 'OK')
elif msg_type == 'QUERY':
MessageSocket.send(self, sock, self.reservations.done())
elif msg_type == 'QINFO':
rinfo = self.reservations.get()
MessageSocket.send(self, sock, rinfo)
elif msg_type == 'STOP':
logger.info("setting server.done")
MessageSocket.send(self, sock, 'OK')
self.done = True
else:
MessageSocket.send(self, sock, 'ERR')
def start(self):
"""Start listener in a background thread
Returns:
address of the Server as a tuple of (host, port)
"""
server_sock = self.start_listening_socket()
# hostname may not be resolvable but IP address probably will be
host = self.get_server_ip()
port = server_sock.getsockname()[1]
addr = (host, port)
logger.info("listening for reservations at {0}".format(addr))
def _listen(self, sock):
CONNECTIONS = []
CONNECTIONS.append(sock)
while not self.done:
read_socks, write_socks, err_socks = select.select(CONNECTIONS, [], [], 60)
for sock in read_socks:
if sock == server_sock:
client_sock, client_addr = sock.accept()
CONNECTIONS.append(client_sock)
logger.debug("client connected from {0}".format(client_addr))
else:
try:
msg = self.receive(sock)
self._handle_message(sock, msg)
except Exception as e:
logger.debug(e)
sock.close()
CONNECTIONS.remove(sock)
server_sock.close()
t = threading.Thread(target=_listen, args=(self, server_sock))
t.daemon = True
t.start()
return addr
def get_server_ip(self):
"""Returns the value of TFOS_SERVER_HOST environment variable (if set), otherwise defaults to current host/IP."""
return os.getenv(TFOS_SERVER_HOST, util.get_ip_address())
def get_server_ports(self):
"""Returns a list of target ports as defined in the TFOS_SERVER_PORT environment (if set), otherwise defaults to 0 (any port).
TFOS_SERVER_PORT should be either a single port number or a range, e.g. '8888' or '9997-9999'
"""
port_string = os.getenv(TFOS_SERVER_PORT, "0")
if '-' not in port_string:
return [int(port_string)]
else:
ports = port_string.split('-')
if len(ports) != 2:
raise Exception("Invalid TFOS_SERVER_PORT: {}".format(port_string))
return list(range(int(ports[0]), int(ports[1]) + 1))
def start_listening_socket(self):
"""Starts the registration server socket listener."""
port_list = self.get_server_ports()
for port in port_list:
try:
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_sock.bind(('', port))
server_sock.listen(10)
logger.info("Reservation server binding to port {}".format(port))
break
except Exception as e:
logger.warn("Unable to bind to port {}, error {}".format(port, e))
server_sock = None
pass
if not server_sock:
raise Exception("Reservation server unable to bind to any ports, port_list = {}".format(port_list))
return server_sock
def stop(self):
"""Stop the Server's socket listener."""
self.done = True
class Client(MessageSocket):
"""Client to register and await node reservations.
Args:
:server_addr: a tuple of (host, port) pointing to the Server.
"""
sock = None #: socket to server TCP connection
server_addr = None #: address of server
def __init__(self, server_addr):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(server_addr)
self.server_addr = server_addr
logger.info("connected to server at {0}".format(server_addr))
def _request(self, msg_type, msg_data=None):
"""Helper function to wrap msg w/ msg_type."""
msg = {}
msg['type'] = msg_type
if msg_data:
msg['data'] = msg_data
done = False
tries = 0
while not done and tries < MAX_RETRIES:
try:
MessageSocket.send(self, self.sock, msg)
done = True
except socket.error as e:
tries += 1
if tries >= MAX_RETRIES:
raise
print("Socket error: {}".format(e))
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(self.server_addr)
logger.debug("sent: {0}".format(msg))
resp = MessageSocket.receive(self, self.sock)
logger.debug("received: {0}".format(resp))
return resp
def close(self):
"""Close the client socket."""
self.sock.close()
def register(self, reservation):
"""Register ``reservation`` with server."""
resp = self._request('REG', reservation)
return resp
def get_reservations(self):
"""Get current list of reservations."""
cluster_info = self._request('QINFO')
return cluster_info
def await_reservations(self):
"""Poll until all reservations completed, then return cluster_info."""
done = False
while not done:
done = self._request('QUERY')
time.sleep(1)
return self.get_reservations()
def request_stop(self):
"""Request server stop."""
resp = self._request('STOP')
return resp
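# Illustrative usage sketch (not part of the original module). It assumes a Server
# object has already been started elsewhere and that `server_addr` is the
# (host, port) tuple it returned; the reservation dict keys are placeholders, not
# a documented schema.
def _example_reservation_client(server_addr):
    client = Client(server_addr)
    client.register({'executor_id': 0})          # announce this node to the Server
    cluster_info = client.await_reservations()   # block until every node has registered
    client.request_stop()                        # ask the Server to shut down its listener
    client.close()
    return cluster_info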
|
server.py
|
#!/usr/bin/env python
# Author:
# Muhammad Shahbaz (muhammad.shahbaz@gatech.edu)
import logging
from multiprocessing import Queue
from multiprocessing.connection import Listener
from threading import Thread
''' bgp server '''
class server(object):
def __init__(self, logger):
self.logger = logger
        self.listener = Listener(('localhost', 6000), authkey=b'xrs', backlog=100)
self.sender_queue = Queue()
self.receiver_queue = Queue()
def start(self):
self.conn = self.listener.accept()
self.logger.debug('Connection accepted from '+str(self.listener.last_accepted))
self.sender = Thread(target=_sender, args=(self.conn,self.sender_queue))
self.sender.start()
self.receiver = Thread(target=_receiver, args=(self.conn,self.receiver_queue))
self.receiver.start()
''' sender '''
def _sender(conn,queue):
while True:
try:
line = queue.get()
conn.send(line)
except:
pass
''' receiver '''
def _receiver(conn,queue):
while True:
try:
line = conn.recv()
queue.put(line)
except:
pass
''' main '''
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    xrs_server = server(logging.getLogger('xrs'))
    xrs_server.start()
    while True:
        try:
            print(xrs_server.receiver_queue.get())
            xrs_server.sender_queue.put('announce route %s next-hop %s as-path [ %s ]' % ('200.0.0.0/16', '172.0.0.1', '100'))
        except Exception:
            print('thread ended')
            break
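# Illustrative counterpart (not part of the original file): a peer process can
# attach to the Listener above with multiprocessing.connection.Client, using the
# same address and authkey (as bytes under Python 3), then exchange picklable
# messages over conn.send()/conn.recv(). The message text is a placeholder.
def _example_peer():
    from multiprocessing.connection import Client
    conn = Client(('localhost', 6000), authkey=b'xrs')
    conn.send('neighbor 172.0.0.1 up')   # lands on the server's receiver_queue
    print(conn.recv())                   # e.g. the announce line pushed onto sender_queue
    conn.close()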
|
server.py
|
import socket
from threading import Thread
from time import sleep
server = socket.socket()
# binding to port 120 (a privileged port on most systems) may require elevated permissions
server.bind((socket.gethostbyname(socket.gethostname()), 120))
print(f"your ip is: {socket.gethostbyname(socket.gethostname())}")
clients={
"cls":[],
"closed":[]
}
l=True
def broadcast(msg):
    print("broadcasting")
    print(msg)
    global clients
    for i in clients["cls"]:
        i.send(msg.encode())
def keep(client):
global l
_closed=True
while l and _closed:
client.send("hb".encode())
sleep(5)
if clients["closed"][clients["cls"].index(client)] != True:
_closed=False
client.close()
def receive(client):
global l
bruh=True
print("receiver online")
client.send(f"i:{ clients['cls'].index(client)}".encode())
while l and bruh:
try:
msg = client.recv(64).decode()
if msg:
msg=msg.split(":")
if msg != ["hb"]:print(msg)
if msg[0] == "p":
                    broadcast(f"s:{msg[1]}:{msg[2]}")
if msg[0] == "quit":
index=clients["cls"].index(client)
clients["closed"].pop(index)
clients["closed"].insert(index,False)
except:
pass
def main():
global l
server.listen()
while True:
try:
client,ip=server.accept()
print(ip)
clients["cls"].append(client)
clients["closed"].append(True)
keeper=Thread(target=keep,args= (client,))
receiver=Thread(target=receive ,args=(client,))
keeper.start()
receiver.start()
except:
pass
print ("starting")
mainer=Thread(target=main)
mainer.start()
while l:
if input("stop?(y/n)") == "y":
l=False
print("please press Ctrl+C to correctly stop")
|
credentials.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import datetime
import logging
import os
import getpass
import threading
import json
import subprocess
from collections import namedtuple
from copy import deepcopy
from hashlib import sha1
from dateutil.parser import parse
from dateutil.tz import tzlocal
import ibm_botocore.configloader
import ibm_botocore.compat
from ibm_botocore.compat import total_seconds
from ibm_botocore.compat import compat_shell_split
from ibm_botocore.exceptions import UnknownCredentialError
from ibm_botocore.exceptions import PartialCredentialsError
from ibm_botocore.exceptions import ConfigNotFound
from ibm_botocore.exceptions import InvalidConfigError
from ibm_botocore.exceptions import InfiniteLoopConfigError
from ibm_botocore.exceptions import RefreshWithMFAUnsupportedError
from ibm_botocore.exceptions import MetadataRetrievalError
from ibm_botocore.exceptions import CredentialRetrievalError
from ibm_botocore.utils import InstanceMetadataFetcher, parse_key_val_file
from ibm_botocore.utils import ContainerMetadataFetcher
logger = logging.getLogger(__name__)
ReadOnlyCredentials = namedtuple('ReadOnlyCredentials',
['access_key', 'secret_key', 'token'])
def create_credential_resolver(session, cache=None):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
credential_file = session.get_config_variable('credentials_file')
cos_credentials_file = session.get_config_variable('cos_credentials_file')
config_file = session.get_config_variable('config_file')
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
if cache is None:
cache = {}
env_provider = EnvProvider()
cos_provider = IbmCosCredentialsProvider(ibm_credentials_filename=cos_credentials_file)
container_provider = ContainerProvider()
instance_metadata_provider = InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts,
user_agent=session.user_agent())
)
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=session.create_client,
cache=cache,
profile_name=profile_name,
credential_sourcer=CanonicalNameCredentialSourcer([
env_provider, container_provider, instance_metadata_provider
])
)
providers = [
env_provider,
cos_provider,
assume_role_provider,
SharedCredentialProvider(
creds_filename=credential_file,
profile_name=profile_name
),
ProcessProvider(profile_name=profile_name,
load_config=lambda: session.full_config),
# The new config file has precedence over the legacy
# config file.
ConfigProvider(config_filename=config_file, profile_name=profile_name),
OriginalEC2Provider(),
BotoProvider(),
container_provider,
instance_metadata_provider
]
if session.instance_variables().get('profile') is not None:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
        # The one edge case is if all three values are provided via
        # env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
providers.remove(cos_provider)
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = CredentialResolver(providers=providers)
return resolver
def get_credentials(session):
resolver = create_credential_resolver(session)
return resolver.load_credentials()
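# Illustrative sketch (not part of the original module): the resolver is normally
# driven through a session object; get_session() is assumed to mirror botocore's
# ibm_botocore.session API.
def _example_load_credentials():
    import ibm_botocore.session
    session = ibm_botocore.session.get_session()
    creds = get_credentials(session)          # walks the provider chain built above
    if creds is not None:
        frozen = creds.get_frozen_credentials()
        return frozen.access_key
    return None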
def _local_now():
return datetime.datetime.now(tzlocal())
def _parse_if_needed(value):
if isinstance(value, datetime.datetime):
return value
return parse(value)
def _serialize_if_needed(value, iso=False):
if isinstance(value, datetime.datetime):
if iso:
return value.isoformat()
return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
return value
def create_assume_role_refresher(client, params):
def refresh():
response = client.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
def create_mfa_serial_refresher(actual_refresh):
class _Refresher(object):
def __init__(self, refresh):
self._refresh = refresh
self._has_been_called = False
def __call__(self):
if self._has_been_called:
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
self._has_been_called = True
return self._refresh()
return _Refresher(actual_refresh)
class JSONFileCache(object):
"""JSON file cache.
This provides a dict like interface that stores JSON serializable
objects.
The objects are serialized to JSON and stored in a file. These
values can be retrieved at a later time.
"""
CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache'))
def __init__(self, working_dir=CACHE_DIR):
self._working_dir = working_dir
def __contains__(self, cache_key):
actual_key = self._convert_cache_key(cache_key)
return os.path.isfile(actual_key)
def __getitem__(self, cache_key):
"""Retrieve value from a cache key."""
actual_key = self._convert_cache_key(cache_key)
try:
with open(actual_key) as f:
return json.load(f)
except (OSError, ValueError, IOError):
raise KeyError(cache_key)
def __setitem__(self, cache_key, value):
full_key = self._convert_cache_key(cache_key)
try:
file_content = json.dumps(value, default=_serialize_if_needed)
except (TypeError, ValueError):
raise ValueError("Value cannot be cached, must be "
"JSON serializable: %s" % value)
if not os.path.isdir(self._working_dir):
os.makedirs(self._working_dir)
with os.fdopen(os.open(full_key,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.truncate()
f.write(file_content)
def _convert_cache_key(self, cache_key):
full_path = os.path.join(self._working_dir, cache_key + '.json')
return full_path
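# Illustrative sketch (not part of the original module): JSONFileCache behaves like
# a dict whose values are persisted as JSON files, by default under ~/.aws/boto/cache.
def _example_json_file_cache(tmp_dir):
    cache = JSONFileCache(working_dir=tmp_dir)
    cache['assume-role-cache-key'] = {
        'Credentials': {
            'AccessKeyId': 'AKIDEXAMPLE',
            'SecretAccessKey': 'secret',
            'SessionToken': 'token',
            'Expiration': '2030-01-01T00:00:00Z',
        }
    }
    if 'assume-role-cache-key' in cache:
        return cache['assume-role-cache-key']
    return None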
class Credentials(object):
"""
Holds the credentials needed to authenticate requests.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
def __init__(self, access_key, secret_key, token=None,
method=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
if method is None:
method = 'explicit'
self.method = method
self._normalize()
def _normalize(self):
# Keys would sometimes (accidentally) contain non-ascii characters.
# It would cause a confusing UnicodeDecodeError in Python 2.
# We explicitly convert them into unicode to avoid such error.
#
# Eventually the service will decide whether to accept the credential.
# This also complies with the behavior in Python 3.
self.access_key = ibm_botocore.compat.ensure_unicode(self.access_key)
self.secret_key = ibm_botocore.compat.ensure_unicode(self.secret_key)
def get_frozen_credentials(self):
return ReadOnlyCredentials(self.access_key,
self.secret_key,
self.token)
class RefreshableCredentials(Credentials):
"""
Holds the credentials needed to authenticate requests. In addition, it
knows how to refresh itself.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
# The time at which we'll attempt to refresh, but not
# block if someone else is refreshing.
_advisory_refresh_timeout = 15 * 60
# The time at which all threads will block waiting for
# refreshed credentials.
_mandatory_refresh_timeout = 10 * 60
def __init__(self, access_key, secret_key, token,
expiry_time, refresh_using, method,
time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = access_key
self._secret_key = secret_key
self._token = token
self._expiry_time = expiry_time
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = ReadOnlyCredentials(
access_key, secret_key, token)
self._normalize()
def _normalize(self):
self._access_key = ibm_botocore.compat.ensure_unicode(self._access_key)
self._secret_key = ibm_botocore.compat.ensure_unicode(self._secret_key)
@classmethod
def create_from_metadata(cls, metadata, refresh_using, method):
instance = cls(
access_key=metadata['access_key'],
secret_key=metadata['secret_key'],
token=metadata['token'],
expiry_time=cls._expiry_datetime(metadata['expiry_time']),
method=method,
refresh_using=refresh_using
)
return instance
@property
def access_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._token
@token.setter
def token(self, value):
self._token = value
def _seconds_remaining(self):
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def refresh_needed(self, refresh_in=None):
"""Check if a refresh is needed.
A refresh is needed if the expiry time associated
with the temporary credentials is less than the
        provided ``refresh_in``. If ``refresh_in`` is not
        provided, ``self._advisory_refresh_timeout`` will be used.
For example, if your temporary credentials expire
in 10 minutes and the provided ``refresh_in`` is
``15 * 60``, then this function will return ``True``.
:type refresh_in: int
:param refresh_in: The number of seconds before the
credentials expire in which refresh attempts should
be made.
        :return: True if refresh needed, False otherwise.
"""
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
if refresh_in is None:
refresh_in = self._advisory_refresh_timeout
        # The credentials should be refreshed if they're going to expire
        # within the ``refresh_in`` window.
if self._seconds_remaining() >= refresh_in:
# There's enough time left. Don't refresh.
return False
logger.debug("Credentials need to be refreshed.")
return True
def _is_expired(self):
# Checks if the current credentials are expired.
return self.refresh_needed(refresh_in=0)
def _refresh(self):
# In the common case where we don't need a refresh, we
# can immediately exit and not require acquiring the
# refresh lock.
if not self.refresh_needed(self._advisory_refresh_timeout):
return
# acquire() doesn't accept kwargs, but False is indicating
# that we should not block if we can't acquire the lock.
# If we aren't able to acquire the lock, we'll trigger
# the else clause.
if self._refresh_lock.acquire(False):
try:
if not self.refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self.refresh_needed(
self._mandatory_refresh_timeout)
self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
finally:
self._refresh_lock.release()
elif self.refresh_needed(self._mandatory_refresh_timeout):
# If we're within the mandatory refresh window,
# we must block until we get refreshed credentials.
with self._refresh_lock:
if not self.refresh_needed(self._mandatory_refresh_timeout):
return
self._protected_refresh(is_mandatory=True)
def _protected_refresh(self, is_mandatory):
# precondition: this method should only be called if you've acquired
# the self._refresh_lock.
try:
metadata = self._refresh_using()
except Exception as e:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
# If this is a mandatory refresh, then
# all errors that occur when we attempt to refresh
# credentials are propagated back to the user.
raise
# Otherwise we'll just return.
# The end result will be that we'll use the current
# set of temporary credentials we have.
return
self._set_from_data(metadata)
self._frozen_credentials = ReadOnlyCredentials(
self._access_key, self._secret_key, self._token)
if self._is_expired():
# We successfully refreshed credentials but for whatever
# reason, our refreshing function returned credentials
# that are still expired. In this scenario, the only
# thing we can do is let the user know and raise
# an exception.
msg = ("Credentials were refreshed, but the "
"refreshed credentials are still expired.")
logger.warning(msg)
raise RuntimeError(msg)
@staticmethod
def _expiry_datetime(time_str):
return parse(time_str)
def _set_from_data(self, data):
expected_keys = ['access_key', 'secret_key', 'token', 'expiry_time']
if not data:
missing_keys = expected_keys
else:
missing_keys = [k for k in expected_keys if k not in data]
if missing_keys:
message = "Credential refresh failed, response did not contain: %s"
raise CredentialRetrievalError(
provider=self.method,
error_msg=message % ', '.join(missing_keys),
)
self.access_key = data['access_key']
self.secret_key = data['secret_key']
self.token = data['token']
self._expiry_time = parse(data['expiry_time'])
logger.debug("Retrieved credentials will expire at: %s",
self._expiry_time)
self._normalize()
def get_frozen_credentials(self):
"""Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request)
"""
self._refresh()
return self._frozen_credentials
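# Illustrative sketch (not part of the original module): RefreshableCredentials is
# normally built from a refresh callable that returns the metadata dict expected by
# _set_from_data(); fetch_temporary_creds and the method name are placeholders.
def _example_refreshable_credentials(fetch_temporary_creds):
    metadata = fetch_temporary_creds()          # must contain access_key, secret_key, token, expiry_time
    creds = RefreshableCredentials.create_from_metadata(
        metadata,
        refresh_using=fetch_temporary_creds,    # called again once the advisory/mandatory windows are hit
        method='custom-refresh',
    )
    return creds.get_frozen_credentials()       # snapshot a consistent key/secret/token triple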
class DeferredRefreshableCredentials(RefreshableCredentials):
"""Refreshable credentials that don't require initial credentials.
refresh_using will be called upon first access.
"""
def __init__(self, refresh_using, method, time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = None
self._secret_key = None
self._token = None
self._expiry_time = None
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = None
def refresh_needed(self, refresh_in=None):
if self._frozen_credentials is None:
return True
return super(DeferredRefreshableCredentials, self).refresh_needed(
refresh_in
)
class CachedCredentialFetcher(object):
def __init__(self, cache=None, expiry_window_seconds=60 * 15):
if cache is None:
cache = {}
self._cache = cache
self._cache_key = self._create_cache_key()
self._expiry_window_seconds = expiry_window_seconds
def _create_cache_key(self):
raise NotImplementedError('_create_cache_key()')
def _make_file_safe(self, filename):
        # Replace ':', the path separator, and '/' so the string is safe
        # to use as a filename.
filename = filename.replace(':', '_').replace(os.path.sep, '_')
return filename.replace('/', '_')
def _get_credentials(self):
raise NotImplementedError('_get_credentials()')
def fetch_credentials(self):
return self._get_cached_credentials()
def _get_cached_credentials(self):
"""Get up-to-date credentials.
This will check the cache for up-to-date credentials, calling assume
role if none are available.
"""
response = self._load_from_cache()
if response is None:
response = self._get_credentials()
self._write_to_cache(response)
else:
logger.debug("Credentials for role retrieved from cache.")
creds = response['Credentials']
expiration = _serialize_if_needed(creds['Expiration'], iso=True)
return {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': expiration,
}
def _load_from_cache(self):
if self._cache_key in self._cache:
creds = deepcopy(self._cache[self._cache_key])
if not self._is_expired(creds):
return creds
else:
logger.debug(
"Credentials were found in cache, but they are expired."
)
return None
def _write_to_cache(self, response):
self._cache[self._cache_key] = deepcopy(response)
def _is_expired(self, credentials):
"""Check if credentials are expired."""
end_time = _parse_if_needed(credentials['Credentials']['Expiration'])
seconds = total_seconds(end_time - _local_now())
return seconds < self._expiry_window_seconds
class AssumeRoleCredentialFetcher(CachedCredentialFetcher):
def __init__(self, client_creator, source_credentials, role_arn,
extra_args=None, mfa_prompter=None, cache=None,
expiry_window_seconds=60 * 15):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type source_credentials: Credentials
:param source_credentials: The credentials to use to create the
client for the call to AssumeRole.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the ibm_botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type mfa_prompter: callable
:param mfa_prompter: A callable that returns input provided by the
user (i.e raw_input, getpass.getpass, etc.).
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' stated expiration at which they are
            treated as expired and will be refreshed.
"""
self._client_creator = client_creator
self._source_credentials = source_credentials
self._role_arn = role_arn
if extra_args is None:
self._assume_kwargs = {}
else:
self._assume_kwargs = deepcopy(extra_args)
self._assume_kwargs['RoleArn'] = self._role_arn
self._role_session_name = self._assume_kwargs.get('RoleSessionName')
self._using_default_session_name = False
if not self._role_session_name:
self._role_session_name = 'botocore-session-%s' % (
int(time.time()))
self._assume_kwargs['RoleSessionName'] = self._role_session_name
self._using_default_session_name = True
self._mfa_prompter = mfa_prompter
if self._mfa_prompter is None:
self._mfa_prompter = getpass.getpass
super(AssumeRoleCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = deepcopy(self._assume_kwargs)
# The role session name gets randomly generated, so we don't want it
# in the hash.
if self._using_default_session_name:
del args['RoleSessionName']
if 'Policy' in args:
# To have a predictable hash, the keys of the policy must be
# sorted, so we have to load it here to make sure it gets sorted
# later on.
args['Policy'] = json.loads(args['Policy'])
args = json.dumps(args, sort_keys=True)
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
client = self._create_client()
return client.assume_role(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
mfa_serial = assume_role_kwargs.get('SerialNumber')
if mfa_serial is not None:
prompt = 'Enter MFA code for %s: ' % mfa_serial
token_code = self._mfa_prompter(prompt)
assume_role_kwargs['TokenCode'] = token_code
duration_seconds = assume_role_kwargs.get('DurationSeconds')
if duration_seconds is not None:
assume_role_kwargs['DurationSeconds'] = duration_seconds
return assume_role_kwargs
def _create_client(self):
"""Create an STS client using the source credentials."""
frozen_credentials = self._source_credentials.get_frozen_credentials()
return self._client_creator(
'sts',
aws_access_key_id=frozen_credentials.access_key,
aws_secret_access_key=frozen_credentials.secret_key,
aws_session_token=frozen_credentials.token,
)
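# Illustrative sketch (not part of the original module): the fetcher combines a
# client factory, source credentials and a role ARN; the session argument, role ARN
# and session name below are placeholders.
def _example_assume_role_fetcher(session, source_credentials):
    fetcher = AssumeRoleCredentialFetcher(
        client_creator=session.create_client,
        source_credentials=source_credentials,
        role_arn='arn:aws:iam::123456789012:role/ExampleRole',
        extra_args={'RoleSessionName': 'example-session'},
        cache=JSONFileCache(),
    )
    return fetcher.fetch_credentials()   # dict with access_key/secret_key/token/expiry_time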
class CredentialProvider(object):
# A short name to identify the provider within ibm_botocore.
METHOD = None
# A name to identify the provider for use in cross-sdk features like
# assume role's `credential_source` configuration option. These names
# are to be treated in a case-insensitive way. NOTE: any providers not
# implemented in ibm_botocore MUST prefix their canonical names with
# 'custom' or we DO NOT guarantee that it will work with any features
# that this provides.
CANONICAL_NAME = None
def __init__(self, session=None):
self.session = session
def load(self):
"""
Loads the credentials from their source & sets them on the object.
Subclasses should implement this method (by reading from disk, the
environment, the network or wherever), returning ``True`` if they were
found & loaded.
        If not found, this method should return ``False``, indicating that the
``CredentialResolver`` should fall back to the next available method.
The default implementation does nothing, assuming the user has set the
``access_key/secret_key/token`` themselves.
:returns: Whether credentials were found & set
:rtype: Credentials
"""
return True
def _extract_creds_from_mapping(self, mapping, *key_names):
found = []
for key_name in key_names:
# ibm_service_instance_id and ibm_auth_endpoint are optional; append None in list
if key_name.lower() in ['ibm_service_instance_id','ibm_auth_endpoint'] and key_name not in mapping:
found.append(None)
else:
try:
found.append(mapping[key_name])
except KeyError:
raise PartialCredentialsError(provider=self.METHOD, cred_var=key_name)
return found
class ProcessProvider(CredentialProvider):
METHOD = 'custom-process'
def __init__(self, profile_name, load_config, popen=subprocess.Popen):
self._profile_name = profile_name
self._load_config = load_config
self._loaded_config = None
self._popen = popen
def load(self):
credential_process = self._credential_process
if credential_process is None:
return
creds_dict = self._retrieve_credentials_using(credential_process)
if creds_dict.get('expiry_time') is not None:
return RefreshableCredentials.create_from_metadata(
creds_dict,
lambda: self._retrieve_credentials_using(credential_process),
self.METHOD
)
return Credentials(
access_key=creds_dict['access_key'],
secret_key=creds_dict['secret_key'],
token=creds_dict.get('token'),
method=self.METHOD
)
def _retrieve_credentials_using(self, credential_process):
# We're not using shell=True, so we need to pass the
# command and all arguments as a list.
process_list = compat_shell_split(credential_process)
p = self._popen(process_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise CredentialRetrievalError(
provider=self.METHOD, error_msg=stderr.decode('utf-8'))
parsed = ibm_botocore.compat.json.loads(stdout.decode('utf-8'))
version = parsed.get('Version', '<Version key not provided>')
if version != 1:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg=("Unsupported version '%s' for credential process "
"provider, supported versions: 1" % version))
try:
return {
'access_key': parsed['AccessKeyId'],
'secret_key': parsed['SecretAccessKey'],
'token': parsed.get('SessionToken'),
'expiry_time': parsed.get('Expiration'),
}
except KeyError as e:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg="Missing required key in response: %s" % e
)
@property
def _credential_process(self):
if self._loaded_config is None:
self._loaded_config = self._load_config()
profile_config = self._loaded_config.get(
'profiles', {}).get(self._profile_name, {})
return profile_config.get('credential_process')
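# Illustrative note (not part of the original module): a credential_process command
# configured for a profile must print a JSON document like the one below on stdout
# and exit 0; 'Expiration' is optional and, when present, makes the credentials
# refreshable.
#
#   {
#     "Version": 1,
#     "AccessKeyId": "AKIDEXAMPLE",
#     "SecretAccessKey": "secret",
#     "SessionToken": "token",
#     "Expiration": "2030-01-01T00:00:00Z"
#   }
def _example_process_provider(session_full_config):
    provider = ProcessProvider(profile_name='default',
                               load_config=lambda: session_full_config)
    return provider.load()   # None unless the profile defines credential_process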
class InstanceMetadataProvider(CredentialProvider):
METHOD = 'iam-role'
CANONICAL_NAME = 'Ec2InstanceMetadata'
def __init__(self, iam_role_fetcher):
self._role_fetcher = iam_role_fetcher
def load(self):
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
creds = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class EnvProvider(CredentialProvider):
METHOD = 'env'
CANONICAL_NAME = 'Environment'
ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
IBM_COS_API_KEY_ID = 'IBM_API_KEY_ID'
IBM_COS_SERVICE_INSTANCE_ID = 'IBM_SERVICE_INSTANCE_ID'
IBM_COS_AUTH_ENDPOINT = 'IBM_AUTH_ENDPOINT'
    # The token can come from either of these env vars.
# AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION'
def __init__(self, environ=None, mapping=None):
"""
:param environ: The environment variables (defaults to
``os.environ`` if no value is provided).
:param mapping: An optional mapping of variable names to
environment variable names. Use this if you want to
change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
            The dict can contain any of the keys ``access_key``, ``secret_key``,
            ``token``, ``expiry_time``, ``ibm_api_key_id``,
            ``ibm_service_instance_id`` and ``ibm_auth_endpoint``.
"""
if environ is None:
environ = os.environ
self.environ = environ
self._mapping = self._build_mapping(mapping)
def _build_mapping(self, mapping):
# Mapping of variable name to env var name.
var_mapping = {}
if mapping is None:
# Use the class var default.
var_mapping['access_key'] = self.ACCESS_KEY
var_mapping['secret_key'] = self.SECRET_KEY
var_mapping['ibm_api_key_id'] = self.IBM_COS_API_KEY_ID
var_mapping['ibm_service_instance_id'] = self.IBM_COS_SERVICE_INSTANCE_ID
var_mapping['ibm_auth_endpoint'] = self.IBM_COS_AUTH_ENDPOINT
var_mapping['token'] = self.TOKENS
var_mapping['expiry_time'] = self.EXPIRY_TIME
else:
var_mapping['access_key'] = mapping.get(
'access_key', self.ACCESS_KEY)
var_mapping['secret_key'] = mapping.get(
'secret_key', self.SECRET_KEY)
var_mapping['ibm_api_key_id'] = mapping.get(
'ibm_api_key_id', self.IBM_COS_API_KEY_ID)
var_mapping['ibm_service_instance_id'] = mapping.get(
'ibm_service_instance_id', self.IBM_COS_SERVICE_INSTANCE_ID)
var_mapping['ibm_auth_endpoint'] = mapping.get(
'ibm_auth_endpoint', self.IBM_COS_AUTH_ENDPOINT)
var_mapping['token'] = mapping.get(
'token', self.TOKENS)
if not isinstance(var_mapping['token'], list):
var_mapping['token'] = [var_mapping['token']]
var_mapping['expiry_time'] = mapping.get(
'expiry_time', self.EXPIRY_TIME)
return var_mapping
def load(self):
"""
Search for credentials in explicit environment variables.
"""
if self._mapping['ibm_api_key_id'] in self.environ:
logger.info('Found IBM credentials in environment variables.')
ibm_api_key_id, ibm_service_instance_id, ibm_auth_endpoint = self._extract_creds_from_mapping(
self.environ, self._mapping['ibm_api_key_id'],
self._mapping['ibm_service_instance_id'],
self._mapping['ibm_auth_endpoint'])
return OAuth2Credentials(api_key_id=ibm_api_key_id,
service_instance_id=ibm_service_instance_id,
auth_endpoint=ibm_auth_endpoint,
method=self.METHOD)
elif self._mapping['access_key'] in self.environ:
logger.info('Found credentials in environment variables.')
fetcher = self._create_credentials_fetcher()
credentials = fetcher(require_expiry=False)
expiry_time = credentials['expiry_time']
if expiry_time is not None:
expiry_time = parse(expiry_time)
return RefreshableCredentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], expiry_time,
refresh_using=fetcher, method=self.METHOD
)
return Credentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], method=self.METHOD
)
else:
return None
def _create_credentials_fetcher(self):
mapping = self._mapping
method = self.METHOD
environ = self.environ
def fetch_credentials(require_expiry=True):
credentials = {}
access_key = environ.get(mapping['access_key'])
if access_key is None:
raise PartialCredentialsError(
provider=method, cred_var=mapping['access_key'])
credentials['access_key'] = access_key
secret_key = environ.get(mapping['secret_key'])
if secret_key is None:
raise PartialCredentialsError(
provider=method, cred_var=mapping['secret_key'])
credentials['secret_key'] = secret_key
token = None
for token_env_var in mapping['token']:
if token_env_var in environ:
token = environ[token_env_var]
break
credentials['token'] = token
expiry_time = environ.get(mapping['expiry_time'])
if require_expiry and expiry_time is None:
raise PartialCredentialsError(
provider=method, cred_var=mapping['expiry_time'])
credentials['expiry_time'] = expiry_time
return credentials
return fetch_credentials
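# Illustrative sketch (not part of the original module): EnvProvider can be pointed
# at any mapping-like object, which makes it easy to exercise without touching
# os.environ; the key values are placeholders.
def _example_env_provider():
    fake_environ = {
        'AWS_ACCESS_KEY_ID': 'AKIDEXAMPLE',
        'AWS_SECRET_ACCESS_KEY': 'secret',
        'AWS_SESSION_TOKEN': 'token',
    }
    provider = EnvProvider(environ=fake_environ)
    creds = provider.load()   # plain Credentials; add AWS_CREDENTIAL_EXPIRATION for refreshable ones
    return creds.get_frozen_credentials()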
class OriginalEC2Provider(CredentialProvider):
METHOD = 'ec2-credentials-file'
CANONICAL_NAME = 'Ec2Config'
CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
ACCESS_KEY = 'AWSAccessKeyId'
SECRET_KEY = 'AWSSecretKey'
def __init__(self, environ=None, parser=None):
if environ is None:
environ = os.environ
if parser is None:
parser = parse_key_val_file
self._environ = environ
self._parser = parser
def load(self):
"""
Search for a credential file used by original EC2 CLI tools.
"""
if 'AWS_CREDENTIAL_FILE' in self._environ:
full_path = os.path.expanduser(
self._environ['AWS_CREDENTIAL_FILE'])
creds = self._parser(full_path)
if self.ACCESS_KEY in creds:
logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
access_key = creds[self.ACCESS_KEY]
secret_key = creds[self.SECRET_KEY]
# EC2 creds file doesn't support session tokens.
return Credentials(access_key, secret_key, method=self.METHOD)
else:
return None
class SharedCredentialProvider(CredentialProvider):
METHOD = 'shared-credentials-file'
CANONICAL_NAME = 'SharedCredentials'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
IBM_COS_API_KEY_ID = 'ibm_api_key_id'
IBM_COS_SERVICE_INSTANCE_ID = 'ibm_service_instance_id'
IBM_COS_AUTH_ENDPOINT = 'ibm_auth_endpoint'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, creds_filename, profile_name=None, ini_parser=None, hmac_takes_precedence=False):
self._creds_filename = creds_filename
if profile_name is None:
profile_name = 'default'
self._profile_name = profile_name
if ini_parser is None:
ini_parser = ibm_botocore.configloader.raw_config_parse
self._ini_parser = ini_parser
self._hmac_takes_precedence = hmac_takes_precedence
def load_ibm_cos_credentials(self, config):
if self._hmac_takes_precedence and self.ACCESS_KEY in config:
logger.info('HMAC takes precedence.')
return False
return self.IBM_COS_API_KEY_ID in config
def load_hmac_credentials(self, config):
return self.ACCESS_KEY in config
def load(self):
try:
available_creds = self._ini_parser(self._creds_filename)
except ConfigNotFound:
return None
if self._profile_name in available_creds:
config = available_creds[self._profile_name]
if self.load_ibm_cos_credentials(config):
logger.info("Found IBMCOS credentials in shared credentials file: %s",
self._creds_filename)
ibm_api_key_id, ibm_service_instance_id, ibm_auth_endpoint = self._extract_creds_from_mapping(
config, self.IBM_COS_API_KEY_ID,
self.IBM_COS_SERVICE_INSTANCE_ID,
self.IBM_COS_AUTH_ENDPOINT)
token = self._get_session_token(config)
return OAuth2Credentials(api_key_id=ibm_api_key_id,
service_instance_id=ibm_service_instance_id,
auth_endpoint=ibm_auth_endpoint,
method=self.METHOD)
elif self.load_hmac_credentials(config):
logger.info("Found credentials in shared credentials file: %s",
self._creds_filename)
access_key, secret_key = self._extract_creds_from_mapping(
config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self, config):
for token_envvar in self.TOKENS:
if token_envvar in config:
return config[token_envvar]
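# Illustrative note (not part of the original module): the shared credentials file
# parsed above is a plain INI document; an IBM COS profile and an HMAC profile look
# roughly like this (all values are placeholders):
#
#   [default]
#   ibm_api_key_id = <api key>
#   ibm_service_instance_id = <resource instance id>
#   ibm_auth_endpoint = https://iam.cloud.ibm.com/identity/token
#
#   [hmac]
#   aws_access_key_id = AKIDEXAMPLE
#   aws_secret_access_key = secret
def _example_shared_credentials(creds_path):
    provider = SharedCredentialProvider(creds_filename=creds_path, profile_name='hmac')
    return provider.load()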
class ConfigProvider(CredentialProvider):
"""INI based config provider with profile sections."""
METHOD = 'config-file'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
IBM_COS_API_KEY_ID = 'ibm_api_key_id'
IBM_COS_SERVICE_INSTANCE_ID = 'ibm_service_instance_id'
IBM_COS_AUTH_ENDPOINT = 'ibm_auth_endpoint'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, config_filename, profile_name, config_parser=None):
"""
:param config_filename: The session configuration scoped to the current
profile. This is available via ``session.config``.
:param profile_name: The name of the current profile.
:param config_parser: A config parser callable.
"""
self._config_filename = config_filename
self._profile_name = profile_name
if config_parser is None:
config_parser = ibm_botocore.configloader.load_config
self._config_parser = config_parser
def load(self):
"""
        If there are credentials in the configuration associated with
        the session, use those.
"""
try:
full_config = self._config_parser(self._config_filename)
except ConfigNotFound:
return None
if self._profile_name in full_config['profiles']:
profile_config = full_config['profiles'][self._profile_name]
if self.IBM_COS_API_KEY_ID in profile_config:
logger.info("IBM Credentials found in AWS config file: %s",
self._config_filename)
ibm_api_key_id, ibm_service_instance_id, ibm_auth_endpoint = self._extract_creds_from_mapping(
profile_config, self.IBM_COS_API_KEY_ID, self.IBM_COS_SERVICE_INSTANCE_ID, self.IBM_COS_AUTH_ENDPOINT)
return OAuth2Credentials(api_key_id=ibm_api_key_id,
service_instance_id=ibm_service_instance_id,
auth_endpoint=ibm_auth_endpoint,
method=self.METHOD)
elif self.ACCESS_KEY in profile_config:
logger.info("Credentials found in AWS config file: %s",
self._config_filename)
access_key, secret_key = self._extract_creds_from_mapping(
profile_config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(profile_config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self, profile_config):
for token_name in self.TOKENS:
if token_name in profile_config:
return profile_config[token_name]
class BotoProvider(CredentialProvider):
METHOD = 'boto-config'
CANONICAL_NAME = 'Boto2Config'
BOTO_CONFIG_ENV = 'BOTO_CONFIG'
DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
IBM_COS_API_KEY_ID = 'ibm_api_key_id'
IBM_COS_SERVICE_INSTANCE_ID = 'ibm_service_instance_id'
IBM_COS_AUTH_ENDPOINT = 'ibm_auth_endpoint'
def __init__(self, environ=None, ini_parser=None):
if environ is None:
environ = os.environ
if ini_parser is None:
ini_parser = ibm_botocore.configloader.raw_config_parse
self._environ = environ
self._ini_parser = ini_parser
def load(self):
"""
Look for credentials in boto config file.
"""
if self.BOTO_CONFIG_ENV in self._environ:
potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
else:
potential_locations = self.DEFAULT_CONFIG_FILENAMES
for filename in potential_locations:
try:
config = self._ini_parser(filename)
except ConfigNotFound:
# Move on to the next potential config file name.
continue
if 'Credentials' in config:
credentials = config['Credentials']
if self.IBM_COS_API_KEY_ID in credentials:
logger.info("Found IBM credentials in boto config file: %s", filename)
ibm_api_key_id, ibm_service_instance_id, ibm_auth_endpoint = self._extract_creds_from_mapping(credentials,
self.IBM_COS_API_KEY_ID,
self.IBM_COS_SERVICE_INSTANCE_ID,
self.IBM_COS_AUTH_ENDPOINT)
return OAuth2Credentials(api_key_id=ibm_api_key_id,
service_instance_id=ibm_service_instance_id,
auth_endpoint=ibm_auth_endpoint,
method=self.METHOD)
elif self.ACCESS_KEY in credentials:
logger.info("Found credentials in boto config file: %s",
filename)
access_key, secret_key = self._extract_creds_from_mapping(
credentials, self.ACCESS_KEY, self.SECRET_KEY)
return Credentials(access_key, secret_key,
method=self.METHOD)
class AssumeRoleProvider(CredentialProvider):
METHOD = 'assume-role'
# The AssumeRole provider is logically part of the SharedConfig and
# SharedCredentials providers. Since the purpose of the canonical name
# is to provide cross-sdk compatibility, calling code will need to be
# aware that either of those providers should be tied to the AssumeRole
# provider as much as possible.
CANONICAL_NAME = None
ROLE_CONFIG_VAR = 'role_arn'
# Credentials are considered expired (and will be refreshed) once the total
# remaining time left until the credentials expires is less than the
# EXPIRY_WINDOW.
EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, load_config, client_creator, cache, profile_name,
prompter=getpass.getpass, credential_sourcer=None):
"""
:type load_config: callable
:param load_config: A function that accepts no arguments, and
when called, will return the full configuration dictionary
for the session (``session.full_config``).
:type client_creator: callable
:param client_creator: A factory function that will create
a client when called. Has the same interface as
``ibm_botocore.session.Session.create_client``.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example
of this is the ``JSONFileCache`` class in the CLI.
:type profile_name: str
:param profile_name: The name of the profile.
:type prompter: callable
:param prompter: A callable that returns input provided
by the user (i.e raw_input, getpass.getpass, etc.).
:type credential_sourcer: CanonicalNameCredentialSourcer
:param credential_sourcer: A credential provider that takes a
configuration, which is used to provide the source credentials
for the STS call.
"""
#: The cache used to first check for assumed credentials.
#: This is checked before making the AssumeRole API
#: calls and can be useful if you have short lived
#: scripts and you'd like to avoid calling AssumeRole
#: until the credentials are expired.
self.cache = cache
self._load_config = load_config
        # client_creator is a callable that creates a client.
        # It's basically session.create_client.
self._client_creator = client_creator
self._profile_name = profile_name
self._prompter = prompter
# The _loaded_config attribute will be populated from the
# load_config() function once the configuration is actually
# loaded. The reason we go through all this instead of just
        # requiring that the loaded_config be passed to us is so that
        # we can defer loading the configuration until we actually try
# to load credentials (as opposed to when the object is
# instantiated).
self._loaded_config = {}
self._credential_sourcer = credential_sourcer
self._visited_profiles = [self._profile_name]
def load(self):
self._loaded_config = self._load_config()
profiles = self._loaded_config.get('profiles', {})
profile = profiles.get(self._profile_name, {})
if self._has_assume_role_config_vars(profile):
return self._load_creds_via_assume_role(self._profile_name)
def _has_assume_role_config_vars(self, profile):
return self.ROLE_CONFIG_VAR in profile
def _load_creds_via_assume_role(self, profile_name):
role_config = self._get_role_config(profile_name)
source_credentials = self._resolve_source_credentials(
role_config, profile_name
)
extra_args = {}
role_session_name = role_config.get('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
external_id = role_config.get('external_id')
if external_id is not None:
extra_args['ExternalId'] = external_id
mfa_serial = role_config.get('mfa_serial')
if mfa_serial is not None:
extra_args['SerialNumber'] = mfa_serial
duration_seconds = role_config.get('duration_seconds')
if duration_seconds is not None:
extra_args['DurationSeconds'] = duration_seconds
fetcher = AssumeRoleCredentialFetcher(
client_creator=self._client_creator,
source_credentials=source_credentials,
role_arn=role_config['role_arn'],
extra_args=extra_args,
mfa_prompter=self._prompter,
cache=self.cache,
)
refresher = fetcher.fetch_credentials
if mfa_serial is not None:
refresher = create_mfa_serial_refresher(refresher)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=refresher,
time_fetcher=_local_now
)
def _get_role_config(self, profile_name):
"""Retrieves and validates the role configuration for the profile."""
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
source_profile = profile.get('source_profile')
role_arn = profile['role_arn']
credential_source = profile.get('credential_source')
mfa_serial = profile.get('mfa_serial')
external_id = profile.get('external_id')
role_session_name = profile.get('role_session_name')
duration_seconds = profile.get('duration_seconds')
role_config = {
'role_arn': role_arn,
'external_id': external_id,
'mfa_serial': mfa_serial,
'role_session_name': role_session_name,
'source_profile': source_profile,
'credential_source': credential_source
}
if duration_seconds is not None:
try:
role_config['duration_seconds'] = int(duration_seconds)
except ValueError:
pass
# Either the credential source or the source profile must be
# specified, but not both.
if credential_source is not None and source_profile is not None:
raise InvalidConfigError(
error_msg=(
'The profile "%s" contains both source_profile and '
'credential_source.' % profile_name
)
)
elif credential_source is None and source_profile is None:
raise PartialCredentialsError(
provider=self.METHOD,
cred_var='source_profile or credential_source'
)
elif credential_source is not None:
self._validate_credential_source(
profile_name, credential_source)
else:
self._validate_source_profile(profile_name, source_profile)
return role_config
def _validate_credential_source(self, parent_profile, credential_source):
if self._credential_sourcer is None:
raise InvalidConfigError(error_msg=(
'The credential_source "%s" is specified in profile "%s", '
'but no source provider was configured.' % (
credential_source, parent_profile)
))
if not self._credential_sourcer.is_supported(credential_source):
raise InvalidConfigError(error_msg=(
'The credential source "%s" referenced in profile "%s" is not '
'valid.' % (credential_source, parent_profile)
))
def _source_profile_has_credentials(self, profile):
return any([
self._has_static_credentials(profile),
self._has_assume_role_config_vars(profile),
])
def _validate_source_profile(self, parent_profile_name,
source_profile_name):
profiles = self._loaded_config.get('profiles', {})
if source_profile_name not in profiles:
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" referenced in '
'the profile "%s" does not exist.' % (
source_profile_name, parent_profile_name)
)
)
source_profile = profiles[source_profile_name]
# Ensure the profile has valid credential type
if not self._source_profile_has_credentials(source_profile):
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" must specify either static '
'credentials or an assume role configuration' % (
source_profile_name)
)
)
# Make sure we aren't going into an infinite loop. If we haven't
# visited the profile yet, we're good.
if source_profile_name not in self._visited_profiles:
return
# If we have visited the profile and the profile isn't simply
# referencing itself, that's an infinite loop.
if source_profile_name != parent_profile_name:
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
# A profile is allowed to reference itself so that it can source
# static credentials and have configuration all in the same
# profile. This will only ever work for the top level assume
# role because the static credentials will otherwise take
# precedence.
if not self._has_static_credentials(source_profile):
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
def _has_static_credentials(self, profile):
static_keys = ['aws_secret_access_key', 'aws_access_key_id']
return any(static_key in profile for static_key in static_keys)
def _resolve_source_credentials(self, role_config, profile_name):
credential_source = role_config.get('credential_source')
if credential_source is not None:
return self._resolve_credentials_from_source(
credential_source, profile_name
)
source_profile = role_config['source_profile']
self._visited_profiles.append(source_profile)
return self._resolve_credentials_from_profile(source_profile)
def _resolve_credentials_from_profile(self, profile_name):
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
if self._has_static_credentials(profile):
return self._resolve_static_credentials_from_profile(profile)
return self._load_creds_via_assume_role(profile_name)
def _resolve_static_credentials_from_profile(self, profile):
try:
return Credentials(
access_key=profile['aws_access_key_id'],
secret_key=profile['aws_secret_access_key'],
token=profile.get('aws_session_token')
)
except KeyError as e:
raise PartialCredentialsError(
provider=self.METHOD, cred_var=str(e))
def _resolve_credentials_from_source(self, credential_source,
profile_name):
credentials = self._credential_sourcer.source_credentials(
credential_source)
if credentials is None:
raise CredentialRetrievalError(
provider=credential_source,
error_msg=(
'No credentials found in credential_source referenced '
'in profile %s' % profile_name
)
)
return credentials
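# Illustrative note (not part of the original module): AssumeRoleProvider is driven
# purely by profile configuration; a typical ~/.aws/config pair looks like this
# (profile names and the ARN are placeholders):
#
#   [profile base]
#   aws_access_key_id = AKIDEXAMPLE
#   aws_secret_access_key = secret
#
#   [profile admin]
#   role_arn = arn:aws:iam::123456789012:role/Admin
#   source_profile = base
#   role_session_name = example-session
def _example_assume_role_provider(session, cache):
    provider = AssumeRoleProvider(
        load_config=lambda: session.full_config,
        client_creator=session.create_client,
        cache=cache,
        profile_name='admin',
    )
    return provider.load()   # DeferredRefreshableCredentials; AssumeRole runs on first access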
class CanonicalNameCredentialSourcer(object):
def __init__(self, providers):
self._providers = providers
def is_supported(self, source_name):
"""Validates a given source name.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: bool
:returns: True if the credential provider is supported,
False otherwise.
"""
return source_name in [p.CANONICAL_NAME for p in self._providers]
def source_credentials(self, source_name):
"""Loads source credentials based on the provided configuration.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: Credentials
"""
source = self._get_provider(source_name)
if isinstance(source, CredentialResolver):
return source.load_credentials()
return source.load()
def _get_provider(self, canonical_name):
"""Return a credential provider by its canonical name.
:type canonical_name: str
:param canonical_name: The canonical name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
provider = self._get_provider_by_canonical_name(canonical_name)
# The AssumeRole provider should really be part of the SharedConfig
# provider rather than being its own thing, but it is not. It is
# effectively part of both the SharedConfig provider and the
# SharedCredentials provider now due to the way it behaves.
# Therefore if we want either of those providers we should return
# the AssumeRole provider with it.
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
assume_role_provider = self._get_provider_by_method('assume-role')
if assume_role_provider is not None:
# The SharedConfig or SharedCredentials provider may not be
# present if it was removed for some reason, but the
# AssumeRole provider could still be present. In that case,
# return the assume role provider by itself.
if provider is None:
return assume_role_provider
# If both are present, return them both as a
# CredentialResolver so that calling code can treat them as
# a single entity.
return CredentialResolver([assume_role_provider, provider])
if provider is None:
raise UnknownCredentialError(name=canonical_name)
return provider
def _get_provider_by_canonical_name(self, canonical_name):
"""Return a credential provider by its canonical name.
This function is strict, it does not attempt to address
compatibility issues.
"""
for provider in self._providers:
name = provider.CANONICAL_NAME
# Canonical names are case-insensitive
if name and name.lower() == canonical_name.lower():
return provider
def _get_provider_by_method(self, method):
"""Return a credential provider by its METHOD name."""
for provider in self._providers:
if provider.METHOD == method:
return provider
class ContainerProvider(CredentialProvider):
METHOD = 'container-role'
CANONICAL_NAME = 'EcsContainer'
ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI'
ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN'
def __init__(self, environ=None, fetcher=None):
if environ is None:
environ = os.environ
if fetcher is None:
fetcher = ContainerMetadataFetcher()
self._environ = environ
self._fetcher = fetcher
def load(self):
# This cred provider is only triggered if the self.ENV_VAR is set,
# which only happens if you opt into this feature.
if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
return self._retrieve_or_fail()
def _retrieve_or_fail(self):
if self._provided_relative_uri():
full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
else:
full_uri = self._environ[self.ENV_VAR_FULL]
headers = self._build_headers()
fetcher = self._create_fetcher(full_uri, headers)
creds = fetcher()
return RefreshableCredentials(
access_key=creds['access_key'],
secret_key=creds['secret_key'],
token=creds['token'],
method=self.METHOD,
expiry_time=_parse_if_needed(creds['expiry_time']),
refresh_using=fetcher,
)
def _build_headers(self):
headers = {}
auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN)
if auth_token is not None:
return {
'Authorization': auth_token
}
def _create_fetcher(self, full_uri, headers):
def fetch_creds():
try:
response = self._fetcher.retrieve_full_uri(
full_uri, headers=headers)
except MetadataRetrievalError as e:
logger.debug("Error retrieving container metadata: %s", e,
exc_info=True)
raise CredentialRetrievalError(provider=self.METHOD,
error_msg=str(e))
return {
'access_key': response['AccessKeyId'],
'secret_key': response['SecretAccessKey'],
'token': response['Token'],
'expiry_time': response['Expiration'],
}
return fetch_creds
def _provided_relative_uri(self):
return self.ENV_VAR in self._environ
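# Illustrative sketch (not part of the original module): ContainerProvider only
# activates when one of the AWS_CONTAINER_CREDENTIALS_* variables is present; the
# relative URI below is a placeholder.
def _example_container_provider():
    fake_environ = {'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI': '/v2/credentials/example'}
    provider = ContainerProvider(environ=fake_environ)
    return provider.load()   # RefreshableCredentials fetched from the container metadata endpoint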
class CredentialResolver(object):
def __init__(self, providers):
"""
:param providers: A list of ``CredentialProvider`` instances.
"""
self.providers = providers
def insert_before(self, name, credential_provider):
"""
Inserts a new instance of ``CredentialProvider`` into the chain that
will be tried before an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials before. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
:param cred_instance: An instance of the new ``Credentials`` object
you'd like to add to the chain.
:type cred_instance: A subclass of ``Credentials``
"""
try:
offset = [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
self.providers.insert(offset, credential_provider)
def insert_after(self, name, credential_provider):
"""
Inserts a new type of ``Credentials`` instance into the chain that will
be tried after an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials after. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
:param cred_instance: An instance of the new ``Credentials`` object
you'd like to add to the chain.
:type cred_instance: A subclass of ``Credentials``
"""
offset = self._get_provider_offset(name)
self.providers.insert(offset + 1, credential_provider)
def remove(self, name):
"""
Removes a given ``Credentials`` instance from the chain.
:param name: The short name of the credentials instance to remove.
:type name: string
"""
available_methods = [p.METHOD for p in self.providers]
if name not in available_methods:
# It's not present. Fail silently.
return
offset = available_methods.index(name)
self.providers.pop(offset)
def get_provider(self, name):
"""Return a credential provider by name.
:type name: str
:param name: The name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
return self.providers[self._get_provider_offset(name)]
def _get_provider_offset(self, name):
try:
return [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
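# --- Illustrative sketch (not part of the original module) ---
# How a caller might assemble and query the chain above. The two tiny
# providers are hypothetical; real chains are built from providers such as
# EnvProvider, SharedCredentialProvider and ContainerProvider.
def _example_resolver_chain():
    class _EmptyProvider(object):
        METHOD = 'example-empty'

        def load(self):
            return None  # nothing found; the resolver moves on

    class _StaticProvider(object):
        METHOD = 'example-static'

        def load(self):
            return Credentials('AKIAEXAMPLE', 'example-secret')

    resolver = CredentialResolver(providers=[_EmptyProvider(), _StaticProvider()])
    # The first provider returning a non-None value wins; here, the static one.
    return resolver.load_credentials()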
#######################################################################################################
# IBM IAM Credentials
#######################################################################################################
import atexit
import requests
try:
import http.client as httplib
except ImportError:
import httplib
class IbmCosCredentialsProvider(SharedCredentialProvider):
def __init__(self, ibm_credentials_filename):
self.METHOD = 'ibm-cos-credentials-file'
SharedCredentialProvider.__init__(self,
ibm_credentials_filename,
self.METHOD,
self.load_ibm_credentials_filename,
True)
def get_data(self, path):
if not os.path.isfile(path):
raise ibm_botocore.exceptions.ConfigNotFound(path=path)
with open(path, 'r') as f:
return json.load(f)
def load_ibm_credentials_filename(self, ibm_credentials_filename):
config = {}
path = ibm_credentials_filename
if path is not None:
path = os.path.expanduser(path)
_data = self.get_data(path)
try:
def set_dic_value(_sec, _name, _dic, _name1, _name2=None):
if _name1 in _dic.keys():
if not _name2:
_sec[_name] = _dic[_name1]
else:
_dic2 = _dic[_name1]
if _name2 in _dic2.keys():
_sec[_name] = _dic2[_name2]
_sec = config[self.METHOD] = {}
set_dic_value(_sec, 'aws_access_key_id', _data, 'cos_hmac_keys', 'access_key_id')
set_dic_value(_sec, 'aws_secret_access_key', _data, 'cos_hmac_keys', 'secret_access_key')
set_dic_value(_sec, 'ibm_service_instance_id', _data, 'resource_instance_id')
set_dic_value(_sec, 'ibm_api_key_id', _data, 'apikey')
set_dic_value(_sec, 'ibm_kp_root_key_crn', _data, 'iam_serviceid_crn')
# this is for testing - if the value is set in the file then use it
# otherwise the default endpoint is used -- 'https://iam.cloud.ibm.com/identity/token'
set_dic_value(_sec, 'ibm_auth_endpoint', _data, 'iam_auth_endpoint')
if 'ibm_auth_endpoint' not in _sec.keys():
_sec['ibm_auth_endpoint'] = None
except Exception as e:
raise ibm_botocore.exceptions.ConfigParseError(path=ibm_credentials_filename)
return config
class TokenManager(object):
"""An abstract base class for token managers.
Every token manager must derive from this base class
and override the get_token method.
"""
def get_token(self):
"""Returns a token, possibly retrieving it first.
When overridden in derived classes, this method always
returns a token. If the token is not available or is expired,
this method's responsibility is to retrieve or refresh it.
:return: A string representing a valid token.
"""
return None
class DefaultTokenManager(TokenManager):
"""A default implementation of token manager.
Retrieves the token on first use and holds it in memory
for further use. A background thread tries to refresh the token
prior to its expiration, so that the main thread is always
non-blocking and performant.
:ivar API_TOKEN_URL: Default URL for IAM authentication.
:ivar _advisory_refresh_timeout: The time at which we'll attempt to refresh, but not
block if someone else is refreshing.
:ivar _mandatory_refresh_timeout: The time at which all threads will block waiting for
refreshed credentials.
"""
API_TOKEN_URL = 'https://iam.cloud.ibm.com/identity/token'
_advisory_refresh_timeout = 0
_mandatory_refresh_timeout = 0
REFRESH_OVERRIDE_IN_SECS = 0 # force refresh in this number of secs
def __init__(self,
api_key_id=None,
service_instance_id=None,
auth_endpoint=None,
time_fetcher=_local_now,
auth_function=None,
verify=True):
"""Creates a new DefaultTokenManager object.
:type api_key_id: str
:param api_key_id: IBM api key used for IAM authentication.
:type service_instance_id: str
:param service_instance_id: Service Instance ID used for
PUT bucket and GET service requests.
:type auth_endpoint: str
:param auth_endpoint: URL used for IAM authentication. If not provided,
API_TOKEN_URL will be used.
:type time_fetcher: datetime
:param time_fetcher: current date and time used for calculating
expiration time for token.
:type auth_function: function
:param auth_function: function that does custom authentication
and returns json with token, refresh token, expiry time
and token type. If not provided, a default authentication
function will be used.
:type verify: boolean/string
:param verify: Whether or not to verify IAM service SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
"""
if api_key_id is None and auth_function is None:
raise RuntimeError('api_key_id and auth_function cannot both be None')
self.api_key_id = api_key_id
self.service_instance_id = service_instance_id
self.auth_endpoint = auth_endpoint
self._time_fetcher = time_fetcher
self.set_verify(verify)
self.proxies = None
if auth_function:
self.auth_function = auth_function
else:
self.auth_function = self._default_auth_function
self._refresh_lock = threading.Lock()
self._token_update_lock = threading.Lock()
self._set_cache_token()
self._background_thread = None
self._background_thread_wakeup_event = threading.Event()
self._background_thread_stopped_event = threading.Event()
self._initial_token_set_event = threading.Event()
self._shutdown = False
atexit.register(self._cleanup)
def _cleanup(self):
"""
Clean up resources
"""
self.stop_refresh_thread()
def stop_refresh_thread(self):
"""
Stop the background thread
"""
if not self._shutdown:
self._shutdown = True
if self._background_thread:
if self._background_thread.is_alive():
self.wakeup_refresh_thread()
self._background_thread_stopped_event.wait(3)
def wakeup_refresh_thread(self):
"""
Force the background thread to wakeup and refresh
"""
self._background_thread_wakeup_event.set()
def _background_refresher(self):
"""Refreshes token that's about to expire.
Runs on background thread and sleeps until _advisory_refresh_timeout
seconds before token expiration when it wakes and refreshes the token.
"""
# This method will run on background thread forever
# or until an exception forces an exit
try:
while not self._shutdown:
# We just woke up and there's a token.
# Will see if refresh is required and will then go back to sleep
remaining = self._seconds_remaining()
if remaining <= self._advisory_refresh_timeout:
self._refresh()
new_remaining = self._seconds_remaining() - self._advisory_refresh_timeout
if new_remaining <= 5: # must be at least five seconds
new_remaining = 5 # possibly an expired token; let the _refresh method throw an exception if required
logger.debug('Background refresh thread going to sleep for ' + str(new_remaining) + ' seconds')
self._background_thread_wakeup_event.clear()
self._background_thread_wakeup_event.wait(new_remaining)
except Exception as e:
logger.error("Exiting background refresh thread: " + str(e))
self._background_thread_stopped_event.set()
def get_token(self):
"""Returns a token, possibly retrieving it first.
Always returns a token. If the token is not available, it retrieves one.
It also spawns a background thread that makes sure the token
never expires.
:return: A string representing a valid token.
"""
if not self._get_cache_token():
if self._refresh_lock.acquire(False):
self._initial_token_set_event.clear();
try:
if not self._get_cache_token(): # try again another thread may have refreshed it
self._get_initial_token()
self._initial_token_set_event.set();
if self._background_thread:
# check to see if the thread is still running
if not self._background_thread.is_alive():
self._background_thread = None
if not self._background_thread:
self._background_thread = threading.Thread(target=self._background_refresher)
self._background_thread.daemon = True
self._background_thread.start()
finally:
self._initial_token_set_event.set();
self._refresh_lock.release()
else:
self._initial_token_set_event.wait(5);
return self._get_cache_token()
def set_verify(self, verify):
""" Turn on/off ssl cert verify
"""
self._verify = verify
def get_verify(self):
""" True/False - get if ssl cert verify is enabled
"""
return self._verify
def _seconds_remaining(self):
""" Seconds to expiry time
"""
if not self._expiry_time:
return -1
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def _get_token_url(self):
""" Get the IAM server URL if set.
If not set, use the default URL.
"""
if self.auth_endpoint:
return self.auth_endpoint
else:
return DefaultTokenManager.API_TOKEN_URL
def set_from_config(self, config):
""" store any values that are required from the config
"""
if config:
self.proxies = config.proxies
def _get_data(self):
""" Get the data posted to the IAM server.
If a refresh token exists, request a token refresh.
"""
if self._get_cache_refresh_token():
return {u'grant_type': u'refresh_token',
u'response_type': u'cloud_iam',
u'refresh_token': self._get_cache_refresh_token()}
else:
return {u'grant_type': u'urn:ibm:params:oauth:grant-type:apikey',
u'response_type': u'cloud_iam',
u'apikey': self.api_key_id}
def _get_headers(self):
""" Get the http headers sent to IAM server
"""
return {'accept': "application/json",
'authorization': "Basic Yng6Yng=",
'cache-control': "no-cache",
'Content-Type': "application/x-www-form-urlencoded"}
def _default_auth_function(self):
response = requests.post(
url=self._get_token_url(),
data=self._get_data(),
headers=self._get_headers(),
timeout=5,
proxies=self.proxies,
verify=self.get_verify())
if response.status_code != httplib.OK:
_msg = 'HttpCode({code}) - Retrieval of tokens from server failed.'.format(code=response.status_code)
raise CredentialRetrievalError(provider=self._get_token_url(), error_msg=_msg)
return json.loads(response.content.decode('utf-8'))
def _refresh_needed(self, refresh_in=None):
"""Check if a refresh is needed.
A refresh is needed if the expiry time associated
with the temporary credentials is less than the
provided ``refresh_in``. If ``refresh_in`` is not
provided, ``self._advisory_refresh_timeout`` will be used.
For example, if your temporary credentials expire
in 10 minutes and the provided ``refresh_in`` is
``15 * 60``, then this function will return ``True``.
:type refresh_in: int
:param refresh_in: The number of seconds before the
credentials expire in which refresh attempts should
be made.
:return: True if refresh needed, False otherwise.
"""
if self._get_cache_token() is None:
return True
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
if refresh_in is None:
refresh_in = self._advisory_refresh_timeout
# The credentials should be refreshed if they're going to expire
# in less than ``refresh_in`` seconds.
if self._seconds_remaining() >= refresh_in:
# There's enough time left. Don't refresh.
return False
logger.debug("Credentials need to be refreshed.")
return True
def _is_expired(self):
""" Checks if the current credentials are expired.
"""
return self._seconds_remaining() <= 0
def _refresh(self):
"""Initiates mandatory or advisory refresh, if needed,
This method makes sure that refresh is done in critical section,
if refresh is needed:
- if lock can be acquired, mandatory or advisory refresh
is initiated.
- if lock cannot be acquired and refresh is advisory, we cancel
our refresh action (because somebody is already doing the refresh)
- if lock cannot be acquired and refresh is mandatory, we block
until lock can be acquired (although at that point somebody else
probably did the refresh)
"""
# In the common case where we don't need a refresh, we
# can immediately exit and not require acquiring the
# refresh lock.
if not self._refresh_needed(self._advisory_refresh_timeout):
return
# acquire() doesn't accept kwargs, but False is indicating
# that we should not block if we can't acquire the lock.
# If we aren't able to acquire the lock, we'll trigger
# the else clause.
if self._refresh_lock.acquire(False):
try:
if not self._refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self._refresh_needed(
self._mandatory_refresh_timeout)
self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
finally:
self._refresh_lock.release()
elif self._refresh_needed(self._mandatory_refresh_timeout):
# If we're within the mandatory refresh window,
# we must block until we get refreshed credentials.
with self._refresh_lock:
if not self._refresh_needed(self._mandatory_refresh_timeout):
return
self._protected_refresh(is_mandatory=True)
def _protected_refresh(self, is_mandatory):
"""Performs mandatory or advisory refresh.
Precondition: this method should only be called if you've acquired
the self._refresh_lock.
"""
try:
metadata = self.auth_function()
except Exception as e:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
if self._is_expired():
self._set_cache_token() # clear the cache
raise
# if the token hasn't expired, continue to use it
return
self._set_from_data(metadata)
def _get_initial_token(self, retry_count=3, retry_delay=1):
""" get the initial token - if it fails, raise an exception
"""
_total_attempts = retry_count
while True:
try:
metadata = self.auth_function()
break
except Exception as e:
_total_attempts -= 1
if _total_attempts > 0:
logger.debug("Retrying auth call")
time.sleep(retry_delay)
else:
logger.warning("Problem fetching initial IAM token.", exc_info=True)
self._set_cache_token() # clear the cache
raise
self._set_from_data(metadata)
self._set_refresh_timeouts()
def _get_cache_refresh_token(self):
""" get the cached refresh token from previous call to IAM server
"""
return self._refresh_token
def _get_cache_token(self):
""" get the cached access token from previous call to IAM server
"""
with self._token_update_lock:
if self._token:
if self._seconds_remaining() <= 0:
return None
return self._token
def _set_cache_token(self,
access_token=None,
refresh_token=None,
token_type=None,
refresh_in_secs=None):
""" Cache the token and expiry details retrieved from the call to the IAM server.
If the token is already expired, raise an exception and return an error to the user.
"""
with self._token_update_lock:
self._token = access_token
self._refresh_token = refresh_token
self._token_type = token_type
if refresh_in_secs is None:
self._expiry_time = None
else:
_refresh_in_secs = self.REFRESH_OVERRIDE_IN_SECS if self.REFRESH_OVERRIDE_IN_SECS > 0 else refresh_in_secs
# Add expires_in to current system time.
self._expiry_time = self._time_fetcher() + datetime.timedelta(seconds=_refresh_in_secs)
if self._is_expired():
self._token = None
self._refresh_token = None
self._token_type = None
self._expiry_time = None
msg = ("Credentials were fetched successfully, but they are already expired.")
logger.warning(msg)
raise RuntimeError(msg)
logger.debug("Retrieved credentials will expire at: %s", self._expiry_time)
def _set_from_data(self, data):
""" extract required values from metadata returned from IAM server
"""
_refresh_token = data['refresh_token'] if 'refresh_token' in data else None
self._set_cache_token(data['access_token'], _refresh_token, data['token_type'], data['expires_in'])
def _set_refresh_timeouts(self):
"""
Set the advisory timeout to 25% of remaining time - usually 15 minutes on 1 hour expiry
Set the mandatory timeout to 17% of remaining time - usually 10 minutes on 1 hour expiry
"""
if self._expiry_time:
_secs = self._seconds_remaining()
self._advisory_refresh_timeout = int(_secs / (100 / 25))
self._mandatory_refresh_timeout = int(_secs / (100 / 17))
logger.debug('Refresh Timeouts set to Advisory(' +
str(self._advisory_refresh_timeout) +
') Mandatory(' +
str(self._mandatory_refresh_timeout) + ')')
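# --- Illustrative sketch (not part of the original module) ---
# Worked example of the split computed in _set_refresh_timeouts(): for a
# token with 3600 seconds of life left, the advisory window is roughly 25%
# of that (900s, so refresh attempts start about 15 minutes before expiry)
# and the mandatory window is roughly 17% (about 612s, so all threads block
# once roughly 10 minutes remain).
def _example_refresh_timeouts(secs_remaining=3600):
    advisory = int(secs_remaining / (100 / 25))
    mandatory = int(secs_remaining / (100 / 17))
    return advisory, mandatory  # roughly (900, 612) for a one-hour token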
class DelegatedTokenManager(DefaultTokenManager):
""" Requests and processes IAM delegated refresh tokens.
The delegated token is refreshed every six days. """
def __init__(self,
api_key_id=None,
service_instance_id=None,
auth_endpoint=None,
time_fetcher=_local_now,
auth_function=None,
verify=True,
receiver_client_ids=None):
super(DelegatedTokenManager, self).__init__(api_key_id,
service_instance_id,
auth_endpoint,
time_fetcher,
auth_function,
verify)
self._receiver_client_ids = receiver_client_ids
def _get_data(self):
""" Get the data posted to the IAM server.
There is currently no refresh functionality.
"""
data = {u'grant_type': u'urn:ibm:params:oauth:grant-type:apikey',
u'response_type': u'delegated_refresh_token',
u'apikey': self.api_key_id}
if self._receiver_client_ids is not None:
data[u'receiver_client_ids'] = u'%s' % self._receiver_client_ids
return data
def _set_from_data(self, data):
""" extract required values from metadata returned from IAM server """
_REFRESH_SIX_DAYS_IN_SECS = 518400  # refresh the delegated token every 6 days
self._set_cache_token(data['delegated_refresh_token'],
None,
data.get('token_type'),
_REFRESH_SIX_DAYS_IN_SECS)
class OAuth2Credentials(Credentials):
"""
Holds the credentials needed to authenticate requests with IAM. Credentials
are kept in a token manager, either the built-in one or a custom one.
"""
def __init__(self,
api_key_id=None,
service_instance_id=None,
auth_endpoint=None,
token_manager=None,
auth_function=None,
method=None,
time_fetcher=_local_now,
verify=True):
"""Creates a new OAuth2Credentials object.
:type api_key_id: str
:param api_key_id: IBM api key used for IAM authentication.
:type service_instance_id: str
:param service_instance_id: Service Instance ID used for
PUT bucket and GET service requests.
:type auth_endpoint: str
:param auth_endpoint: URL used for IAM authentication. If not provided,
API_TOKEN_URL will be used.
:type token_manager: TokenManager
:param token_manager: custom token manager to use. If not provided,
an instance of DefaultTokenManager will be used.
:type auth_function: function
:param auth_function: function that does custom authentication
and returns json with token, refresh token, expiry time
and token type. If not provided, a default authentication
function will be used.
:type method: str
:param method: A string which identifies where the credentials
were found.
:type time_fetcher: datetime
:param time_fetcher: current date and time used for calculating
expiration time for token.
:param verify: Whether or not to verify IAM service SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
"""
self.api_key_id = api_key_id
self.service_instance_id = service_instance_id
self.auth_endpoint = auth_endpoint
self.token_manager = token_manager
self.auth_function = auth_function
self.method = method
self._normalize()
# We might get three different IAM options: api key, auth function and custom token manager.
# Precedence logic is this (for now):
# 1. If api key is provided, it will be used with our builtin DefaultTokenManager.
# custom token manager and auth function are ignored.
# 2. If auth function is provided, it is used and custom token manager is ignored.
# 3. If custom token manager is provided, it is used
# 4. If nothing is provided, an error is raised.
if api_key_id:
self.auth_function = None
if token_manager or auth_function:
logger.warning('api_key_id will be used, token_manager/auth_function will be ignored')
elif auth_function:
if token_manager:
logger.warning('auth_function will be used, token_manager will be ignored')
elif token_manager:
logger.debug('token_manager will be used')
else:
raise ValueError("Either api_key_id, auth_function or token_manager must be provided")
if api_key_id or auth_function:
self.token_manager = DefaultTokenManager(self.api_key_id,
self.service_instance_id,
self.auth_endpoint,
time_fetcher,
self.auth_function,
verify)
def _normalize(self):
if self.api_key_id:
self.api_key_id = ibm_botocore.compat.ensure_unicode(self.api_key_id)
if self.service_instance_id:
self.service_instance_id = ibm_botocore.compat.ensure_unicode(self.service_instance_id)
def get_frozen_credentials(self):
"""Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request)
"""
token = self.token_manager.get_token()
# Signer is only interested in token, and besides, we might not even have api key
return ReadOnlyCredentials(
None, None, token)
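# --- Illustrative sketch (not part of the original module) ---
# The precedence rules documented in __init__ in practice: an api_key_id
# always wins and is wrapped in a DefaultTokenManager; a custom auth_function
# is used only when no api key is given; a custom TokenManager only when
# neither of the others is supplied. The auth function below is hypothetical
# and returns the JSON shape that DefaultTokenManager._set_from_data() reads.
def _example_oauth2_credentials():
    def fake_auth():
        return {'access_token': 'example-token',
                'refresh_token': 'example-refresh',
                'token_type': 'Bearer',
                'expires_in': 3600}

    # No api_key_id, so the auth_function is wrapped in a DefaultTokenManager.
    creds = OAuth2Credentials(auth_function=fake_auth, method='custom-auth')
    frozen = creds.get_frozen_credentials()  # triggers fake_auth via get_token()
    return frozen.token  # -> 'example-token'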
|
3.process_cpu.py
|
'''
Description: when running with multiple processes, the same child process may run on several CPUs.
Note: the code in this file only runs in a Linux environment.
max_workers: how many threads execute on a CPU at the same instant. If max_workers=4 and there are 16 CPU cores, at most 4 threads execute on CPUs at any one instant, but in practice all 16 CPUs may end up doing some of the work; for details see: https://www.cnblogs.com/edisonchou/p/5020681.html
cpu_num: returns the number of the CPU this process is currently running on (numbering starts at 0). This method can only be used on Linux; on Windows, set up a Linux environment first.
'''
from multiprocessing import Process, Queue, current_process
import psutil
def req1(count, result_queue: Queue):
process = psutil.Process(current_process().ident)
res = set()
for i in range(count):
key = f'pid:{current_process().ident} cpu_id:{process.cpu_num()}'
res.add(key)
result_queue.put(res)
def main1():
count = 10000
result_queue = Queue()
p1 = Process(target=req1, args=(count, result_queue))
p1.start()
p1.join()
return result_queue.get()
if __name__ == '__main__':
print(main1())
'''
As the output shows, when count=1000000 (with max_workers=4) six CPUs ran the child process's code, more than the configured 4.
count=10000, output: {'pid:817 cpu_id:3', 'pid:817 cpu_id:2'}
count=100000, output: {'pid:820 cpu_id:12', 'pid:820 cpu_id:5', 'pid:820 cpu_id:4'}
count=1000000, output: {'pid:823 cpu_id:6', 'pid:823 cpu_id:8', 'pid:823 cpu_id:5', 'pid:823 cpu_id:7', 'pid:823 cpu_id:4', 'pid:823 cpu_id:9'}
'''
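# --- Illustrative sketch (not part of the original file) ---
# The output above shows the scheduler migrating the child process between
# CPUs. If a process really must stay on one CPU, psutil can pin it
# (Linux/Windows only); this pinned variant of req1 is hypothetical.
def req1_pinned(count, result_queue: Queue, cpu_id: int = 0):
    process = psutil.Process(current_process().ident)
    process.cpu_affinity([cpu_id])  # restrict this process to a single CPU
    res = set()
    for _ in range(count):
        res.add(f'pid:{current_process().ident} cpu_id:{process.cpu_num()}')
    result_queue.put(res)  # now only one cpu_id should ever appear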
|
job.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fix cloudpickle compatible problem we known.
import compatible_trick
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['XPARL'] = 'True'
import argparse
import cloudpickle
import pickle
import psutil
import re
import sys
import tempfile
import threading
import time
import traceback
import zmq
from multiprocessing import Process, Pipe
from parl.utils import to_str, to_byte, get_ip_address, logger
from parl.utils.communication import loads_argument, loads_return,\
dumps_argument, dumps_return
from parl.remote import remote_constants
from parl.utils.exceptions import SerializeError, DeserializeError
from parl.remote.message import InitializedJob
class Job(object):
"""Base class for the job.
After establishing connection with the remote object, the job will
create a remote class instance locally and enter an infinite loop
in a separate process, waiting for commands from the remote object.
"""
def __init__(self, worker_address):
"""
Args:
worker_address(str): worker_address for sending job information (e.g., pid)
Attributes:
pid (int): Job process ID.
max_memory (float): Maximum memory (MB) that can be used by each remote instance.
"""
self.max_memory = None
self.job_address_receiver, job_address_sender = Pipe()
self.worker_address = worker_address
self.job_ip = get_ip_address()
self.pid = os.getpid()
self.lock = threading.Lock()
self.run_job_process = Process(
target=self.run, args=(job_address_sender, ))
self.run_job_process.start()
self._create_sockets()
process = psutil.Process(self.pid)
self.init_memory = float(process.memory_info()[0]) / (1024**2)
self.run_job_process.join()
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
os._exit(1)
def _create_sockets(self):
"""Create five sockets for each job in main process.
(1) job_socket(functional socket): sends job_address and heartbeat_address to worker.
(2) ping_heartbeat_socket: replies ping message of client.
(3) worker_heartbeat_socket: replies heartbeat message of worker.
(4) client_heartbeat_socket: replies heartbeat message of client.
(5) kill_job_socket: sends a command to the corresponding worker to kill the job.
"""
# wait for another process to create reply socket
self.job_address = self.job_address_receiver.recv()
self.ctx = zmq.Context()
# create the job_socket
self.job_socket = self.ctx.socket(zmq.REQ)
self.job_socket.connect("tcp://{}".format(self.worker_address))
# a thread that replies to ping signals from the client
ping_heartbeat_socket, ping_heartbeat_address = self._create_heartbeat_server(
timeout=False)
ping_thread = threading.Thread(
target=self._reply_ping, args=(ping_heartbeat_socket, ))
ping_thread.setDaemon(True)
ping_thread.start()
# a thread that replies to heartbeat signals from the worker
worker_heartbeat_socket, worker_heartbeat_address = self._create_heartbeat_server(
)
worker_thread = threading.Thread(
target=self._reply_worker_heartbeat,
args=(worker_heartbeat_socket, ))
worker_thread.setDaemon(True)
# a thread that replies to heartbeat signals from the client
client_heartbeat_socket, client_heartbeat_address = self._create_heartbeat_server(
)
self.client_thread = threading.Thread(
target=self._reply_client_heartbeat,
args=(client_heartbeat_socket, ))
self.client_thread.setDaemon(True)
# sends job information to the worker
initialized_job = InitializedJob(
self.job_address, worker_heartbeat_address,
client_heartbeat_address, ping_heartbeat_address, None, self.pid)
self.job_socket.send_multipart(
[remote_constants.NORMAL_TAG,
cloudpickle.dumps(initialized_job)])
message = self.job_socket.recv_multipart()
worker_thread.start()
tag = message[0]
assert tag == remote_constants.NORMAL_TAG
# create the kill_job_socket
kill_job_address = to_str(message[1])
self.kill_job_socket = self.ctx.socket(zmq.REQ)
self.kill_job_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)
self.kill_job_socket.connect("tcp://{}".format(kill_job_address))
def _check_used_memory(self):
"""Check if the memory used by this job exceeds self.max_memory."""
stop_job = False
if self.max_memory is not None:
process = psutil.Process(self.pid)
used_memory = float(process.memory_info()[0]) / (1024**2)
if used_memory > self.max_memory + self.init_memory:
stop_job = True
return stop_job
def _reply_ping(self, socket):
"""Create a socket server that replies to the ping signal from the client.
This signal is used to make sure that the job is still alive.
"""
message = socket.recv_multipart()
max_memory = to_str(message[1])
if max_memory != 'None':
self.max_memory = float(max_memory)
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
self.client_thread.start()
socket.close(0)
def _create_heartbeat_server(self, timeout=True):
"""Create a socket server that will raise a timeout exception.
"""
heartbeat_socket = self.ctx.socket(zmq.REP)
if timeout:
heartbeat_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
heartbeat_socket.linger = 0
heartbeat_port = heartbeat_socket.bind_to_random_port(addr="tcp://*")
heartbeat_address = "{}:{}".format(self.job_ip, heartbeat_port)
return heartbeat_socket, heartbeat_address
def _reply_client_heartbeat(self, socket):
"""Create a socket that replies to heartbeat signals from the client.
If the job loses connection with the client, it will exit too.
"""
while True:
try:
message = socket.recv_multipart()
stop_job = self._check_used_memory()
socket.send_multipart([
remote_constants.HEARTBEAT_TAG,
to_byte(str(stop_job)),
to_byte(self.job_address)
])
if stop_job == True:
logger.error(
"Memory used by this job exceeds {}. This job will exit."
.format(self.max_memory))
time.sleep(5)
socket.close(0)
os._exit(1)
except zmq.error.Again as e:
logger.warning(
"[Job] Cannot connect to the client. This job will exit and inform the worker."
)
break
socket.close(0)
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
logger.warning("[Job]lost connection with the client, will exit")
os._exit(1)
def _reply_worker_heartbeat(self, socket):
"""Create a socket that replies to heartbeat signals from the worker.
If the worker has exited, the job will exit automatically.
"""
while True:
try:
message = socket.recv_multipart()
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
except zmq.error.Again as e:
logger.warning("[Job] Cannot connect to the worker{}. ".format(
self.worker_address) + "Job will quit.")
break
socket.close(0)
os._exit(1)
def wait_for_files(self, reply_socket, job_address):
"""Wait for python files from remote object.
When a remote object receives the allocated job address, it will send
the python files to the job. Later, the job will save these files to a
temporary directory and add that temporary directory to Python's module
search path.
Args:
reply_socket (socket): main socket to accept commands of remote object.
job_address (String): address of reply_socket.
Returns:
A temporary directory containing the python files.
"""
message = reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.SEND_FILE_TAG:
pyfiles = pickle.loads(message[1])
# save python files to temporary directory
envdir = tempfile.mkdtemp()
for file, code in pyfiles['python_files'].items():
file = os.path.join(envdir, file)
with open(file, 'wb') as code_file:
code_file.write(code)
# save other files to current directory
for file, content in pyfiles['other_files'].items():
# create directory (i.e. ./rom_files/)
if '/' in file:
try:
os.makedirs(os.path.join(*file.rsplit('/')[:-1]))
except OSError as e:
pass
with open(file, 'wb') as f:
f.write(content)
logger.info('[job] reply')
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
return envdir
else:
logger.error("NotImplementedError:{}, received tag:{}".format(
job_address, tag))
raise NotImplementedError
def wait_for_connection(self, reply_socket):
"""Wait for connection from the remote object.
The remote object will send its class information and initialization
arguments to the job, these parameters are then used to create a
local instance in the job process.
Args:
reply_socket (socket): main socket to accept commands of remote object.
Returns:
A local instance of the remote class object.
"""
message = reply_socket.recv_multipart()
tag = message[0]
obj = None
if tag == remote_constants.INIT_OBJECT_TAG:
try:
cls = cloudpickle.loads(message[1])
args, kwargs = cloudpickle.loads(message[2])
obj = cls(*args, **kwargs)
except Exception as e:
traceback_str = str(traceback.format_exc())
error_str = str(e)
logger.error("traceback:\n{}".format(traceback_str))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" + traceback_str)
])
return None
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
else:
logger.error("Message from job {}".format(message))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
b"[job] Unknown tag when trying to receive the class definition"
])
raise NotImplementedError
return obj
def run(self, job_address_sender):
"""An infinite loop waiting for a new task.
Args:
job_address_sender(sending end of multiprocessing.Pipe): send job address of reply_socket to main process.
"""
ctx = zmq.Context()
# create the reply_socket
reply_socket = ctx.socket(zmq.REP)
job_port = reply_socket.bind_to_random_port(addr="tcp://*")
reply_socket.linger = 0
job_ip = get_ip_address()
job_address = "{}:{}".format(job_ip, job_port)
job_address_sender.send(job_address)
try:
# receive source code from the actor and add its directory to Python's module search path.
envdir = self.wait_for_files(reply_socket, job_address)
sys.path.append(envdir)
obj = self.wait_for_connection(reply_socket)
assert obj is not None
self.single_task(obj, reply_socket, job_address)
except Exception as e:
logger.error(
"Error occurs when running a single task. We will reset this job. Reason:{}"
.format(e))
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
def single_task(self, obj, reply_socket, job_address):
"""An infinite loop waiting for commands from the remote object.
Each job will receive two kinds of message from the remote object:
1. When the remote object calls a function, job will run the
function on the local instance and return the results to the
remote object.
2. When the remote object is deleted, the job will quit and release
related computation resources.
Args:
reply_socket (socket): main socket to accept commands of remote object.
job_address (String): address of reply_socket.
"""
while True:
message = reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.CALL_TAG:
try:
function_name = to_str(message[1])
data = message[2]
args, kwargs = loads_argument(data)
ret = getattr(obj, function_name)(*args, **kwargs)
ret = dumps_return(ret)
reply_socket.send_multipart(
[remote_constants.NORMAL_TAG, ret])
except Exception as e:
# reset the job
error_str = str(e)
logger.error(error_str)
if type(e) == AttributeError:
reply_socket.send_multipart([
remote_constants.ATTRIBUTE_EXCEPTION_TAG,
to_byte(error_str)
])
raise AttributeError
elif type(e) == SerializeError:
reply_socket.send_multipart([
remote_constants.SERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise SerializeError
elif type(e) == DeserializeError:
reply_socket.send_multipart([
remote_constants.DESERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise DeserializeError
else:
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" +
traceback_str)
])
break
# receive KILLJOB_TAG when the actor exits, and stop replying to the worker heartbeat
elif tag == remote_constants.KILLJOB_TAG:
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
logger.warning("An actor exits and this job {} will exit.".
format(job_address))
break
else:
logger.error(
"The job receives an unknown message: {}".format(message))
raise NotImplementedError
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--worker_address", required=True, type=str, help="worker_address")
args = parser.parse_args()
job = Job(args.worker_address)
|
ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operations in eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import weakref
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
class OpsTest(test_util.TensorFlowTestCase):
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
def testMatMulGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = math_ops.matmul(three, five)
self.assertEqual([[15.0]], product.numpy())
def testExecuteStringAttr(self):
three = constant_op.constant(3.0)
checked_three = array_ops.check_numerics(three,
message='just checking')
self.assertEqual([[3]], checked_three.numpy())
def testExecuteFloatAttr(self):
three = constant_op.constant(3.0)
almost_three = constant_op.constant(2.8)
almost_equal = math_ops.approximate_equal(
three, almost_three, tolerance=0.3)
self.assertTrue(almost_equal)
def testExecuteIntAttr(self):
three = constant_op.constant(3)
four = constant_op.constant(4)
total = math_ops.add_n([three, four])
self.assertAllEqual(7, total)
def testExecuteBoolAttr(self):
three = constant_op.constant([[3]])
five = constant_op.constant([[5]])
product = math_ops.matmul(three, five, transpose_a=True)
self.assertAllEqual([[15]], product)
def testExecuteOneListOutput(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testGraphMode(self):
graph = ops.Graph()
with graph.as_default(), context.graph_mode():
array_ops.placeholder(dtypes.int32)
self.assertEqual(1, len(graph.get_operations()))
# See comments on handling of int32 tensors on GPU in
# EagerTensor.__init__.
def testInt32CPUDefault(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('/gpu:0'):
r = constant_op.constant(1) + constant_op.constant(2)
self.assertAllEqual(r, 3)
def testExecuteListOutputLen1(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen0(self):
empty = constant_op.constant([], dtype=dtypes.int32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteMultipleNonListOutput(self):
x = constant_op.constant([1, 2, 3, 4, 5, 6])
y = constant_op.constant([1, 3, 5])
result = array_ops.listdiff(x, y)
out, idx = result
self.assertTrue(out is result.out)
self.assertTrue(idx is result.idx)
self.assertAllEqual([2, 4, 6], out)
self.assertAllEqual([1, 3, 5], idx)
def testExecuteMultipleListOutput(self):
split_dim = constant_op.constant(1, dtype=dtypes.int64)
indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
dtype=dtypes.int64)
values = constant_op.constant([2, 3, 5, 7, 11])
shape = constant_op.constant([2, 7], dtype=dtypes.int64)
result = sparse_ops.gen_sparse_ops.sparse_split(
split_dim,
indices,
values,
shape,
num_split=2)
output_indices, output_values, output_shape = result
self.assertEqual(2, len(output_indices))
self.assertEqual(2, len(output_values))
self.assertEqual(2, len(output_shape))
self.assertEqual(output_indices, result.output_indices)
self.assertEqual(output_values, result.output_values)
self.assertEqual(output_shape, result.output_shape)
self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0])
self.assertAllEqual([[0, 0], [0, 1]], output_indices[1])
self.assertAllEqual([2, 7, 11], output_values[0])
self.assertAllEqual([3, 5], output_values[1])
self.assertAllEqual([2, 4], output_shape[0])
self.assertAllEqual([2, 3], output_shape[1])
# TODO(josh11b): Test an op that has multiple outputs, some but not
# all of which are lists. Examples: barrier_take_many (currently
# unsupported since it uses a type list) or sdca_optimizer (I don't
# have an example of legal inputs & outputs).
def testComposition(self):
x = constant_op.constant(1, dtype=dtypes.int32)
three_x = x + x + x
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
def testOperatorOverrides(self):
def ops_test(v1, v2):
a = constant_op.constant(v1)
b = constant_op.constant(v2)
self.assertAllEqual((-a), np.negative(v1))
self.assertAllEqual(abs(b), np.absolute(v2))
self.assertAllEqual((a + b), np.add(v1, v2))
self.assertAllEqual((a - b), np.subtract(v1, v2))
self.assertAllEqual((a * b), np.multiply(v1, v2))
self.assertAllEqual((a * a), np.multiply(v1, v1))
if all(x >= 0 for x in v2):
self.assertAllEqual((a**b), np.power(v1, v2))
self.assertAllEqual((a / b), np.true_divide(v1, v2))
self.assertAllEqual((a / a), np.true_divide(v1, v1))
self.assertAllEqual((a % b), np.mod(v1, v2))
self.assertAllEqual((a < b), np.less(v1, v2))
self.assertAllEqual((a <= b), np.less_equal(v1, v2))
self.assertAllEqual((a > b), np.greater(v1, v2))
self.assertAllEqual((a >= b), np.greater_equal(v1, v2))
self.assertAllEqual((a == b), np.equal(v1, v2)[0])
self.assertAllEqual((a != b), np.not_equal(v1, v2)[0])
self.assertAllEqual(v1[0], a[constant_op.constant(0)])
ops_test([1, 4, 8], [2, 3, 5])
ops_test([1, -4, -5], [-2, 3, -6])
def test_basic_slice(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :], t[:, :, :])
self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::])
self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1])
self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2])
self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :])
self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :])
self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1])
self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1])
self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2])
def testDegenerateSlices(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testEllipsis(self):
npt = np.array(
[[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[0:], t[0:])
# implicit ellipsis
self.assertAllEqual(npt[0:, ...], t[0:, ...])
# ellipsis alone
self.assertAllEqual(npt[...], t[...])
# ellipsis at end
self.assertAllEqual(npt[0:1, ...], t[0:1, ...])
# ellipsis at begin
self.assertAllEqual(npt[..., 0:1], t[..., 0:1])
# ellipsis at middle
self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1])
def testShrink(self):
npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3])
self.assertAllEqual(npt[..., 3], t[..., 3])
self.assertAllEqual(npt[:, 0], t[:, 0])
self.assertAllEqual(npt[:, :, 0], t[:, :, 0])
def testOpWithInputsOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The GPU kernel for the Reshape op requires that the
# shape input be on CPU.
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = array_ops.reshape(value, shape)
self.assertAllEqual([[1], [2]], reshaped.cpu())
def testInt64(self):
# Fill requires the first input to be an int32 tensor.
self.assertAllEqual(
[1.0, 1.0],
array_ops.fill(constant_op.constant([2], dtype=dtypes.int64),
constant_op.constant(1)))
def testOutputOnHostMemory(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The Shape op kernel on GPU places the output in host memory.
value = constant_op.constant([1.]).gpu()
shape = array_ops.shape(value)
self.assertEqual([1], shape.numpy())
def testSilentCopy(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# Temporarily replace the context
# pylint: disable=protected-access
del context._context
context._context = context.Context()
try:
config.set_device_policy('silent')
cpu_tensor = constant_op.constant(1.0)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
finally:
del context._context
context._context = context.Context()
# pylint: enable=protected-access
def testSoftPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# Temporarily replace the context
# pylint: disable=protected-access
del context._context
context._context = context.Context()
try:
config.set_device_policy('silent')
config.set_soft_device_placement(True)
cpu_tensor = constant_op.constant(1.0)
result = cpu_tensor + cpu_tensor
self.assertEqual(result.device,
'/job:localhost/replica:0/task:0/device:GPU:0')
finally:
del context._context
context._context = context.Context()
# pylint: enable=protected-access
def testRandomUniform(self):
scalar_shape = constant_op.constant([], dtype=dtypes.int32)
x = random_ops.random_uniform(scalar_shape)
self.assertEquals(0, x.shape.ndims)
self.assertEquals(dtypes.float32, x.dtype)
x = random_ops.random_uniform(
scalar_shape, minval=constant_op.constant(5.),
maxval=constant_op.constant(6.))
self.assertLess(x, 6)
self.assertGreaterEqual(x, 5)
def testArgsToMatchingEagerDefault(self):
# Uses default
ctx = context.context()
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32)
self.assertEquals(t, dtypes.int32)
self.assertEquals(r[0].dtype, dtypes.int32)
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64)
self.assertEquals(t, dtypes.int64)
self.assertEquals(r[0].dtype, dtypes.int64)
# Doesn't use default
t, r = execute.args_to_matching_eager(
[['string', 'arg']], ctx, dtypes.int32)
self.assertEquals(t, dtypes.string)
self.assertEquals(r[0].dtype, dtypes.string)
def testFlattenLayer(self):
flatten_layer = core.Flatten()
x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]])
y = flatten_layer(x)
self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y)
def testIdentity(self):
self.assertAllEqual(2, array_ops.identity(2))
def testIdentityOnVariable(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('/gpu:0'):
v = resource_variable_ops.ResourceVariable(True)
self.assertAllEqual(True, array_ops.identity(v))
def testIncompatibleSetShape(self):
x = constant_op.constant(1)
with self.assertRaises(ValueError):
x.set_shape((1, 2))
def testCompatibleSetShape(self):
x = constant_op.constant([[1, 2]])
x.set_shape(tensor_shape.TensorShape([None, 2]))
self.assertEqual(x.get_shape(), (1, 2))
def testCastScalarToPrimitiveTypes(self):
x = constant_op.constant(1.3)
self.assertIsInstance(int(x), int)
self.assertEqual(int(x), 1)
self.assertIsInstance(float(x), float)
self.assertAllClose(float(x), 1.3)
def testCastNonScalarToPrimitiveTypesFails(self):
x = constant_op.constant([1.3, 2])
with self.assertRaises(TypeError):
int(x)
with self.assertRaises(TypeError):
float(x)
def testRange(self):
x = constant_op.constant(2)
self.assertEqual([0, 1], list(range(x)))
def testFormatString(self):
x = constant_op.constant(3.1415)
self.assertEqual('3.14', '{:.2f}'.format(x))
def testNoOpIsNone(self):
self.assertTrue(control_flow_ops.no_op() is None)
def testEagerContextPreservedAcrossThreads(self):
def init_fn():
self.assertTrue(context.executing_eagerly())
with ops.init_scope():
self.assertTrue(context.executing_eagerly())
context_switches = context.context().context_switches
self.assertEqual(len(context_switches.stack), 1)
self.assertFalse(context_switches.stack[0].is_building_function)
self.assertEqual(context_switches.stack[0].enter_context_fn,
context.eager_mode)
self.assertTrue(context.executing_eagerly())
t1 = threading.Thread(target=init_fn)
t1.start()
t1.join()
def testWeakrefEagerTensor(self):
x = constant_op.constant([[1.]])
x.at1 = constant_op.constant([[2.]])
x.at2 = 3.
weak_x = weakref.ref(x)
weak_xat1 = weakref.ref(x.at1)
del x
self.assertIs(weak_x(), None)
self.assertIs(weak_xat1(), None)
def testWeakKeyDictionaryTensor(self):
weak_key_dict = weakref.WeakKeyDictionary()
strong_x = constant_op.constant([[1.]])
strong_y = constant_op.constant([[2.]])
weak_key_dict[strong_x] = constant_op.constant([[3.]])
weak_key_dict[strong_y] = constant_op.constant([[4.]])
strong_y.a = constant_op.constant([[5.]])
weak_x = weakref.ref(strong_x)
del strong_x
self.assertIs(weak_x(), None)
self.assertEqual([strong_y], list(weak_key_dict))
self.assertEqual(1, len(list(weak_key_dict)))
self.assertEqual(1, len(weak_key_dict))
del strong_y
self.assertEqual([], list(weak_key_dict))
if __name__ == '__main__':
test.main()
|
Udp.py
|
import socket
import threading
from PyQt5.QtCore import pyqtSignal
from Network import StopThreading
def get_host_ip() -> str:
"""Get the local host's IP address."""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
class UdpLogic:
udp_signal_write_msg = pyqtSignal(str)
udp_signal_write_info = pyqtSignal(str, int)
def __init__(self):
self.link_flag = False
self.udp_socket = None
self.address = None
self.sever_th = None
self.client_th = None
def udp_server_start(self, port) -> None:
"""
Start the UDP server.
"""
self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = int(port)
address = ('', port)
self.udp_socket.bind(address)
self.sever_th = threading.Thread(target=self.udp_server_concurrency)
self.sever_th.start()
msg = 'UDP server is listening on port: {}\n'.format(port)
self.udp_signal_write_msg.emit(msg)
def udp_server_concurrency(self) -> None:
"""
Runs in a dedicated thread and keeps listening for incoming UDP messages.
"""
while True:
recv_msg, recv_addr = self.udp_socket.recvfrom(1024)
info = recv_msg.decode('utf-8')
msg = f'From IP: {recv_addr[0]} port: {recv_addr[1]}:'
self.udp_signal_write_msg.emit(msg)
self.udp_signal_write_info.emit(info, self.InfoRec)
def udp_client_start(self, ip: str, port: int) -> None:
"""
Set the IP address and port for the UDP client.
"""
self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.address = (ip, port)
msg = 'UDP client started\n'
self.udp_signal_write_msg.emit(msg)
def udp_send(self, send_info: str) -> None:
"""
Helper used by the UDP client to send a message.
"""
try:
send_info_encoded = send_info.encode('utf-8')
self.udp_socket.sendto(send_info_encoded, self.address)
msg = 'UDP client message sent'
self.udp_signal_write_msg.emit(msg)
self.udp_signal_write_info.emit(send_info, self.InfoSend)
except Exception as ret:
msg = 'Send failed\n'
self.udp_signal_write_msg.emit(msg)
def udp_close(self) -> None:
"""
Helper that closes the network connection.
"""
if self.link_flag == self.ServerUDP:
try:
self.udp_socket.close()
msg = 'Network connection closed\n'
self.udp_signal_write_msg.emit(msg)
except Exception as ret:
pass
try:
StopThreading.stop_thread(self.sever_th)
except Exception:
pass
if self.link_flag == self.ClientUDP:
try:
self.udp_socket.close()
msg = 'Network connection closed\n'
self.udp_signal_write_msg.emit(msg)
except Exception as ret:
pass
try:
StopThreading.stop_thread(self.client_th)
except Exception:
pass
NoLink = -1
ServerUDP = 2
ClientUDP = 3
InfoSend = 0
InfoRec = 1
|
bmv2stf.py
|
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the BMv2 behavioral model simulator with input from an stf file
from subprocess import Popen
from threading import Thread
from glob import glob
import json
import sys
import re
import os
import stat
import tempfile
import shutil
import difflib
import subprocess
import signal
import time
import random
import errno
import socket
from collections import OrderedDict
try:
from scapy.layers.all import *
from scapy.utils import *
except ImportError:
pass
SUCCESS = 0
FAILURE = 1
class TimeoutException(Exception): pass
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
class Options(object):
def __init__(self):
self.binary = None
self.verbose = False
self.preserveTmp = False
self.observationLog = None
self.usePsa = False
def nextWord(text, sep = None):
# Split a text at the indicated separator.
# Note that the separator can be a string.
# Separator is discarded.
spl = text.split(sep, 1)
if len(spl) == 0:
return '', ''
elif len(spl) == 1:
return spl[0].strip(), ''
else:
return spl[0].strip(), spl[1].strip()
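# --- Illustrative sketch (not part of the original file) ---
# nextWord() peels off the first (sep- or whitespace-delimited) token and
# returns (token, rest), both stripped. This helper is never called; it only
# documents that contract with a few examples.
def _nextWord_examples():
    assert nextWord("add  table_entry 1") == ('add', 'table_entry 1')
    assert nextWord("key : value", ":") == ('key', 'value')
    assert nextWord("") == ('', '')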
def ByteToHex(byteStr):
return ''.join( [ "%02X " % ord( x ) for x in byteStr ] ).strip()
def convert_packet_bin2hexstr(pkt_bin):
return pkt_bin.convert_to(Raw).load.hex().upper()
def convert_packet_stf2hexstr(pkt_stf_text):
return ''.join(pkt_stf_text.split()).upper()
def reportError(*message):
print("***", *message)
class Local(object):
# object to hold local vars accessible to nested functions
pass
def FindExe(dirname, exe):
dir = os.getcwd()
while len(dir) > 1:
if os.path.isdir(os.path.join(dir, dirname)):
rv = None
rv_time = 0
for dName, sdName, fList in os.walk(os.path.join(dir, dirname)):
if exe in fList:
n=os.path.join(dName, exe)
if os.path.isfile(n) and os.access(n, os.X_OK):
n_time = os.path.getmtime(n)
if n_time > rv_time:
rv = n
rv_time = n_time
if rv is not None:
return rv
dir = os.path.dirname(dir)
return exe
def run_timeout(verbose, args, timeout, stderr):
if verbose:
print("Executing ", " ".join(args))
local = Local()
local.process = None
def target():
procstderr = None
if stderr is not None:
procstderr = open(stderr, "w")
local.process = Popen(args, stderr=procstderr)
local.process.wait()
thread = Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Timeout ", " ".join(args), file=sys.stderr)
local.process.terminate()
thread.join()
if local.process is None:
# never even started
reportError("Process failed to start")
return -1
if verbose:
print("Exit code ", local.process.returncode)
return local.process.returncode
timeout = 10 * 60
class ConcurrentInteger(object):
# Generates exclusive integers in a range 0-max
# in a way which is safe across multiple processes.
# It uses a simple form of locking using folder names.
# This is necessary because this script may be invoked
# concurrently many times by make, and we need the many simulator instances
# to use different port numbers.
def __init__(self, folder, max):
self.folder = folder
self.max = max
def lockName(self, value):
return "lock_" + str(value)
def release(self, value):
os.rmdir(self.lockName(value))
def generate(self):
# try 10 times
for i in range(0, 10):
index = random.randint(0, self.max)
file = self.lockName(index)
try:
os.makedirs(file)
return index
except:
time.sleep(1)
continue
return None
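# Usage sketch (not part of the original script): reserve a slot for a switch
# instance and free it afterwards. The names below mirror how run() uses the class.
#   allocator = ConcurrentInteger(os.getcwd(), 1000)
#   slot = allocator.generate()        # None if no free slot was found
#   if slot is not None:
#       try:
#           thrift_port = 9090 + slot
#       finally:
#           allocator.release(slot)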
class BMV2ActionArg(object):
def __init__(self, name, width):
# assert isinstance(name, str)
# assert isinstance(width, int)
self.name = name
self.width = width
class TableKey(object):
def __init__(self):
self.fields = OrderedDict()
def append(self, name, type):
self.fields[name] = type
def __str__(self):
result = ""
for f in self.fields.keys():
if result != "":
result += " "
result += f + ":" + self.fields[f]
return result
class TableKeyInstance(object):
def __init__(self, tableKey):
assert isinstance(tableKey, TableKey)
self.values = {}
self.key = tableKey
for f,t in tableKey.fields.items():
if t == "ternary":
self.values[f] = "0&&&0"
elif t == "lpm":
self.values[f] = "0/0"
elif t == "exact":
self.values[f] = "0"
elif t == "valid":
self.values[f] = "0"
else:
raise Exception("Unexpected key type " + t)
def set(self, key, value):
        array = re.compile(r"(.*)\$([0-9]+)(.*)")
m = array.match(key)
if m:
key = m.group(1) + "[" + m.group(2) + "]" + m.group(3)
found = False
if key in self.key.fields:
found = True
elif key + '$' in self.key.fields:
key = key + '$'
found = True
elif key + '.$valid$' in self.key.fields:
key = key + '.$valid$'
found = True
elif key.endswith(".valid"):
alt = key[:-5] + "$valid$"
if alt in self.key.fields:
key = alt
found = True
if not found:
for i in self.key.fields:
if i.endswith("." + key) or i.endswith("." + key + "$"):
key = i
found = True
elif key == "valid" and i.endswith(".$valid$"):
key = i
found = True
if not found and key == "valid" and "$valid$" in self.key.fields:
key = "$valid$"
found = True
if not found:
print(self.key.fields)
raise Exception("Unexpected key field " + key)
if self.key.fields[key] == "ternary":
self.values[key] = self.makeMask(value)
elif self.key.fields[key] == "lpm":
self.values[key] = self.makeLpm(value)
else:
self.values[key] = value
def makeMask(self, value):
# TODO -- we really need to know the size of the key to make the mask properly,
# but to find that, we need to parse the headers and header_types from the json
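        # e.g. "0x0a0*" becomes "0x0a00&&&0xFFF0": every literal nibble gets an
        # "all ones" mask digit and every '*' nibble gets a zero mask digit.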
if value.startswith("0x"):
mask = "F"
value = value[2:]
prefix = "0x"
elif value.startswith("0b"):
mask = "1"
value = value[2:]
prefix = "0b"
elif value.startswith("0o"):
mask = "7"
value = value[2:]
prefix = "0o"
else:
raise Exception("Decimal value "+value+" not supported for ternary key")
return value
values = "0123456789abcdefABCDEF*"
replacements = (mask * 22) + "0"
trans = str.maketrans(values, replacements)
m = value.translate(trans)
return prefix + value.replace("*", "0") + "&&&" + prefix + m
def makeLpm(self, value):
if value.find('/') >= 0:
return value
if value.startswith("0x"):
bits_per_digit = 4
elif value.startswith("0b"):
bits_per_digit = 1
elif value.startswith("0o"):
bits_per_digit = 3
else:
value = "0x" + hex(int(value))
bits_per_digit = 4
digits = len(value) - 2 - value.count('*')
return value.replace('*', '0') + "/" + str(digits*bits_per_digit)
def __str__(self):
result = ""
for f in self.key.fields:
if result != "":
result += " "
result += self.values[f]
return result
class BMV2ActionArguments(object):
def __init__(self, action):
assert isinstance(action, BMV2Action)
self.action = action
self.values = {}
def set(self, key, value):
found = False
for i in self.action.args:
if key == i.name:
found = True
if not found:
raise Exception("Unexpected action arg " + key)
self.values[key] = value
def __str__(self):
result = ""
for f in self.action.args:
if result != "":
result += " "
result += self.values[f.name]
return result
def size(self):
return len(self.action.args)
class BMV2Action(object):
def __init__(self, jsonAction):
self.name = jsonAction["name"]
self.args = []
for a in jsonAction["runtime_data"]:
arg = BMV2ActionArg(a["name"], a["bitwidth"])
self.args.append(arg)
def __str__(self):
return self.name
def makeArgsInstance(self):
return BMV2ActionArguments(self)
class BMV2Table(object):
def __init__(self, jsonTable):
self.match_type = jsonTable["match_type"]
self.name = jsonTable["name"]
self.key = TableKey()
self.actions = {}
for k in jsonTable["key"]:
name = k["name"]
if name is None:
name = k["target"]
if isinstance(name, list):
name = ""
for t in k["target"]:
if name != "":
name += "."
name += t
self.key.append(name, k["match_type"])
actions = jsonTable["actions"]
action_ids = jsonTable["action_ids"]
for i in range(0, len(actions)):
actionName = actions[i]
actionId = action_ids[i]
self.actions[actionName] = actionId
def __str__(self):
return self.name
def makeKeyInstance(self):
return TableKeyInstance(self.key)
# Represents enough about the program executed to be
# able to invoke the BMV2 simulator, create a CLI file
# and test packets in pcap files.
class RunBMV2(object):
def __init__(self, folder, options, jsonfile):
self.clifile = folder + "/cli.txt"
self.jsonfile = jsonfile
self.stffile = None
self.folder = folder
self.pcapPrefix = "pcap"
self.interfaces = {}
        self.expected = {}      # maps interface number -> list of expected packet hex strings
        self.expectedAny = []   # interfaces on which any number of packets is acceptable
self.packetDelay = 0
self.options = options
self.json = None
self.tables = []
self.actions = []
self.switchLogFile = "switch.log" # .txt is added by BMv2
self.readJson()
self.cmd_line_args = getattr(options, 'switchOptions', ())
self.target_specific_cmd_line_args = getattr(options, 'switchTargetSpecificOptions', ())
def readJson(self):
with open(self.jsonfile) as jf:
self.json = json.load(jf)
for a in self.json["actions"]:
self.actions.append(BMV2Action(a))
for t in self.json["pipelines"][0]["tables"]:
self.tables.append(BMV2Table(t))
for t in self.json["pipelines"][1]["tables"]:
self.tables.append(BMV2Table(t))
def filename(self, interface, direction):
return self.folder + "/" + self.pcapPrefix + str(interface) + "_" + direction + ".pcap"
def interface_of_filename(self, f):
return int(os.path.basename(f).rstrip('.pcap').lstrip(self.pcapPrefix).rsplit('_', 1)[0])
def do_cli_command(self, cmd):
if self.options.verbose:
print(cmd)
self.cli_stdin.write(bytes(cmd + "\n", encoding='utf8'))
self.cli_stdin.flush()
self.packetDelay = 1
def do_command(self, cmd):
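        # Dispatch a single STF line: 'add' and 'setdefault' are translated into
        # simple_switch_CLI table commands, 'mirroring_add' and 'mc_*' commands are
        # passed through unchanged, 'packet' writes a frame to the corresponding
        # input pcap fifo, and 'expect' records packets we should later observe.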
if self.options.verbose and cmd != "":
print("STF Command:", cmd)
first, cmd = nextWord(cmd)
if first == "":
pass
elif first == "add":
self.do_cli_command(self.parse_table_add(cmd))
elif first == "setdefault":
self.do_cli_command(self.parse_table_set_default(cmd))
elif first == "mirroring_add":
# Pass through mirroring_add commands unchanged, with same
# arguments as expected by simple_switch_CLI
self.do_cli_command(first + " " + cmd)
elif first == "mc_mgrp_create" or first == "mc_node_create" or first == "mc_node_associate":
# Pass through multicast group commands unchanged, with
# same arguments as expected by simple_switch_CLI
self.do_cli_command(first + " " + cmd)
elif first == "packet":
interface, data = nextWord(cmd)
interface = int(interface)
data = ''.join(data.split())
time.sleep(self.packetDelay)
try:
self.interfaces[interface]._write_packet(bytes.fromhex(data))
except ValueError:
reportError("Invalid packet data", data)
return FAILURE
self.interfaces[interface].flush()
self.packetDelay = 0
elif first == "expect":
interface, data = nextWord(cmd)
interface = int(interface)
data = ''.join(data.split())
if data != '':
self.expected.setdefault(interface, []).append(data)
else:
self.expectedAny.append(interface)
else:
if self.options.verbose:
print("ignoring stf command:", first, cmd)
def parse_table_set_default(self, cmd):
tableName, cmd = nextWord(cmd)
table = self.tableByName(tableName)
actionName, cmd = nextWord(cmd, "(")
action = self.actionByName(table, actionName)
actionArgs = action.makeArgsInstance()
cmd = cmd.strip(")")
while cmd != "":
word, cmd = nextWord(cmd, ",")
k, v = nextWord(word, ":")
actionArgs.set(k, v)
command = "table_set_default " + tableName + " " + actionName
if actionArgs.size():
command += " => " + str(actionArgs)
return command
def parse_table_add(self, cmd):
tableName, cmd = nextWord(cmd)
table = self.tableByName(tableName)
key = table.makeKeyInstance()
actionArgs = None
actionName = None
prio, cmd = nextWord(cmd)
number = re.compile("[0-9]+")
if not number.match(prio):
# not a priority; push back
cmd = prio + " " + cmd
prio = ""
while cmd != "":
            if actionName is not None:
# parsing action arguments
word, cmd = nextWord(cmd, ",")
k, v = nextWord(word, ":")
actionArgs.set(k, v)
else:
# parsing table key
word, cmd = nextWord(cmd)
if cmd.find("=") >= 0:
# This command retrieves a handle for the key
# This feature is currently not supported, so we just ignore the handle part
cmd = cmd.split("=")[0]
if word.find("(") >= 0:
# found action
actionName, arg = nextWord(word, "(")
action = self.actionByName(table, actionName)
actionArgs = action.makeArgsInstance()
cmd = arg + cmd
cmd = cmd.strip("()")
else:
k, v = nextWord(word, ":")
key.set(k, v)
if prio != "":
# Priorities in BMV2 seem to be reversed with respect to the stf file
# Hopefully 10000 is large enough
prio = str(10000 - int(prio))
command = "table_add " + table.name + " " + action.name + " " + str(key) + " => " + str(actionArgs)
if table.match_type == "ternary":
command += " " + prio
return command
def actionByName(self, table, actionName):
for name, id in table.actions.items():
action = self.actions[id]
if action.name == actionName:
return action
# Try again with suffixes
candidate = None
for name, id in table.actions.items():
action = self.actions[id]
if action.name.endswith(actionName):
if candidate is None:
candidate = action
else:
raise Exception("Ambiguous action name " + actionName + " in " + table.name)
if candidate is not None:
return candidate
raise Exception("No action", actionName, "in table", table)
def tableByName(self, tableName):
originalName = tableName
for t in self.tables:
if t.name == tableName:
return t
# If we can't find that try to match the tableName with a table suffix
candidate = None
for t in self.tables:
if t.name.endswith(tableName):
                if candidate is None:
candidate = t
else:
raise Exception("Table name " + tableName + " is ambiguous between " +
candidate.name + " and " + t.name)
if candidate is not None:
return candidate
raise Exception("Could not find table " + tableName)
def interfaceArgs(self):
# return list of interface names suitable for bmv2
result = []
for interface in sorted(self.interfaces):
result.append("-i " + str(interface) + "@" + self.pcapPrefix + str(interface))
return result
def generate_model_inputs(self, stffile):
self.stffile = stffile
with open(stffile) as i:
for line in i:
line, comment = nextWord(line, "#")
first, cmd = nextWord(line)
if first == "packet" or first == "expect":
interface, cmd = nextWord(cmd)
interface = int(interface)
if not interface in self.interfaces:
# Can't open the interfaces yet, as that would block
ifname = self.interfaces[interface] = self.filename(interface, "in")
os.mkfifo(ifname)
return SUCCESS
def check_switch_server_ready(self, proc, thriftPort):
"""While the process is running, we check if the Thrift server has been
started. If the Thrift server is ready, we assume that the switch was
started successfully. This is only reliable if the Thrift server is
started at the end of the init process"""
while True:
if proc.poll() is not None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
result = sock.connect_ex(("localhost", thriftPort))
if result == 0:
return True
def run(self):
if self.options.verbose:
print("Running model")
wait = 0 # Time to wait before model starts running
if self.options.usePsa:
switch = "psa_switch"
switch_cli = "psa_switch_CLI"
else:
switch = "simple_switch"
switch_cli = "simple_switch_CLI"
concurrent = ConcurrentInteger(os.getcwd(), 1000)
rand = concurrent.generate()
if rand is None:
reportError("Could not find a free port for Thrift")
return FAILURE
thriftPort = str(9090 + rand)
rv = SUCCESS
try:
os.remove("/tmp/bmv2-%d-notifications.ipc" % rand)
except OSError:
pass
try:
runswitch = [FindExe("behavioral-model", switch),
"--log-file", self.switchLogFile, "--log-flush",
"--use-files", str(wait), "--thrift-port", thriftPort,
"--device-id", str(rand)] + self.interfaceArgs() + ["../" + self.jsonfile]
if self.cmd_line_args:
runswitch += self.cmd_line_args
if self.target_specific_cmd_line_args:
runswitch += ['--',] + self.target_specific_cmd_line_args
if self.options.verbose:
print("Running", " ".join(runswitch))
sw = subprocess.Popen(runswitch, cwd=self.folder)
def openInterface(ifname):
fp = self.interfaces[interface] = RawPcapWriter(ifname, linktype=0)
fp._write_header(None)
# Try to open input interfaces. Each time, we set a 2 second
# timeout. If the timeout expires we check if the bmv2 process is
# not running anymore. If it is, we check if we have exceeded the
# one minute timeout (exceeding this timeout is very unlikely and
# could mean the system is very slow for some reason). If one of the
# 2 conditions above is met, the test is considered a FAILURE.
start = time.time()
sw_timeout = 60
# open input interfaces
# DANGER -- it is critical that we open these fifos in the same
# order as bmv2, as otherwise we'll deadlock. Would be nice if we
# could open nonblocking.
for interface in sorted(self.interfaces):
ifname = self.interfaces[interface]
while True:
try:
signal.alarm(2)
openInterface(ifname)
signal.alarm(0)
except TimeoutException:
if time.time() - start > sw_timeout:
return FAILURE
if sw.poll() is not None:
return FAILURE
else:
break
# at this point we wait until the Thrift server is ready
# also useful if there are no interfaces
try:
signal.alarm(int(sw_timeout + start - time.time()))
self.check_switch_server_ready(sw, int(thriftPort))
signal.alarm(0)
except TimeoutException:
return FAILURE
time.sleep(0.1)
runcli = [FindExe("behavioral-model", switch_cli), "--thrift-port", thriftPort]
if self.options.verbose:
print("Running", " ".join(runcli))
try:
cli = subprocess.Popen(runcli, cwd=self.folder, stdin=subprocess.PIPE)
self.cli_stdin = cli.stdin
with open(self.stffile) as i:
for line in i:
line, comment = nextWord(line, "#")
self.do_command(line)
cli.stdin.close()
for interface, fp in self.interfaces.items():
fp.close()
# Give time to the model to execute
time.sleep(2)
cli.terminate()
sw.terminate()
sw.wait()
except Exception as e:
cli.terminate()
sw.terminate()
sw.wait()
raise e
# This only works on Unix: negative returncode is
# minus the signal number that killed the process.
if sw.returncode != 0 and sw.returncode != -15: # 15 is SIGTERM
reportError(switch, "died with return code", sw.returncode);
rv = FAILURE
elif self.options.verbose:
print(switch, "exit code", sw.returncode)
cli.wait()
if cli.returncode != 0 and cli.returncode != -15:
reportError("CLI process failed with exit code", cli.returncode)
rv = FAILURE
finally:
try:
os.remove("/tmp/bmv2-%d-notifications.ipc" % rand)
except OSError:
pass
concurrent.release(rand)
if self.options.verbose:
print("Execution completed")
return rv
def comparePacket(self, expected, received):
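        # Expected packets come from the STF file as hex strings in which '*' is a
        # "don't care" nibble; a trailing '$' requests a strict length check.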
received = convert_packet_bin2hexstr(received)
expected = convert_packet_stf2hexstr(expected)
strict_length_check = False
if expected[-1] == '$':
strict_length_check = True
expected = expected[:-1]
if len(received) < len(expected):
reportError("Received packet too short", len(received), "vs",
len(expected), "(in units of hex digits)")
reportError("Full expected packet is ", expected)
reportError("Full received packet is ", received)
return FAILURE
for i in range(0, len(expected)):
if expected[i] == "*":
                continue
if expected[i] != received[i]:
reportError("Received packet ", received)
reportError("Packet different at position", i, ": expected", expected[i], ", received", received[i])
reportError("Full expected packet is ", expected)
reportError("Full received packet is ", received)
return FAILURE
if strict_length_check and len(received) > len(expected):
reportError("Received packet too long", len(received), "vs",
len(expected), "(in units of hex digits)")
reportError("Full expected packet is ", expected)
reportError("Full received packet is ", received)
return FAILURE
return SUCCESS
def showLog(self):
with open(self.folder + "/" + self.switchLogFile + ".txt") as a:
log = a.read()
print("Log file:")
print(log)
def checkOutputs(self):
if self.options.verbose:
print("Comparing outputs")
direction = "out"
for file in glob(self.filename('*', direction)):
interface = self.interface_of_filename(file)
if os.stat(file).st_size == 0:
packets = []
else:
try:
packets = rdpcap(file)
except:
reportError("Corrupt pcap file", file)
self.showLog()
return FAILURE
# Log packets.
if self.options.observationLog:
observationLog = open(self.options.observationLog, 'w')
for pkt in packets:
observationLog.write('%d %s\n' % (
interface,
convert_packet_bin2hexstr(pkt)))
observationLog.close()
# Check for expected packets.
if interface in self.expectedAny:
if interface in self.expected:
reportError("Interface " + interface + " has both expected with packets and without")
continue
if interface not in self.expected:
expected = []
else:
expected = self.expected[interface]
if len(expected) != len(packets):
reportError("Expected", len(expected), "packets on port", str(interface),
"got", len(packets))
reportError("Full list of %d expected packets on port %d:"
"" % (len(expected), interface))
for i in range(len(expected)):
reportError(" packet #%2d: %s"
"" % (i+1,
convert_packet_stf2hexstr(expected[i])))
reportError("Full list of %d received packets on port %d:"
"" % (len(packets), interface))
for i in range(len(packets)):
reportError(" packet #%2d: %s"
"" % (i+1,
convert_packet_bin2hexstr(packets[i])))
self.showLog()
return FAILURE
for i in range(0, len(expected)):
cmp = self.comparePacket(expected[i], packets[i])
if cmp != SUCCESS:
reportError("Packet", i, "on port", str(interface), "differs")
return FAILURE
# remove successfully checked interfaces
if interface in self.expected:
del self.expected[interface]
if len(self.expected) != 0:
# didn't find all the expects we were expecting
reportError("Expected packets on ports",
list(self.expected.keys()), "not received")
return FAILURE
else:
return SUCCESS
def run_model(options, tmpdir, jsonfile, testfile):
bmv2 = RunBMV2(tmpdir, options, jsonfile)
result = bmv2.generate_model_inputs(testfile)
if result != SUCCESS:
return result
result = bmv2.run()
if result != SUCCESS:
return result
result = bmv2.checkOutputs()
return result
######################### main
def usage(options):
print("usage:", options.binary, "[-v] [-p] [-observation-log <file>] <json file> <stf file>");
def main(argv):
options = Options()
options.binary = argv[0]
argv = argv[1:]
while len(argv) > 0 and argv[0][0] == '-':
if argv[0] == "-b":
options.preserveTmp = True
elif argv[0] == "-v":
options.verbose = True
elif argv[0] == "-p":
options.usePsa = True
elif argv[0] == '-observation-log':
if len(argv) == 1:
reportError("Missing argument", argv[0])
usage(options)
sys.exit(1)
options.observationLog = argv[1]
argv = argv[1:]
else:
reportError("Unknown option ", argv[0])
usage(options)
argv = argv[1:]
if len(argv) < 2:
usage(options)
return FAILURE
if not os.path.isfile(argv[0]) or not os.path.isfile(argv[1]):
usage(options)
return FAILURE
tmpdir = tempfile.mkdtemp(dir=".")
result = run_model(options, tmpdir, argv[0], argv[1])
if options.preserveTmp:
print("preserving", tmpdir)
else:
shutil.rmtree(tmpdir)
if options.verbose:
if result == SUCCESS:
print("SUCCESS")
else:
print("FAILURE", result)
return result
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
tests.py
|
from __future__ import unicode_literals
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
# The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
            # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
other_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.create(id=1, first_name="Tintin")
other_thread_ready.set()
# We cannot synchronize the two threads with an event here
# because the main thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see below for 1)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
other_thread_ready.wait()
with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
# 1) This line locks... (see above for 2)
Reporter.objects.create(id=1, first_name="Tintin")
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
"""#20028 -- Atomic must support wrapping callable instances."""
class Callable(object):
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature('can_release_savepoints')
def test_atomic_does_not_leak_savepoints_on_failure(self):
"""#23074 -- Savepoints must be released after rollback."""
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with self.assertRaisesMessage(Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class NonAutocommitTests(TransactionTestCase):
available_apps = []
def test_orm_query_after_error_and_rollback(self):
"""
ORM queries are allowed after an error and a rollback in non-autocommit
mode (#27504).
"""
transaction.set_autocommit(False)
r1 = Reporter.objects.create(first_name='Archibald', last_name='Haddock')
r2 = Reporter(first_name='Cuthbert', last_name='Calculus', id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
transaction.rollback()
Reporter.objects.last()
def test_orm_query_without_autocommit(self):
"""#24921 -- ORM queries must be possible after set_autocommit(False)."""
transaction.set_autocommit(False)
try:
Reporter.objects.create(first_name="Tintin")
finally:
transaction.rollback()
transaction.set_autocommit(True)
|
colorScenarios.py
|
# -*- coding: utf-8 -*-
"""
colorScenarios.py
Author: SMFSW
Copyright (c) 2016-2021 SMFSW
Desc: color scenario & fader classes
"""
import os
import sys
import csv
import threading
from collections import OrderedDict
from copy import deepcopy
from colorConv import RGBtoHSV, HSVtoRGB, RGBtoXYZ, XYZtoRGB, XYZtoYxy, YxytoXYZ
from colorConvCIE import xy_to_uv76, uv76_to_xy
currentVersion = sys.version_info
if currentVersion[0] < 3 or currentVersion[1] < 3: # till python 3.3
from time import clock as get_clock
else:
from time import process_time as get_clock
if sys.version_info > (3,):
long = int
class Fader(object):
""" Fader object """
color_spaces = ['RGB', 'HSV', 'CIE1976']
data_fields = (['Red', 'Green', 'Blue', 'Time'], # RGB Scenario csv fields
['Hue', 'Saturation', 'Value', 'Time'], # HSV Scenario csv fields
['Y', 'u\'', 'v\'', 'Time']) # CIE1976 Scenario csv fields
dfields = dict(zip(color_spaces, data_fields))
def __init__(self, use_timer=False, **kwargs):
""" Fader class initialization
:param args:
*use_timer (int): Set to True, uses timer for fading, otherwise needs to iterate manually
:param kwargs:
**on_period_elapsed (str): Callback function when period elapsed
**on_fading_finished (str): Callback function when fading has finished
**fading_space (str): Color space used for fading
**input (str): Fading inputs color space
**output (str): Fading outputs color space
"""
self._fCbackPeriod = kwargs['on_period_elapsed'] if 'on_period_elapsed' in kwargs else None
self._fCbackEnd = kwargs['on_fading_finished'] if 'on_fading_finished' in kwargs else None
self._fTranisition = kwargs['fading_space'] if 'fading_space' in kwargs and kwargs['fading_space'] in self.color_spaces else 'RGB'
self._fInput = kwargs['input'] if 'input' in kwargs and kwargs['input'] in self.color_spaces else 'RGB'
self._fOutput = kwargs['output'] if 'output' in kwargs and kwargs['output'] in self.color_spaces else 'RGB'
self.clamp_byte = lambda v: max(0.0, min(255.0, v))
self.clamp_float = lambda v: max(0.0, min(1.0, v))
self._fFields = Fader.dfields[self._fInput]
self._fInc = [0, 0, 0]
self._fCurrent = [0, 0, 0]
self._fTimeCurrent, self._fTimeInc, self._fNbInc = 0, 0, 0
self.fRun, self.fFinished = False, False
self._fTimeUpdate = 0
self._fHandler_en = False
if use_timer is True:
self._fHandler = threading.Thread(target=self.fade_inc)
self._fHandler.daemon = True
self._fHandler.start()
def __iter__(self):
return self
def __next__(self):
if self._fNbInc == 0:
self.fFinished = True
self._fCbackEnd() if self._fCbackEnd is not None else None
raise StopIteration
else:
self._fNbInc -= 1
self._fTimeCurrent += self._fTimeInc
for i in range(len(self._fCurrent)):
if self._fTranisition == 'RGB':
self._fCurrent[i] = self.clamp_byte(self._fCurrent[i] + self._fInc[i])
elif self._fTranisition == 'HSV':
if i == 0: # Hue
# TODO: for conversion, Hue can be coded on a float 0.0-1.0
self._fCurrent[i] = self.clamp_hue(self._fCurrent[i] + self._fInc[i])
else:
self._fCurrent[i] = self.clamp_byte(self._fCurrent[i] + self._fInc[i])
elif self._fTranisition == 'CIE1976':
self._fCurrent[i] = self.clamp_float(self._fCurrent[i] + self._fInc[i])
col = self.convert_space(self._fTranisition, self._fOutput, self._fCurrent)
self._fCbackPeriod(col) if self._fCbackPeriod is not None else None
return col, round(self._fTimeCurrent, 3)
@staticmethod
def clamp_hue(hue, scale=360.0):
""" Start fading process
:param hue: Hue value to clamp
:param scale: Hue scale
:return: Hue clamped value
"""
if hue >= scale:
return hue - scale
elif hue < 0.0:
            return scale + hue  # wrap a negative hue back onto the wheel
else:
return hue
def fader_start(self, *args):
""" Start fading process
:param args:
*args (OrderedDict or int x4): Arguments passed to fade_init """
self.fRun, self.fFinished = True, False
self.fade_init(*args)
self._fTimeUpdate = get_clock()
self.fHandler_start()
def fader_stop(self):
""" Stop fading process """
self.fRun = False
self.fHandler_stop()
def fader_pause(self):
""" Pause fading process """
self.fRun = False
self.fHandler_stop()
def fader_resume(self):
""" Resume fading process """
self.fRun = True
self._fTimeUpdate = get_clock()
self.fHandler_start()
def fHandler_start(self):
""" Fader thread enable """
self._fHandler_en = True
def fHandler_stop(self):
""" Fader thread disable """
self._fHandler_en = False
@staticmethod
def convert_space(from_space, to_space, *args):
col = args[0]
if from_space == to_space:
return col
elif from_space == 'RGB':
if to_space == 'HSV':
return RGBtoHSV(*col)
elif to_space == 'CIE1976':
tmp = XYZtoYxy(*RGBtoXYZ(*col))
uv = xy_to_uv76(tmp[1], tmp[2])
return tmp[0], uv[0], uv[1]
elif from_space == 'HSV':
if to_space == 'RGB':
return HSVtoRGB(*col)
elif to_space == 'CIE1976':
tmp = XYZtoYxy(*RGBtoXYZ(*HSVtoRGB(*col)))
uv = xy_to_uv76(tmp[1], tmp[2])
return tmp[0], uv[0], uv[1]
elif from_space == 'CIE1976':
xy = uv76_to_xy(col[1], col[2])
if to_space == 'RGB':
return XYZtoRGB(*YxytoXYZ(col[0], xy[0], xy[1]))
elif to_space == 'HSV':
return RGBtoHSV(*XYZtoRGB(*YxytoXYZ(col[0], xy[0], xy[1])))
return col
def fade_init(self, *args):
""" Fading process init and launch
:param args:
*args[0] (OrderedDict): Comprising Red, Green, Blue, Time
*args[0-3] (int): of the form Red, Green, Blue, Time """
if isinstance(args[0], OrderedDict) or isinstance(args[0], dict):
# TODO: get params as HSV & u'v' too
col, tim = [int(args[0][self._fFields[0]]), int(args[0][self._fFields[1]]),
int(args[0][self._fFields[2]])], long(args[0][self._fFields[3]])
else:
col, tim = [args[0], args[1], args[2]], args[3]
col = list(self.convert_space(self._fInput, self._fTranisition, col))
if self._fTranisition == 'HSV':
if self.convert_space('HSV', 'RGB', self._fCurrent) == (0, 0, 0):
# Position Hue & Saturation, only increase value
self._fCurrent[0] = col[0]
self._fCurrent[1] = col[1]
elif self.convert_space('HSV', 'RGB', col) == (0, 0, 0):
# No change of Hue & Saturation, only decrease value
col[0] = self._fCurrent[0]
col[1] = self._fCurrent[1]
delta = [0, 0, 0]
for i in range(len(delta)):
delta[i] = col[i] - self._fCurrent[i]
if self._fTranisition == 'HSV' and i == 0:
# Make the nearest transition on Hue if delta is more than half the color wheel
if abs(delta[i]) > 180.0:
if delta[i] < 0.0:
delta[i] = delta[i] + 360.0
else:
delta[i] = delta[i] - 360.0
delta_max = max([abs(i) for i in delta])
if delta_max == 0:
return
elif tim == 0:
self._fNbInc = 1
self._fTimeInc = 1 # After 1ms
else:
self._fNbInc = delta_max
self._fTimeInc = tim / delta_max
if self._fTimeInc < 100:
self._fTimeInc = 100
self._fNbInc = max(1, int(tim / self._fTimeInc))
self._fTimeInc /= 1000.0 # time.clock gives seconds
for i in range(len(delta)):
self._fInc[i] = float(delta[i]) / self._fNbInc
def fade_inc(self):
""" Fading thread (handles increments) """
while 1:
if self._fHandler_en is True:
now = get_clock()
if (now - self._fTimeUpdate) >= self._fTimeInc:
self._fTimeUpdate = now
if not self.fFinished:
try:
next(self)
except StopIteration:
pass
def set_transition(self, space):
""" Set new fading transition color space
:param space: Color space used for fading """
old = self._fTranisition
self._fTranisition = space if space in self.color_spaces else 'RGB'
self._fCurrent = list(self.convert_space(old, self._fTranisition, self._fCurrent))
def set_input(self, space):
""" Set new input values color space
:param space: Fading inputs color space """
self._fInput = space if space in self.color_spaces else 'RGB'
self._fFields = Fader.dfields[self._fInput]
def set_output(self, space):
""" Set new output values color space
:param space: Fading output color space """
self._fOutput = space if space in self.color_spaces else 'RGB'
class Scenario(Fader):
""" Scenario object """
speed_mult = [16, 8, 4, 2, 1, 1.0 / 2, 1.0 / 4, 1.0 / 8, 1.0 / 16] # Speeds
def __init__(self, *args, **kwargs):
""" Scenario class initialization
:param args:
*args[0] (list): scenario list (optional), specify file in kwargs instead
:param kwargs:
**dir (str): Scenario file directory
**file (str): Scenario filename
**loops (int): Number of loops to perform (0 - infinite)
**on_scenario_finished (str): Callback function when scenario has finished
"""
super(Scenario, self).__init__(on_fading_finished=self.scenar_step, **kwargs)
self.sFile = ''
self.sDatas = []
self._sLoopsDone = 0
self._sStep = -1
self._sSpeed = 4
self.sRun, self.sFinished = False, False
if 'dir' in kwargs:
os.chdir(kwargs['dir'])
if 'file' in kwargs:
self.read_datas(kwargs['file'])
elif args and isinstance(args[0], list):
self.sDatas = deepcopy(args[0])
self._sCbackEnd = kwargs['on_scenario_finished'] if 'on_scenario_finished' in kwargs else None
if 'loops' in kwargs:
self.sLoop, self.sNbLoops = True, kwargs['loops']
else:
self.sLoop, self.sNbLoops = False, 0
def read_datas(self, csv_file):
""" Read scenario datas from csv file
:param csv_file: input file """
self.sFile = csv_file
with open(self.sFile, 'r') as f:
reader = csv.DictReader(f, fieldnames=self._fFields)
for row in reader:
self.sDatas.append(row)
self.sDatas.pop(0) # Remove header line
def scenar_start(self):
""" Start scenario process """
self._fCurrent = [0, 0, 0]
self._sSpeed = 4
self._sLoopsDone = 0
self._sStep = -1
self.sRun, self.sFinished = True, False
self.scenar_step()
def scenar_stop(self):
""" Stop scenario process """
self.sRun = False
self.fader_stop()
def scenar_pause(self):
""" Pause scenario process """
self.fader_pause()
def scenar_resume(self):
""" Resume scenario process """
self.fader_resume()
def scenar_speed_up(self):
""" Increase scenario speed """
self._sSpeed = min(self._sSpeed + 1, len(self.speed_mult) - 1)
def scenar_speed_down(self):
""" Decrease scenario speed """
self._sSpeed = max(self._sSpeed - 1, 0)
def scenar_step(self):
""" Scenario steps handler """
end = False
if self._sStep == len(self.sDatas) - 1:
self._sStep = 0
if self.sLoop:
self._sLoopsDone += 1
if self.sNbLoops:
if self.sNbLoops == self._sLoopsDone:
end = True
else:
end = True
else:
self._sStep += 1
if end:
self.scenar_stop()
self.sFinished = True
self._sCbackEnd() if self._sCbackEnd is not None else None
if self.sRun:
dat = deepcopy(self.sDatas[self._sStep])
dat['Time'] = long(dat['Time']) * self.speed_mult[self._sSpeed]
self.fader_start(dat)
# if __name__ == "__main__":
|
game.py
|
import re
import os
import subprocess
import threading
import signal
import random
import psutil
import numpy as np
from gym import logger
class GameRegion:
"""A rectangle. Represents the game region.
"""
def __init__(self, x: int = 0, y: int = 0, width: int = None, height: int = None):
"""Creates a new game region
"""
self.x = x
self.y = y
self.width = width
self.height = height
def as_tuple(self) -> tuple:
"""Returns the game region as a tuple (x, y, width, height)
"""
return (self.x, self.y, self.width, self.height)
class Action:
def __init__(self, action, parallel=False):
self._action = action
self.parallel = parallel
self._pre = lambda: None
self._post = lambda: None
def set_pre(self, action):
self._pre = action
def set_post(self, action):
self._post = action
def pre(self):
self._pre()
def run(self):
if type(self._action) == list:
if not self.parallel:
for action in self._action:
action()
else:
threads = []
for action in self._action:
thread = threading.Thread(target=action)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
else:
self._action()
def post(self):
self._post()
class GameHandler:
"""A wrapper that allows to easily handle the game process and window. It allows to read the process standard
output, to focus the window, and so on.
"""
    SPACE_SPLITTER = re.compile(r"\s+")
    @staticmethod
    def run_process(command: list, monitor_stdout=True) -> subprocess.Popen:
"""Runs the command specified and spawns a process. It also pipes the standard output so that it can be
easily handled, if needed.
"""
kwargs = {}
if monitor_stdout:
kwargs['stdout'] = subprocess.PIPE
return subprocess.Popen(command, **kwargs)
    @staticmethod
    def find_window_by_class(klass: str) -> str:
        """Returns the window ID of the specified class. Returns None if no window is present for the given class.
        If more than one window matches the class, this method returns the last one (usually, the correct one).
"""
window_id = subprocess.check_output(['xdotool', 'search', '--class', klass])
window_ids = window_id.decode("ascii").rstrip().split("\n")
if len(window_ids) > 0:
return window_ids[-1]
else:
return None
    @staticmethod
    def find_windows_by_pid(pid: int) -> list:
        """Returns the window IDs of the process with the given PID, or None if the process has no window.
        This method assumes that the process has at most one window: if more than one window is found,
        the assertion fails.
"""
window_id = subprocess.check_output(['xdotool', 'search', '--pid', str(pid)])
window_ids = window_id.decode("ascii").rstrip().split("\n")
assert len(window_ids) <= 1
if len(window_ids) == 1:
return window_ids
else:
return None
def __init__(self, process, window:str = None, **kwargs):
"""Creates a new handler for a given process (subprocess.Popen) and a window ID (string). If the window ID is
not specified, it uses the process ID to automatically detect the game window.
"""
self._process = process
self._process_info = psutil.Process(self._process.pid)
self._ram = None
self._vram = None
if window:
self._windows = [window]
else:
self._windows = GameHandler.find_windows_by_pid(self._process.pid)
self._stdout = []
self.autofocus = True
if 'autofocus' in kwargs:
self.autofocus = kwargs['autofocus']
if self._process.stdout is not None:
self._process_log_thread = threading.Thread(target=lambda: self._process_log_mainloop())
self._process_log_thread.start()
self.reset_metrics()
logger.info(f"New game process: {window}")
def screenshot(self):
return Screenshot.take_window(self._windows[0])
def focus_window(self) -> None:
"""Focuses the game window.
"""
if self.autofocus:
for window in self._windows:
logger.info(f"Focusing window: {window}")
subprocess.call(['xdotool', 'windowfocus', window])
else:
logger.info("Avoiding autofocus")
def move_window(self, x, y):
for window in self._windows:
logger.info(f"Moving window {window} to {x}, {y}")
subprocess.call(['xdotool', 'windowmove', window, str(x), str(y)])
def get_window_region(self) -> GameRegion:
"""Returns the region of the game region (GameRegion).
"""
raw = subprocess.check_output(['xwininfo', '-id', self._windows[0]])
txt = raw.decode("utf8")
x = int(re.findall("Absolute upper\-left X\:\s+([0-9]+)\n", txt)[0])
y = int(re.findall("Absolute upper\-left Y\:\s+([0-9]+)\n", txt)[0])
width = int(re.findall("Width\:\s+([0-9]+)\n", txt)[0])
height = int(re.findall("Height\:\s+([0-9]+)\n", txt)[0])
logger.info(f"Retrieved window region: {x}, {y}, {width}, {height}")
return GameRegion(x, y, width, height)
def _used_ram(self) -> float:
return self.used_total_ram()
def used_total_ram(self) -> float:
"""Returns the RAM used by the main process in bytes
"""
return self._process_info.memory_info().vms
def used_data_ram(self) -> float:
"""Returns the data RAM used by the main process in bytes
"""
return self._process_info.memory_info().data
def _used_cpu(self) -> float:
"""Returns the CPU clocks used by the main process
"""
result = 0
with open(f"/proc/{self._process.pid}/stat") as f:
parts = f.read().split(" ")
result = float(parts[13]) + float(parts[14]) + float(parts[15]) + float(parts[16])
return result
def _used_vram(self) -> float:
"""Returns the VRAM used by the main process on the VGA
"""
try:
dump = subprocess.check_output(['nvidia-smi', 'pmon', '-c', '1', '-s', 'm']).decode("utf8")
lines = dump.split("\n")
for i in range(2, len(lines)):
line = lines[i].strip()
if len(line) == 0:
continue
stats = GameHandler.SPACE_SPLITTER.split(line)
if int(stats[1]) == self._process.pid:
return float(stats[3])
return None
except subprocess.CalledProcessError:
return None
def _used_gpu(self) -> float:
"""Returns the total GPU percentage used
"""
return 0
def used_cpu(self) -> float:
current = self._used_cpu()
if self._base_cpu is None or current is None:
return None
else:
return current - self._base_cpu
def used_gpu(self) -> float:
current = self._used_gpu()
if self._base_gpu is None or current is None:
return None
else:
return current - self._base_gpu
def used_ram(self) -> float:
current = self._used_ram()
if self._base_ram is None or current is None:
return None
else:
return current - self._base_ram
def used_vram(self) -> float:
current = self._used_vram()
if self._base_vram is None or current is None:
return None
else:
return current - self._base_vram
def reset_metrics(self) -> None:
self._base_cpu = self._used_cpu()
self._base_gpu = self._used_gpu()
self._base_ram = self._used_ram()
self._base_vram = self._used_vram()
def process_id(self) -> int:
return self._process.pid
def terminate(self) -> None:
"""Terminates the game process.
"""
self._process.terminate()
self._process.returncode = 0
def suspend(self) -> None:
self._process.send_signal(signal.SIGSTOP)
def resume(self) -> None:
self._process.send_signal(signal.SIGCONT)
def alive(self) -> bool:
return self._process.returncode is None
def read_log_line(self) -> str:
"""Returns the last process output line not read yet. Returns None if no new line is available.
"""
if len(self._stdout) > 0:
return self._stdout.pop(0)
else:
return None
def read_log_lines(self) -> list:
"""Returns all the last process output lines not read yet. Returns an empty list if no new line is available.
"""
result = []
while len(self._stdout) > 0:
result.append(self._stdout.pop(0))
return result
def _process_log_mainloop(self) -> None:
"""Helper method: it is the main loop of the thread that fetches the process output.
"""
for line in self._process.stdout:
self._stdout.append(line.decode("utf8"))
def _process_performance_mainloop(self) -> None:
while self.alive():
self._ram = self._used_ram()
self._vram = self._used_vram()
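# Usage sketch (an assumption, not part of the original module); the command below is hypothetical.
#   proc = GameHandler.run_process(['my_game', '--windowed'])
#   handler = GameHandler(proc)
#   handler.focus_window()
#   region = handler.get_window_region()
#   shot = handler.screenshot()
#   handler.terminate()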
class ProcessWrapper:
def __init__(self, pid):
self.pid = pid
self.stdout = None
self.stderr = None
self.stdin = None
self.returncode = None
def terminate(self):
os.kill(self.pid, signal.SIGINT)
def send_signal(self, sig):
os.kill(self.pid, sig)
class Screenshot:
TEMP_SCREENSHOT_NAME = f"/tmp/gym-screenshot-{random.randint(0, 100000000)}.ppm"
@classmethod
def take_window(cls, window):
subprocess.call(['import', '-window', window, '-silent', cls.TEMP_SCREENSHOT_NAME])
array = None
with open(cls.TEMP_SCREENSHOT_NAME, 'rb') as f:
            array = np.frombuffer(f.read(), dtype='B')  # np.fromstring is deprecated for binary data
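        # A binary PPM ("P6") file starts with three whitespace-terminated header
        # fields (magic number, "width height", maxval), followed by raw pixel bytes.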
# Reads the header
header = [b'', b'', b'']
header_line = 0
index = 0
while header_line < 3:
            header[header_line] += bytes([array[index]])  # append the raw byte to the current header field
if array[index] == 10:
header_line += 1
index += 1
# Reads the image pixels based on the maxval header value
maxval = int(header[2].decode('ascii'))
if maxval == 255:
img_data = array[index:]
elif maxval == 65535:
img_data = array[index::2]
else:
raise WrongPPMFileException(f"Invalid maxvalue {maxval}")
wh = header[1].decode('ascii').split(' ')
return Screenshot(int(wh[0]), int(wh[1]), img_data)
def __init__(self, width, height, data):
if len(data) != width * height * 3:
raise WrongImageDataException("Image data not matching declared size")
self.width = width
self.height = height
self.pixels = data
def pixel(self, x, y):
if x < 0 or x >= self.width or y < 0 or y >= self.height:
raise WrongPixelException(f"Invalid pixel {x}, {y}")
        base = (y * self.width + x) * 3  # PPM pixel data is stored row by row
return (self.pixels[base], self.pixels[base + 1], self.pixels[base + 2])
def save(self, filename, overwrite=False, fast=True):
if os.path.exists(filename) and not overwrite:
return False
with open(filename, 'wb') as f:
f.write(f"P6\n{self.width} {self.height}\n255\n".encode())
if not fast:
                # Write pixels row by row to match the row-major PPM layout.
                for y in range(self.height):
                    for x in range(self.width):
                        f.write(bytes(self.pixel(x, y)))
else:
f.write(bytes(self.pixels))
class SlidingWindowWithOverlap:
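    """Keeps a sliding window of recent observations; once the window is full, get()
    returns their sum once every `overlap` additions and None otherwise."""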
def __init__(self, size=20, overlap=10):
self._observations = []
self._size = size
self._overlap = overlap
self._current_overlap = -1
def add(self, value):
self._observations.append(value)
if len(self._observations) >= self._size:
self._observations.pop(0)
self._current_overlap += 1
def get(self):
print(f"Overlap: {self._current_overlap}; Size: {len(self._observations)}")
overlap = self._current_overlap
if len(self._observations) < self._size - 1:
return None
if overlap % self._overlap == 0:
return sum(self._observations)
else:
return None
def force_get(self):
return sum(self._observations)
def clear(self):
self._observations.clear()
self._current_overlap = -1
class WrongPPMFileException(Exception):
pass
class WrongImageDataException(Exception):
pass
class WrongPixelException(Exception):
pass
|
run.py
|
import threading
from datamodel import Bundle
from signalserver import SignalingServer
from voiceserver import VoiceServer
if __name__ == "__main__":
ADDR = "0.0.0.0"
PORT = 9001
print("RicoChat loading server")
bundle = Bundle()
def thread_tcp():
_port = PORT + 1
print("RicoChat Signal server: listening on {}:{}".format(ADDR, _port))
serverTCP = SignalingServer(bundle, (ADDR, _port))
serverTCP.serve_forever()
def thread_udp():
_port = PORT
print("RicoChat Voice server: listening on {}:{}".format(ADDR, _port))
serverUDP = VoiceServer(bundle, (ADDR, _port))
serverUDP.serve_forever()
t1 = threading.Thread(target=thread_tcp)
t1.start()
t2 = threading.Thread(target=thread_udp)
t2.start()
|
kill_restart.py
|
import sys
import queue
import struct
import threading
import importlib
import torch.multiprocessing as mp
from util.utils import TcpServer, TcpAgent, timestamp
def func_get_request(qout):
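    # Wire format handled below: a 4-byte length (struct format 'I'), the model
    # name of that length, another 4-byte data length, then an optional payload;
    # a zero-length model name tells this listener thread to stop.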
# Listen connections
server = TcpServer('localhost', 12345)
while True:
# Get connection
conn, _ = server.accept()
agent = TcpAgent(conn)
model_name_length_b = agent.recv(4)
model_name_length = struct.unpack('I', model_name_length_b)[0]
if model_name_length == 0:
break
model_name_b = agent.recv(model_name_length)
model_name = model_name_b.decode()
timestamp('tcp', 'get_name')
data_length_b = agent.recv(4)
data_length = struct.unpack('I', data_length_b)[0]
if data_length > 0:
data_b = agent.recv(data_length)
else:
data_b = None
timestamp('tcp', 'get_data')
qout.put((agent, model_name, data_b))
def func_schedule(qin):
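    # Preemptive scheduling: each new request kills the currently running
    # worker process (if any) before spawning a fresh worker for it.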
active_worker = None
while True:
agent, model_name, data_b = qin.get()
if active_worker is not None:
active_worker.kill()
active_worker.join()
active_worker = mp.Process(target=worker_compute, args=(agent, model_name, data_b))
active_worker.start()
def worker_compute(agent, model_name, data_b):
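    # Loads the named task module, moves its model to the GPU and runs it.
    # Training tasks get the 'FNSH' acknowledgement before computing so the
    # client is released immediately; inference tasks reply after computing.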
# Load model
model_module = importlib.import_module('task.' + model_name)
model, func, _ = model_module.import_task()
data_loader = model_module.import_data_loader()
# Model to GPU
model = model.to('cuda')
# Compute
if 'training' in model_name:
agent.send(b'FNSH')
del agent
timestamp('server', 'reply')
output = func(model, data_loader)
timestamp('server', 'complete')
else:
output = func(model, data_b)
timestamp('server', 'complete')
agent.send(b'FNSH')
del agent
timestamp('server', 'reply')
def main():
# Create threads and worker process
q_to_schedule = queue.Queue()
t_get = threading.Thread(target=func_get_request, args=(q_to_schedule,))
t_get.start()
t_schedule = threading.Thread(target=func_schedule, args=(q_to_schedule,))
t_schedule.start()
# Accept connection
t_get.join()
t_schedule.join()
if __name__ == '__main__':
mp.set_start_method('spawn')
main()
|
fixity.py
|
#!/usr/bin/python3.5
import re
import os
import sys
import json
import timeit
import hashlib
import datetime
import requests
import subprocess
from io import BytesIO
from shutil import copyfile
from threading import Thread
from time import gmtime, strftime
from warcio.warcwriter import WARCWriter
from warcio.archiveiterator import ArchiveIterator
from warcio.statusandheaders import StatusAndHeaders
def extracted_headers_from_warc_record(record, record_status):
    response_headers = {}
    response_headers_values = ''
    response_headers_keys = ''
    # The headers contributing to the fixity hash depend on the status code class.
    if str(record_status)[0] == '2':
        header_names = ['Content-Type', 'Content-Length', 'Content-Location',
                        'X-Archive-Orig-content-md5', 'X-Archive-Orig-x-fb-content-md5',
                        'X-Archive-Orig-age', 'X-Archive-Orig-status', 'X-Archive-Orig-date',
                        'X-Archive-Orig-user-agent', 'X-Archive-Orig-etag',
                        'X-Archive-Orig-link', 'X-Archive-Orig-last-modified']
    elif str(record_status)[0] in ['3', '4', '5']:
        header_names = ['Location', 'X-Archive-Orig-date', 'X-Archive-Orig-status']
    else:
        header_names = []
    for name in header_names:
        h_v = record.http_headers.get_header(name)
        if h_v:
            response_headers[name] = h_v
            response_headers_values = response_headers_values + ' ' + h_v
            response_headers_keys = response_headers_keys + ' ' + '$' + name
    if len(response_headers_values) > 0:
        response_headers_values = response_headers_values[1:]
    if len(response_headers_keys) > 0:
        response_headers_keys = response_headers_keys[1:]
    return response_headers, response_headers_values, response_headers_keys
def convert_to_original_link(uri):
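    # Finds the 14-digit Memento datetime in the archived URI and rewrites it
    # into the archive's raw ("id_") form, returning that URI, the datetime
    # string, and the original resource URI.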
    tmp = re.findall(r'\d+', uri)
for t in tmp:
if len(str(t)) == 14:
before = uri.split(str(t),1)[0]
after = uri.split(str(t),1)[1].split("/",1)[1]
return before+str(t)+'id_/'+after,t, after
return None, None, None
def generate_atomic(urim):
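    # Downloads the raw memento into a local WARC, then hashes the entity body
    # together with selected response headers (MD5 and SHA-256) and writes a
    # JSON manifest named after its own SHA-256 value.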
tic_all=timeit.default_timer()
time_json = {
'date': strftime("%Y%m%d%H%M%S", gmtime()),
'time_in_seconds_to_download_memento':0,
'time_in_seconds_to_generate_fixity':0
}
urimid_, mdatetime, urir = convert_to_original_link(urim)
manif = {
"@context": "http://manifest.ws-dl.cs.odu.edu/terms.json",
"uri-r": urir,
"uri-m": urim,
"memento-datetime": datetime.datetime.strptime(mdatetime, '%Y%m%d%H%M%S').strftime('%a, %d %b %Y %H:%M:%S GMT')
}
urimh = hashlib.md5(urim.encode()).hexdigest()
downloadtime = strftime("%Y%m%d%H%M%S", gmtime())
manif["created"] = datetime.datetime.strptime(downloadtime, '%Y%m%d%H%M%S').strftime('%a, %d %b %Y %H:%M:%S GMT')
# outMainDir = '/data/Fixity/mementos/'+urimh+'/'+downloadtime
outMainDir = '/Users/maturban/Fixity/generate_manifest3/'+urimh+'/'+downloadtime
warc_file = outMainDir + '/raw.warc'
tic0=timeit.default_timer()
if not os.path.exists(outMainDir):
os.makedirs(outMainDir)
with open(warc_file, 'wb') as poutput:
writer = WARCWriter(poutput, gzip=False)
headers = {
'User-Agent': 'Web Science and Digital Libraries Group (@WebSciDL); Project/archives_fixity; Contact/Mohamed Aturban (maturban@odu.edu)',
'Accept-Encoding': None
}
try:
resp = requests.get(urimid_, headers=headers, timeout=180, allow_redirects=True, stream=True)
except:
            pass
cont = resp.content
headers_list = resp.headers.items()
http_headers = StatusAndHeaders(str(resp.status_code), headers_list, protocol='HTTP/1.0')
record = writer.create_warc_record(urimid_, 'response',
payload=BytesIO(cont),
http_headers=http_headers)
try:
writer.write_record(record)
except Exception as e:
print(str(e))
toc0=timeit.default_timer()
if os.path.exists(warc_file):
with open(warc_file, 'rb') as stream:
counter_raw = 0
for record in ArchiveIterator(stream):
if record.rec_type == 'response':
uri = record.rec_headers.get_header('WARC-Target-URI')
if uri == urimid_:
status_code = record.http_headers.statusline.split()[0]
entity = record.content_stream().read() #.strip()
                        hdrs, hdrs_values, hdrs_keys = extracted_headers_from_warc_record(record, status_code)
hdrs["Preference-Applied"] = "original-links, original-content"
md5h = hashlib.md5(entity + hdrs_values.encode()).hexdigest()
sha256h = hashlib.sha256(entity + hdrs_values.encode()).hexdigest()
hash_v = "md5:{} sha256:{}".format(md5h, sha256h)
hash_constructor = "(curl -s '$uri-m' && echo -n '"+hdrs_keys+"') | tee >(sha256sum) >(md5sum) >/dev/null | cut -d ' ' -f 1 | paste -d':' <(echo -e 'md5\nsha256') - | paste -d' ' - -"
manif["http-headers"] = hdrs
manif["hash"] = hash_v
manif["hash-constructor"] = hash_constructor
manif["@id"] = "http://manifest.ws-dl.cs.odu.edu/manifest/"+downloadtime+'/ /'+urim
manif_file = json.dumps(manif,indent=4)
self_hash = hashlib.sha256(manif_file.encode()).hexdigest()
manif["@id"] = manif["@id"].replace("/ /","/"+self_hash+"/")
with open(outMainDir+'/'+self_hash+'.json', 'w') as outfile:
json.dump(manif, outfile, indent=4)
toc_all=timeit.default_timer()
time_json['time_in_seconds_to_download_memento'] = toc0 - tic0
time_json['time_in_seconds_to_generate_fixity'] = (toc_all - tic_all) - time_json['time_in_seconds_to_download_memento']
with open(outMainDir+'/'+self_hash+'.json.time', 'w') as outfile:
json.dump(time_json, outfile, indent=4)
return outMainDir+'/'+self_hash+'.json'
def generate_current(urim):
tic_all=timeit.default_timer()
time_json = {
'date': strftime("%Y%m%d%H%M%S", gmtime()),
'time_in_seconds_to_download_memento':0,
'time_in_seconds_to_generate_fixity':0
}
urimid_, mdatetime, urir = convert_to_original_link(urim)
manif = {
"@context": "http://manifest.ws-dl.cs.odu.edu/terms.json",
"uri-r": urir,
"uri-m": urim,
"memento-datetime": datetime.datetime.strptime(mdatetime, '%Y%m%d%H%M%S').strftime('%a, %d %b %Y %H:%M:%S GMT')
}
urimh = hashlib.md5(urim.encode()).hexdigest()
downloadtime = strftime("%Y%m%d%H%M%S", gmtime())
manif["created"] = datetime.datetime.strptime(downloadtime, '%Y%m%d%H%M%S').strftime('%a, %d %b %Y %H:%M:%S GMT')
outMainDir = '/data/Fixity/verification/'+urimh+'/'+downloadtime
warc_file = outMainDir + '/raw.warc'
tic0=timeit.default_timer()
if not os.path.exists(outMainDir):
os.makedirs(outMainDir)
with open(warc_file, 'wb') as poutput:
writer = WARCWriter(poutput, gzip=False)
headers = {
'User-Agent': 'Web Science and Digital Libraries Group (@WebSciDL); Project/archives_fixity; Contact/Mohamed Aturban (maturban@odu.edu)',
'Accept-Encoding': None
}
try:
resp = requests.get(urimid_, headers=headers, timeout=180, allow_redirects=True, stream=True)
except:
            pass
cont = resp.content
headers_list = resp.headers.items()
http_headers = StatusAndHeaders(str(resp.status_code), headers_list, protocol='HTTP/1.0')
record = writer.create_warc_record(urimid_, 'response',
payload=BytesIO(cont),
http_headers=http_headers)
try:
writer.write_record(record)
except Exception as e:
print(str(e))
toc0=timeit.default_timer()
if os.path.exists(warc_file):
with open(warc_file, 'rb') as stream:
counter_raw = 0
for record in ArchiveIterator(stream):
if record.rec_type == 'response':
uri = record.rec_headers.get_header('WARC-Target-URI')
if uri == urimid_:
status_code = record.http_headers.statusline.split()[0]
entity = record.content_stream().read() #.strip()
                        hdrs, hdrs_values, hdrs_keys = extracted_headers_from_warc_record(record, status_code)
hdrs["Preference-Applied"] = "original-links, original-content"
md5h = hashlib.md5(entity + hdrs_values.encode()).hexdigest()
sha256h = hashlib.sha256(entity + hdrs_values.encode()).hexdigest()
hash_v = "md5:{} sha256:{}".format(md5h, sha256h)
hash_constructor = "(curl -s '$uri-m' && echo -n '"+hdrs_keys+"') | tee >(sha256sum) >(md5sum) >/dev/null | cut -d ' ' -f 1 | paste -d':' <(echo -e 'md5\nsha256') - | paste -d' ' - -"
manif["http-headers"] = hdrs
manif["hash"] = hash_v
manif["hash-constructor"] = hash_constructor
manif["@id"] = "http://manifest.ws-dl.cs.odu.edu/manifest/"+downloadtime+'/ /'+urim
manif_file = json.dumps(manif,indent=4)
self_hash = hashlib.sha256(manif_file.encode()).hexdigest()
manif["@id"] = manif["@id"].replace("/ /","/"+self_hash+"/")
with open(outMainDir+'/'+self_hash+'.json', 'w') as outfile:
json.dump(manif, outfile, indent=4)
toc_all=timeit.default_timer()
time_json['time_in_seconds_to_download_memento'] = toc0 - tic0
time_json['time_in_seconds_to_generate_fixity'] = (toc_all - tic_all) - time_json['time_in_seconds_to_download_memento']
with open(outMainDir+'/'+self_hash+'.json.time', 'w') as outfile:
json.dump(time_json, outfile, indent=4)
return outMainDir+'/'+self_hash+'.json'
def publish_atomic(manif):
tic_0=timeit.default_timer()
manifest_file = manif.rsplit("/",1)[1]
urimh = manif.split("/data/Fixity/mementos/",1)[1].split("/",1)[0]
manifest_datetime = manif.split(urimh+'/',1)[1].split("/",1)[0]
try:
os.mkdir('/data/Fixity/manifests/'+urimh)
except:
pass
copyfile(manif, '/data/Fixity/manifests/'+urimh+'/'+manifest_datetime+'-'+manifest_file)
with open(manif) as data_file:
manifest_json = json.load(data_file)
toc_0=timeit.default_timer()
time_json = {"time_in_seconds_to_publish_manifest" : toc_0 - tic_0, 'date': strftime("%Y%m%d%H%M%S", gmtime())}
with open(manif+'.publish_time', 'w') as outfile:
json.dump(time_json, outfile, indent=4)
return 'http://manifest.ws-dl.cs.odu.edu/manifest/'+manifest_json['uri-m'], manifest_json['@id']
def disseminate_block(uri_block,flags):
res_list = []
times = {}
# archivenow
if 'wc' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --wc '"+uri_block+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
print('block_urim:',v)
print('Time-in-secs-to-push-into-wc:',toc-tic)
print('Date:',strftime("%Y%m%d%H%M%S", gmtime()))
print()
if 'ia' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --ia '"+uri_block+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
print('block_urim:',v)
print('Time-in-secs-to-push-into-ia:',toc-tic)
print('Date:',strftime("%Y%m%d%H%M%S", gmtime()))
print()
if 'is' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --is '"+uri_block+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
print('block_urim:',v)
print('Time-in-secs-to-push-into-is:',toc-tic)
print('Date:',strftime("%Y%m%d%H%M%S", gmtime()))
print()
if 'cc' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --cc --cc_api_key=dba45acaac24682584eea381f3c36a2d4dbd54ee '"+uri_block+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
print('block_urim:',v)
print('Time-in-secs-to-push-into-cc:',toc-tic)
print('Date:',strftime("%Y%m%d%H%M%S", gmtime()))
print()
def disseminate_atomic(uri_manif,flags):
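    # Pushes the manifest URI into the selected web archives via archivenow,
    # then records the resulting archived-manifest URIs and per-archive push
    # times next to the locally stored manifest.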
res_list = []
times = {}
# archivenow
if 'wc' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --wc '"+uri_manif+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
print(out)
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
res_list.append(v)
times[v] = {}
times[v]['time_to_archivenow'] = toc-tic
times[v]['date'] = strftime("%Y%m%d%H%M%S", gmtime())
if 'ia' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --ia '"+uri_manif+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
res_list.append(v)
times[v] = {}
times[v]['time_to_archivenow'] = toc-tic
times[v]['date'] = strftime("%Y%m%d%H%M%S", gmtime())
if 'is' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --is '"+uri_manif+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
print(out)
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
res_list.append(v)
times[v] = {}
times[v]['time_to_archivenow'] = toc-tic
times[v]['date'] = strftime("%Y%m%d%H%M%S", gmtime())
if 'cc' in flags:
tic=timeit.default_timer()
cmdo = "archivenow --cc --cc_api_key=dba45acaac24682584eea381f3c36a2d4dbd54ee '"+uri_manif+"'"
p = subprocess.Popen(cmdo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
toc=timeit.default_timer()
for v in out.decode().split("\n"):
if (v != "") and (v.startswith("http")):
res_list.append(v)
times[v] = {}
times[v]['time_to_archivenow'] = toc-tic
times[v]['date'] = strftime("%Y%m%d%H%M%S", gmtime())
# the server keeps those manif_urims locally
try:
r = requests.get(uri_manif, timeout=180, allow_redirects=True)
except:
sys.exit(1)
json_manif = r.json()
urimh = hashlib.md5(json_manif['uri-m'].encode()).hexdigest()
created_dt = datetime.datetime.strptime(json_manif["created"],'%a, %d %b %Y %H:%M:%S GMT').strftime('%Y%m%d%H%M%S')
manifh = json_manif["@id"].split('.ws-dl.cs.odu.edu/manifest/',1)[1].split("/",1)[1].split("/",1)[0]
manif_urim_file = '/data/Fixity/manifests/'+urimh+'/'+created_dt+'-'+manifh+'.urim-manif'
try:
with open(manif_urim_file) as myfile:
list_manif_urims = myfile.read().split('\n')
list_manif_urims = list(filter(lambda a: a != "", list_manif_urims))
except:
list_manif_urims = []
        pass
try:
with open(manif_urim_file+'.time') as data_file:
old_times = json.load(data_file)
except:
old_times = {}
        pass
with open(manif_urim_file, "w") as myfile:
for v in res_list:
if (v != "") and (v not in list_manif_urims) and (v.startswith("http")):
list_manif_urims.append(v)
old_times[v] = times[v]
for v in list_manif_urims:
myfile.write(v+'\n')
with open(manif_urim_file+'.time', 'w') as outfile:
json.dump(old_times, outfile, indent=4)
return res_list
res_manifests = []
json_manif = {}
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def get_manif_ia(m):
try:
r = requests.get(m, headers=headers, allow_redirects=True, timeout=120)
r.raise_for_status()
return r.json()
except:
return None
def get_manif_cc(m):
try:
r = requests.get(m, headers=headers, allow_redirects=True, timeout=120)
r.raise_for_status()
n_urim = "http://perma-archives.org/warc/" +m.rsplit("/",1)[1]+'/'+r.text.split("perma-archives.org/warc/"+m.rsplit("/",1)[1],1)[1].split('"',1)[0]
r = requests.get(n_urim, headers=headers, allow_redirects=True, timeout=120)
r.raise_for_status()
return r.json()
except:
return None
def get_manif_is(m):
try:
r = requests.get(m, headers=headers, allow_redirects=True, timeout=120)
r.raise_for_status()
return json.loads(r.text.split('word;white-space:pre-wrap;">',1)[1].split("</pre></div>",1)[0])
except:
return None
def get_manif_wc(m):
try:
r_init = requests.get(m, headers=headers, allow_redirects=True, timeout=120)
r = requests.get('http://www.webcitation.org/mainframe.php', timeout=180, cookies=r_init.cookies, allow_redirects=True)
r.raise_for_status()
except:
return None
return r.json()
res_manifests_time = {}
def get_manifests(m):
global res_manifests
global json_manif
global res_manifests_time
if m != "":
s_name = m.split("//",1)[1].split("/",1)[0]
if s_name == 'manifest.ws-dl.cs.odu.edu':
res_manifests.append({m : json_manif})
res_manifests_time[m] = 0.0
elif s_name == 'web.archive.org':
tic_get_manif_ia = timeit.default_timer()
res_manifests.append({m : get_manif_ia(m)})
toc_get_manif_ia = timeit.default_timer()
res_manifests_time[m] = toc_get_manif_ia - tic_get_manif_ia
elif s_name == 'perma.cc':
tic_get_manif_cc = timeit.default_timer()
res_manifests.append({m : get_manif_cc(m)})
toc_get_manif_cc = timeit.default_timer()
res_manifests_time[m] = toc_get_manif_cc - tic_get_manif_cc
elif s_name == 'www.webcitation.org':
tic_get_manif_wc = timeit.default_timer()
res_manifests.append({m : get_manif_wc(m)})
toc_get_manif_wc = timeit.default_timer()
res_manifests_time[m] = toc_get_manif_wc - tic_get_manif_wc
elif s_name == 'archive.is':
tic_get_manif_is = timeit.default_timer()
res_manifests.append({m : get_manif_is(m)})
toc_get_manif_is = timeit.default_timer()
res_manifests_time[m] = toc_get_manif_is - tic_get_manif_is
def verify_atomic(current_manifest):
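    # Re-fetches the generic manifest from the server, gathers its archived
    # copies, downloads them in parallel threads, and compares each copy's
    # "hash" value against the hash in the freshly generated current manifest.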
global json_manif
global res_manifests_time
global res_manifests
res_manifests = []
res_manifests_time = {}
with open(current_manifest) as data_file:
current_manifest_json = json.load(data_file)
with open(current_manifest+'.time') as data_file:
curr_manif_time = json.load(data_file)
verify_json = {
"date": strftime("%Y%m%d%H%M%S", gmtime()),
"current_manif":current_manifest,
"time_in_seconds_to_download_current_memento":curr_manif_time["time_in_seconds_to_download_memento"],
"time_in_seconds_to_generate_current_fixity":curr_manif_time["time_in_seconds_to_download_memento"],
"manif_mementos":{}
}
tic_discover=timeit.default_timer()
mdatetime = current_manifest_json['memento-datetime']
mdatetime_ts = datetime.datetime.strptime(mdatetime, '%a, %d %b %Y %H:%M:%S GMT').strftime('%Y%m%d%H%M%S')
generic_manifest = 'http://manifest.ws-dl.cs.odu.edu/manifest/'+mdatetime_ts+'/'+current_manifest_json['uri-m']
hash_md5_sha256 = current_manifest_json["hash"]
try:
r = requests.get(generic_manifest, allow_redirects=True, timeout=120)
r.raise_for_status()
except Exception as e:
print('Error', str(e))
sys.exit(1)
json_manif = r.json()
urimh = hashlib.md5(json_manif['uri-m'].encode()).hexdigest()
created_dt = datetime.datetime.strptime(json_manif["created"],'%a, %d %b %Y %H:%M:%S GMT').strftime('%Y%m%d%H%M%S')
manifh = json_manif["@id"].split('.ws-dl.cs.odu.edu/manifest/',1)[1].split("/",1)[1].split("/",1)[0]
manif_urim_file = '/data/Fixity/manifests/'+urimh+'/'+created_dt+'-'+manifh+'.urim-manif'
try:
with open(manif_urim_file) as myfile:
list_manif_urims = myfile.read().split('\n')
list_manif_urims = list(filter(lambda a: a != "", list_manif_urims))
except:
list_manif_urims = []
        pass
uri_manif = r.url
list_manif_urims = [uri_manif] + list_manif_urims
toc_discover=timeit.default_timer()
verify_json["time-to-discover_manifests_through_server"] = toc_discover - tic_discover
matched = []
mismatched = []
threads = []
for m in list_manif_urims:
threads.append(Thread(target=get_manifests, args=(m,)))
for th in threads:
th.start()
for th in threads:
th.join()
titit = 0.0
for v, k in res_manifests_time.items():
verify_json["manif_mementos"][v] = {"time-to-download-manifest-memento":k}
if k > titit:
titit = k
verify_json['total-time-to-download-all-manifests-in-parallel'] = titit
for v in res_manifests:
m = list(v)[0]
js = v[m]
if js != None:
if hash_md5_sha256 == js["hash"]:
matched.append(m)
verify_json["manif_mementos"][m]['matched'] = "YES"
else:
verify_json["manif_mementos"][m]['matched'] = "No"
mismatched.append(m)
mismatched = list(filter(lambda a: a != None, mismatched))
verify_json['matched-manifests'] = len(matched)
verify_json['mismatched-manifests'] = len(mismatched)
manif_urim_file_verify = manif_urim_file+'.verify'
with open(manif_urim_file_verify, 'w') as outfile:
json.dump(verify_json, outfile, indent=4)
print(json.dumps(verify_json, indent=4))
return hash_md5_sha256, matched, mismatched
if __name__ == '__main__':
action = sys.argv[1]
if action == 'generate_atomic':
manif_loc = generate_atomic(sys.argv[2])
print(manif_loc)
elif action == 'generate_current':
manif_loc = generate_current(sys.argv[2])
print(manif_loc)
elif action == 'publish_atomic':
generic_uri, trusty_uri = publish_atomic(sys.argv[2])
print(generic_uri)
print(trusty_uri)
elif action == 'disseminate_atomic':
        manif_urims = disseminate_atomic(sys.argv[2], sys.argv[3].split(","))
for m in manif_urims:
print(m)
elif action == "disseminate_block":
disseminate_block(sys.argv[2], sys.argv[3].split(","))
elif action == 'verify_atomic':
current_h, matched, mismatched = verify_atomic(sys.argv[2])
print('Current hash:', current_h)
print(len(matched),'matched manifests:')
for m in matched:
print('\t'+str(m))
print(len(mismatched),'mismatched manifests:')
for m in mismatched:
print('\t'+str(m))
elif action == 'verify_without_server':
print("ready but not available yes.")
|
logsclient.py
|
"""This file implements a threaded stream controller to return logs back from
the ray clientserver.
"""
import sys
import logging
import queue
import threading
import time
import grpc
from typing import TYPE_CHECKING
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.debug import log_once
if TYPE_CHECKING:
from ray.util.client.worker import Worker
logger = logging.getLogger(__name__)
# TODO(barakmich): Running a logger in a logger causes loopback.
# The client logger needs its own root -- possibly this one.
# For the moment, let's just not propagate beyond this point.
logger.propagate = False
class LogstreamClient:
def __init__(self, client_worker: "Worker", metadata: list):
"""Initializes a thread-safe log stream over a Ray Client gRPC channel.
Args:
client_worker: The Ray Client worker that manages this client
metadata: metadata to pass to gRPC requests
"""
self.client_worker = client_worker
self._metadata = metadata
self.request_queue = queue.Queue()
self.log_thread = self._start_logthread()
self.log_thread.start()
self.last_req = None
def _start_logthread(self) -> threading.Thread:
return threading.Thread(target=self._log_main, args=(), daemon=True)
def _log_main(self) -> None:
reconnecting = False
while not self.client_worker._in_shutdown:
if reconnecting:
# Refresh queue and retry last request
self.request_queue = queue.Queue()
if self.last_req:
self.request_queue.put(self.last_req)
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.client_worker.channel)
try:
log_stream = stub.Logstream(
iter(self.request_queue.get, None), metadata=self._metadata
)
except ValueError:
# Trying to use the stub on a cancelled channel will raise
# ValueError. This should only happen when the data client
# is attempting to reset the connection -- sleep and try
# again.
time.sleep(0.5)
continue
try:
for record in log_stream:
if record.level < 0:
self.stdstream(level=record.level, msg=record.msg)
self.log(level=record.level, msg=record.msg)
return
except grpc.RpcError as e:
reconnecting = self._process_rpc_error(e)
if not reconnecting:
return
def _process_rpc_error(self, e: grpc.RpcError) -> bool:
"""
Processes RPC errors that occur while reading from data stream.
Returns True if the error can be recovered from, False otherwise.
"""
if self.client_worker._can_reconnect(e):
if log_once("lost_reconnect_logs"):
logger.warning(
"Log channel is reconnecting. Logs produced while "
"the connection was down can be found on the head "
"node of the cluster in "
"`ray_client_server_[port].out`"
)
logger.debug("Log channel dropped, retrying.")
time.sleep(0.5)
return True
logger.debug("Shutting down log channel.")
if not self.client_worker._in_shutdown:
logger.exception("Unexpected exception:")
return False
def log(self, level: int, msg: str):
"""Log the message from the log stream.
By default, calls logger.log but this can be overridden.
Args:
level: The loglevel of the received log message
msg: The content of the message
"""
logger.log(level=level, msg=msg)
def stdstream(self, level: int, msg: str):
"""Log the stdout/stderr entry from the log stream.
By default, calls print but this can be overridden.
Args:
level: The loglevel of the received log message
msg: The content of the message
"""
print_file = sys.stderr if level == -2 else sys.stdout
print(msg, file=print_file, end="")
def set_logstream_level(self, level: int):
logger.setLevel(level)
req = ray_client_pb2.LogSettingsRequest()
req.enabled = True
req.loglevel = level
self.request_queue.put(req)
self.last_req = req
def close(self) -> None:
self.request_queue.put(None)
if self.log_thread is not None:
self.log_thread.join()
def disable_logs(self) -> None:
req = ray_client_pb2.LogSettingsRequest()
req.enabled = False
self.request_queue.put(req)
self.last_req = req
|
tools.py
|
# Lint-as: python3
"""Utilities for locating and invoking compiler tools."""
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import io
import os
import subprocess
import sys
import textwrap
import threading
from typing import List, Optional, Union
__all__ = [
"find_tool",
"invoke_immediate",
"invoke_pipeline",
"get_tool_path",
"CompilerToolError",
]
# In normal distribution circumstances, each named tool is associated with
# a python module that provides a `get_tool` function for getting its absolute
# path. This dictionary maps the tool name to the module.
_TOOL_MODULE_MAP = {
"iree-import-tflite": "iree.tools.tflite",
"iree-import-xla": "iree.tools.xla",
"iree-import-tf": "iree.tools.tf",
"iree-translate": "iree.tools.core",
}
# Map of tool module to package name as distributed to archives (used for
# error messages).
_TOOL_MODULE_PACKAGES = {
"iree.tools.core": "google-iree-tools-core",
"iree.tools.tf": "google-iree-tools-tf",
"iree.tools.tflite": "google-iree-tools-tflite",
"iree.tools.xla": "google-iree-tools-xla",
}
# Environment variable holding directories to be searched for named tools.
# Delimited by os.pathsep.
_TOOL_PATH_ENVVAR = "IREE_TOOL_PATH"
class CompilerToolError(Exception):
"""Compiler exception that preserves the command line and error output."""
def __init__(self, process: subprocess.CompletedProcess):
try:
errs = process.stderr.decode("utf-8")
except:
errs = str(process.stderr) # Decode error or other: best we can do.
tool_name = os.path.basename(process.args[0])
super().__init__(f"Error invoking IREE compiler tool {tool_name}\n"
f"Diagnostics:\n{errs}\n\n"
f"Invoked with:\n {' '.join(process.args)}")
def get_tool_path() -> List[str]:
"""Returns list of paths to search for tools."""
list_str = os.environ.get(_TOOL_PATH_ENVVAR)
if not list_str:
return []
return list_str.split(os.pathsep)
def find_tool(exe_name: str) -> str:
"""Finds a tool by its (extension-less) executable name.
Args:
exe_name: The name of the executable (extension-less).
Returns:
An absolute path to the tool.
Raises:
ValueError: If the tool is not known or not found.
"""
if exe_name not in _TOOL_MODULE_MAP:
raise ValueError(f"IREE compiler tool '{exe_name}' is not a known tool")
# First search an explicit tool path.
tool_path = get_tool_path()
for path_entry in tool_path:
if not path_entry:
continue
candidate_exe = os.path.join(path_entry, exe_name)
if os.path.isfile(candidate_exe) and os.access(candidate_exe, os.X_OK):
return candidate_exe
# Attempt to load the tool module.
tool_module_name = _TOOL_MODULE_MAP[exe_name]
tool_module_package = _TOOL_MODULE_PACKAGES[tool_module_name]
try:
tool_module = importlib.import_module(tool_module_name)
except ModuleNotFoundError:
raise ValueError(
f"IREE compiler tool '{exe_name}' is not installed (it should have been "
f"found in the python module '{tool_module_name}', typically installed "
f"via the package {tool_module_package}).\n\n"
f"Either install the package or set the {_TOOL_PATH_ENVVAR} environment "
f"variable to contain the path of the tool executable "
f"(current {_TOOL_PATH_ENVVAR} = {repr(tool_path)})") from None
# Ask the module for its tool.
candidate_exe = tool_module.get_tool(exe_name)
if (not candidate_exe or not os.path.isfile(candidate_exe) or
not os.access(candidate_exe, os.X_OK)):
raise ValueError(
f"IREE compiler tool '{exe_name}' was located in module "
f"'{tool_module_name}' but the file was not found or not executable: "
f"{candidate_exe}")
return candidate_exe
def invoke_immediate(command_line: List[str],
*,
input_file: Optional[bytes] = None,
immediate_input=None):
"""Invokes an immediate command.
This is separate from invoke_pipeline as it is simpler and supports more
complex input redirection, using recommended facilities for sub-processes
(less magic).
Note that this differs from the usual way of using subprocess.run or
subprocess.Popen().communicate() because we need to pump all of the error
streams individually and only pump pipes not connected to a different stage.
Uses threads to pump everything that is required.
"""
run_args = {}
input_file_handle = None
stderr_handle = sys.stderr
try:
# Redirect input.
if input_file is not None:
input_file_handle = open(input_file, "rb")
run_args["stdin"] = input_file_handle
elif immediate_input is not None:
run_args["input"] = immediate_input
# Capture output.
# TODO(#4131) python>=3.7: Use capture_output=True.
run_args["stdout"] = subprocess.PIPE
run_args["stderr"] = subprocess.PIPE
process = subprocess.run(command_line, **run_args)
if process.returncode != 0:
raise CompilerToolError(process)
# Emit stderr contents.
_write_binary_stderr(stderr_handle, process.stderr)
return process.stdout
finally:
if input_file_handle:
input_file_handle.close()
def invoke_pipeline(command_lines: List[List[str]], immediate_input=None):
"""Invoke a pipeline of commands.
The first stage of the pipeline will have its stdin set to DEVNULL and each
subsequent stdin will derive from the prior stdout. The final stdout will
be accumulated and returned. All stderr contents are accumulated and printed
to stderr on completion or the first failing stage of the pipeline will have
an exception raised with its stderr output.
"""
stages = []
pipeline_input = (subprocess.DEVNULL
if immediate_input is None else subprocess.PIPE)
prev_out = pipeline_input
stderr_handle = sys.stderr
# Create all stages.
for i in range(len(command_lines)):
command_line = command_lines[i]
popen_args = {
"stdin": prev_out,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
}
process = subprocess.Popen(command_line, **popen_args)
prev_out = process.stdout
capture_output = (i == (len(command_lines) - 1))
stages.append(_PipelineStage(process, capture_output))
# Start stages.
for stage in stages:
stage.start()
# Pump input.
pipe_success = True
if immediate_input is not None:
try:
pipe_success = False
stages[0].process.stdin.write(immediate_input)
pipe_success = True
finally:
stages[0].process.stdin.close()
# Join.
for stage in stages:
stage.join()
# Check for errors.
for stage in stages:
assert stage.completed
if stage.completed.returncode != 0:
raise CompilerToolError(stage.completed)
# Broken pipe.
if not pipe_success:
raise CompilerToolError(stages[0].completed)
# Print any stderr output.
for stage in stages:
_write_binary_stderr(stderr_handle, stage.errs)
return stages[-1].outs
class _PipelineStage(threading.Thread):
"""Wraps a process and pumps its handles, waiting for completion."""
def __init__(self, process, capture_output):
super().__init__()
self.process = process
self.capture_output = capture_output
self.completed: Optional[subprocess.CompletedProcess] = None
self.outs = None
self.errs = None
def pump_stderr(self):
self.errs = self.process.stderr.read()
def pump_stdout(self):
self.outs = self.process.stdout.read()
def run(self):
stderr_thread = threading.Thread(target=self.pump_stderr)
stderr_thread.start()
if self.capture_output:
stdout_thread = threading.Thread(target=self.pump_stdout)
stdout_thread.start()
self.process.wait()
stderr_thread.join()
if self.capture_output:
stdout_thread.join()
self.completed = subprocess.CompletedProcess(self.process.args,
self.process.returncode,
self.outs, self.errs)
self.process.stderr.close()
self.process.stdout.close()
def _write_binary_stderr(out_handle, contents):
# Fast-paths buffered text-io (which stderr is by default) while allowing
# full decode for non buffered and binary io.
if hasattr(out_handle, "buffer"):
out_handle.buffer.write(contents)
elif isinstance(out_handle, io.TextIOBase):
out_handle.write(contents.decode("utf-8"))
else:
out_handle.write(contents)
|
serial_test.py
|
#
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
import Queue
import functools
import serial
import threading
import time
ERROR_TIMEOUT_SECONDS = 10.0
def _same(d1, d2):
d1 = bytearray(d1)
d2 = bytearray(d2)
for i in range(min(len(d1), len(d2))):
if d1[i] != d2[i]:
return False
if len(d1) != len(d2):
return False
return True
# http://digital.ni.com/public.nsf/allkb/D37754FFA24F7C3F86256706005B9BE7
standard_baud = [
9600,
14400,
19200,
28800,
38400,
#56000, #TODO - uncomment once daplink-validation supports 56000 on nrf5x
57600,
115200,
]
timing_test_baud = standard_baud[3:]
def calc_timeout(length, baud):
"""Calculate a timeout given the data and baudrate
Positional arguments:
length - size of data to be sent
baud - baud rate to send data
Calculate a reasonable timeout given the supplied parameters.
    This function adds slightly more time than is needed, to account
    for latency and various configurations.
"""
return 12 * float(length) / float(baud) + 0.2
class SerialTester(object):
"""Helper object to buffer serial and setup baud"""
def __init__(self, port):
self.raw_serial = serial.Serial(port)
self.raw_serial.write_timeout = ERROR_TIMEOUT_SECONDS
self._queue = Queue.Queue()
self._write_thread = threading.Thread(target=self._serial_main)
self._write_thread.start()
def __enter__(self):
return self
def __exit__(self, exception_type, value, traceback):
self._queue.put(None)
self._write_thread.join(ERROR_TIMEOUT_SECONDS)
assert not self._write_thread.isAlive(), "Thread join failed"
self.raw_serial.close()
self.raw_serial = None
return False
def new_session_with_baud(self, baud, parent_test):
"""Start a new session by restarting target and setting baud"""
test_info = parent_test.create_subtest("Set Baud")
# Set baud to 115200
self.raw_serial.baudrate = 115200
self.raw_serial.timeout = 1.0
# Reset the target
self.raw_serial.sendBreak()
# Wait until the target is initialized
expected_resp = "{init}"
resp = self.read(len(expected_resp))
if not _same(resp, expected_resp):
test_info.failure("Fail on init: %s" % resp)
return False
# Change baudrate to that of the first test
command = "{baud:%i}" % baud
self.write(command)
resp = self.read(len(command))
if not _same(resp, command):
test_info.failure("Fail on baud command: %s" % resp)
return False
# Update baud of local serial port
self.raw_serial.baudrate = baud
# Read the response indicating that the baudrate
# on the target has changed
expected_resp = "{change}"
resp = self.read(len(expected_resp))
if not _same(resp, expected_resp):
test_info.failure("Fail on baud change %s" % resp)
return False
# Set default timeout
self.raw_serial.timeout = ERROR_TIMEOUT_SECONDS
# Success
return True
def read(self, length):
"""Read serial data"""
return self.raw_serial.read(length)
def write(self, data):
"""Write serial port data in the background"""
func = functools.partial(self.raw_serial.write, data[:])
self._queue.put(func)
def set_read_timeout(self, timeout):
"""Set timeout for read operations"""
assert self._queue.empty(), "Queue must be empty to change timeout"
self.raw_serial.timeout = timeout
def flush(self):
"""Wait for all writes to complete"""
self._queue.join()
assert self._queue.empty()
def _serial_main(self):
"""Write helper thread"""
while True:
task = self._queue.get(True)
if task is None:
self._queue.task_done()
# End of processing is an empty task
break
try:
task()
except serial.SerialTimeoutException:
pass
self._queue.task_done()
def test_serial(workspace, parent_test):
"""Test the serial port endpoint
Requirements:
-daplink-validation must be loaded for the target.
Positional arguments:
port - the serial port to open as a string
Return:
True if the test passed, False otherwise
"""
test_info = parent_test.create_subtest("Serial test")
board = workspace.board
port = board.get_serial_port()
test_info.info("Testing serial port %s" % port)
# Note: OSX sends a break command when a serial port is closed.
# To avoid problems while testing keep the serial port open the
# whole time. Use the property 'baudrate' to change the baud
# instead of opening a new instance.
with SerialTester(port) as sp:
# Generate a 4KB block of dummy data
# and test supported baud rates
test_data = [i for i in range(0, 256)] * 4 * 4
test_data = str(bytearray(test_data))
for baud in standard_baud:
test_info.info("Testing baud %i" % baud)
success = sp.new_session_with_baud(baud, test_info)
if not success:
test_info.failure("Unable to setup session")
continue
# Perform test
sp.write(test_data)
resp = sp.read(len(test_data))
resp = bytearray(resp)
if _same(test_data, resp):
test_info.info("Pass")
else:
test_info.failure("Fail on baud %s" % baud)
# Timing stress test - send data at critical points
# in time like right as the transmitter is turned off
# ------------------
# Test sequence
# 1. Send a block of data (vary size for the test)
# 2. Wait until 1 byte is read back
# 3. Write 1 byte
# 4. Read back all data
test_data = [i for i in range(0, 256)] * 4 * 4
test_data = str(bytearray(test_data))
for baud in timing_test_baud:
test_info.info("Timing test baud %i" % baud)
success = sp.new_session_with_baud(baud, test_info)
if not success:
test_info.failure("Unable to setup session")
continue
test_pass = True
for data_size in range(1, 10):
data = test_data[0:data_size + 1]
for _ in range(0, 1000):
resp = bytearray()
sp.write(data[0:data_size])
resp += sp.read(1)
sp.write(data[-1:])
resp += sp.read(data_size)
sp.flush()
if not _same(data, resp):
test_pass = False
test_info.info("fail size - %s" % data_size)
break
if test_pass:
test_info.info("Pass")
else:
test_info.failure("Fail on timing test with baud %s"
% baud)
# Setting change smoke test - reconfigure settings while
        # in the middle of a transfer and verify nothing bad happens
test_data = [i for i in range(0, 128)]
test_data = str(bytearray(test_data))
sp.new_session_with_baud(115200, test_info)
sp.set_read_timeout(0)
for baud in standard_baud:
sp.raw_serial.baudrate = baud
sp.write(test_data)
xfer_time = float(len(test_data) * 10) / float(baud)
time.sleep(xfer_time / 2)
# Discard data
sp.read(1024)
# Read any leftover data
sp.flush()
sp.raw_serial.baudrate = 115200
sp.set_read_timeout(1.0)
sp.read(128 * len(standard_baud))
# Generate a 8 KB block of dummy data
# and test a large block transfer
test_data = [i for i in range(0, 256)] * 4 * 8
test_data = str(bytearray(test_data))
sp.new_session_with_baud(115200, test_info)
sp.write(test_data)
resp = sp.read(len(test_data))
if _same(resp, test_data):
test_info.info("Block test passed")
else:
test_info.failure("Block test failed")
# Refresh to check for asserts
board.refresh(test_info)
|
nrf802154_sniffer.py
|
#!/usr/bin/env python
# Copyright (c) 2019, Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form, except as embedded into a Nordic
# Semiconductor ASA integrated circuit in a product or a software update for
# such product, must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# 4. This software, with or without modification, must only be used with a
# Nordic Semiconductor ASA integrated circuit.
#
# 5. Any software provided in binary form under this license must not be reverse
# engineered, decompiled, modified and/or disassembled.
#
# THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
is_standalone = __name__ == '__main__'
if sys.version[0] == '2':
# py2 support
import Queue as Queue
else:
import queue as Queue
if is_standalone:
sys.path.insert(0, os.getcwd())
import re
import signal
import struct
import threading
import time
import logging
from argparse import ArgumentParser
from binascii import a2b_hex
from serial import Serial, serialutil
from serial.tools.list_ports import comports
class Nrf802154Sniffer(object):
# Various options for pcap files: http://www.tcpdump.org/linktypes.html
DLT_USER0 = 147
DLT_IEEE802_15_4_NOFCS = 230
DLT_IEEE802_15_4_TAP = 283
# USB device identification.
NORDICSEMI_VID = 0x1915
SNIFFER_802154_PID = 0x154A
# Helpers for Wireshark argument parsing.
CTRL_ARG_CHANNEL = 0
CTRL_ARG_LOGGER = 6
# Pattern for packets being printed over serial.
    RCV_REGEX = r'received:\s+([0-9a-fA-F]+)\s+power:\s+(-?\d+)\s+lqi:\s+(\d+)\s+time:\s+(-?\d+)'
TIMER_MAX = 2**32
def __init__(self, connection_open_timeout=None):
self.serial = None
self.serial_queue = Queue.Queue()
self.running = threading.Event()
self.setup_done = threading.Event()
self.setup_done.clear()
self.logger = logging.getLogger(__name__)
self.dev = None
self.channel = None
self.dlt = None
self.threads = []
self.connection_open_timeout = connection_open_timeout
# Time correction variables.
self.first_local_timestamp = None
self.first_sniffer_timestamp = None
def correct_time(self, sniffer_timestamp):
"""
Function responsible for correcting the time reported by the sniffer.
The sniffer timer has 1us resolution and will overflow after
approximately every 72 minutes of operation.
For that reason it is necessary to use the local system timer to find the absolute
time frame, within which the packet has arrived.
This method should work as long as the MCU and PC timers don't drift
from each other by a value of approximately 36 minutes.
:param sniffer_timestamp: sniffer timestamp in microseconds
:return: absolute sniffer timestamp in microseconds
"""
if self.first_local_timestamp is None:
# First received packets - set the reference time and convert to microseconds.
self.first_local_timestamp = int(time.time()*(10**6))
self.first_sniffer_timestamp = sniffer_timestamp
return self.first_local_timestamp
else:
local_timestamp = int(time.time()*(10**6))
time_difference = local_timestamp - self.first_local_timestamp
# Absolute sniffer timestamp calculated locally
absolute_sniffer_timestamp = self.first_sniffer_timestamp + time_difference
overflow_count = absolute_sniffer_timestamp // self.TIMER_MAX
timestamp_modulo = absolute_sniffer_timestamp % self.TIMER_MAX
# Handle the corner case - sniffer timer is about to overflow.
# Due to drift the locally calculated absolute timestamp reports that the overflow
# has already occurred. If the modulo of calculated time with added value of half timer period
# is smaller than the sniffer timestamp, then we decrement the overflow counter.
#
# The second corner case is when the sniffer timestamp just overflowed and the value is close to zero.
# Locally calculated timestamp reports that the overflow hasn't yet occurred. We ensure that this is the
            # case by testing if the sniffer timestamp is less than the modulo of the calculated timestamp subtracted by
# half of timer period. In this case we increment overflow count.
if (timestamp_modulo + self.TIMER_MAX//2) < sniffer_timestamp:
overflow_count -= 1
elif (timestamp_modulo - self.TIMER_MAX//2) > sniffer_timestamp:
overflow_count += 1
return self.first_local_timestamp - self.first_sniffer_timestamp + sniffer_timestamp + overflow_count * self.TIMER_MAX
def stop_sig_handler(self, *args, **kwargs):
"""
Function responsible for stopping the sniffer firmware and closing all threads.
"""
        # Wait before closing until we're sure that the sniffer has started. This protects us
        # from very short tests (NOTE: the serial_reader has a delayed start).
while self.running.is_set() and not self.setup_done.is_set():
time.sleep(1)
if self.running.is_set():
self.serial_queue.put(b'')
self.serial_queue.put(b'sleep')
self.running.clear()
alive_threads = []
for thread in self.threads:
try:
thread.join(timeout=10)
if thread.is_alive() is True:
self.logger.error("Failed to stop thread {}".format(thread.name))
alive_threads.append(thread)
except RuntimeError:
                # TODO: This may be called from one of the threads in the thread list - architecture problem
pass
self.threads = alive_threads
else:
self.logger.warning("Asked to stop {} while it was already stopped".format(self))
if self.serial is not None:
if self.serial.is_open is True:
self.serial.close()
self.serial = None
@staticmethod
def extcap_interfaces():
"""
Wireshark-related method that returns configuration options.
:return: string with wireshark-compatible information
"""
res = []
res.append("extcap {version=0.7.2}{help=https://github.com/NordicSemiconductor/nRF-Sniffer-for-802.15.4}{display=nRF Sniffer for 802.15.4}")
for port in comports():
if port.vid == Nrf802154Sniffer.NORDICSEMI_VID and port.pid == Nrf802154Sniffer.SNIFFER_802154_PID:
res.append ("interface {value=%s}{display=nRF Sniffer for 802.15.4}" % (port.device,) )
res.append("control {number=%d}{type=button}{role=logger}{display=Log}{tooltip=Show capture log}" % Nrf802154Sniffer.CTRL_ARG_LOGGER)
return "\n".join(res)
@staticmethod
def extcap_dlts():
"""
Wireshark-related method that returns configuration options.
:return: string with wireshark-compatible information
"""
res = []
res.append("dlt {number=%d}{name=IEEE802_15_4_NOFCS}{display=IEEE 802.15.4 without FCS}" % Nrf802154Sniffer.DLT_IEEE802_15_4_NOFCS)
res.append("dlt {number=%d}{name=IEEE802_15_4_TAP}{display=IEEE 802.15.4 TAP}" % Nrf802154Sniffer.DLT_IEEE802_15_4_TAP)
res.append("dlt {number=%d}{name=USER0}{display=User 0 (DLT=147)}" % Nrf802154Sniffer.DLT_USER0)
return "\n".join(res)
@staticmethod
def extcap_config(option):
"""
Wireshark-related method that returns configuration options.
:return: string with wireshark-compatible information
"""
args = []
values = []
res =[]
args.append ( (0, '--channel', 'Channel', 'IEEE 802.15.4 channel', 'selector', '{required=true}{default=11}') )
args.append ( (1, '--metadata', 'Out-Of-Band meta-data',
'Packet header containing out-of-band meta-data for channel, RSSI and LQI',
'selector', '{default=none}') )
if len(option) <= 0:
for arg in args:
res.append("arg {number=%d}{call=%s}{display=%s}{tooltip=%s}{type=%s}%s" % arg)
values = values + [ (0, "%d" % i, "%d" % i, "true" if i == 11 else "false" ) for i in range(11,27) ]
values.append ( (1, "none", "None", "true") )
values.append ( (1, "ieee802154-tap", "IEEE 802.15.4 TAP", "false") )
values.append ( (1, "user", "Custom Lua dissector", "false") )
for value in values:
res.append("value {arg=%d}{value=%s}{display=%s}{default=%s}" % value)
return "\n".join(res)
def pcap_header(self):
"""
Returns pcap header to be written into pcap file.
"""
header = bytearray()
header += struct.pack('<L', int ('a1b2c3d4', 16 ))
header += struct.pack('<H', 2 ) # Pcap Major Version
header += struct.pack('<H', 4 ) # Pcap Minor Version
header += struct.pack('<I', int(0)) # Timezone
        header += struct.pack('<I', int(0)) # Accuracy of timestamps
header += struct.pack('<L', int ('000000ff', 16 )) # Max Length of capture frame
header += struct.pack('<L', self.dlt) # DLT
return header
@staticmethod
def pcap_packet(frame, dlt, channel, rssi, lqi, timestamp):
"""
        Creates a pcap packet to be saved in the pcap file.
"""
pcap = bytearray()
caplength = len(frame)
if dlt == Nrf802154Sniffer.DLT_IEEE802_15_4_TAP:
caplength += 28
elif dlt == Nrf802154Sniffer.DLT_USER0:
caplength += 6
pcap += struct.pack('<L', timestamp // 1000000 ) # Timestamp seconds
pcap += struct.pack('<L', timestamp % 1000000 ) # Timestamp microseconds
pcap += struct.pack('<L', caplength ) # Length captured
pcap += struct.pack('<L', caplength ) # Length in frame
if dlt == Nrf802154Sniffer.DLT_IEEE802_15_4_TAP:
# Append TLVs according to 802.15.4 TAP specification:
# https://github.com/jkcko/ieee802.15.4-tap
pcap += struct.pack('<HH', 0, 28)
pcap += struct.pack('<HHf', 1, 4, rssi)
pcap += struct.pack('<HHHH', 3, 3, channel, 0)
pcap += struct.pack('<HHI', 10, 1, lqi)
elif dlt == Nrf802154Sniffer.DLT_USER0:
pcap += struct.pack('<H', channel)
pcap += struct.pack('<h', rssi)
pcap += struct.pack('<H', lqi)
pcap += frame
return pcap
@staticmethod
def control_read(fn):
"""
Method used for reading wireshark command.
"""
try:
header = fn.read(6)
sp, _, length, arg, typ = struct.unpack('>sBHBB', header)
if length > 2:
payload = fn.read(length - 2)
else:
payload = ''
return arg, typ, payload
except:
return None, None, None
def control_reader(self, fifo):
"""
Thread responsible for reading wireshark commands (read from fifo).
Related to not-yet-implemented wireshark toolbar features.
"""
with open(fifo, 'rb', 0) as fn:
arg = 0
while arg != None:
arg, typ, payload = Nrf802154Sniffer.control_read(fn)
self.stop_sig_handler()
def is_running(self):
return self.serial is not None and self.serial.is_open and self.setup_done.is_set()
def serial_write(self):
"""
Function responsible for sending commands to serial port.
"""
command = self.serial_queue.get(block=True, timeout=1)
try:
self.serial.write(command + b'\r\n')
self.serial.write(b'\r\n')
except IOError:
self.logger.error("Cannot write to {}".format(self))
self.running.clear()
def serial_writer(self):
"""
Thread responsible for sending commands to serial port.
"""
while self.running.is_set():
try:
self.serial_write()
except Queue.Empty:
pass
# Write final commands and break out.
while True:
try:
self.serial_write()
except Queue.Empty:
break
def serial_reader(self, dev, channel, queue):
"""
Thread responsible for reading from serial port, parsing the output and storing parsed packets into queue.
"""
time.sleep(2)
timeout = time.time() + self.connection_open_timeout if self.connection_open_timeout else None
while self.running.is_set():
try:
self.serial = Serial(dev, timeout=1, exclusive=True)
break
except Exception as e:
if timeout and time.time() > timeout:
self.running.clear()
raise Exception(
"Could not open serial connection to sniffer before timeout of {} seconds".format(
self.connection_open_timeout))
self.logger.debug("Can't open serial device: {} reason: {}".format(dev, e))
time.sleep(0.5)
try:
self.serial.reset_input_buffer()
self.serial.reset_output_buffer()
init_cmd = []
init_cmd.append(b'')
init_cmd.append(b'sleep')
init_cmd.append(b'channel ' + bytes(str(channel).encode()))
for cmd in init_cmd:
self.serial_queue.put(cmd)
            # serial_write appends '\r\n' twice to each command, so account for that when reading back the echo.
init_res = self.serial.read(len(b"".join(c + b"\r\n\r\n" for c in init_cmd)))
if not all(cmd.decode() in init_res.decode() for cmd in init_cmd):
msg = "{} did not reply properly to setup commands. Please re-plug the device and make sure firmware is correct. " \
"Recieved: {}\n".format(self, init_res)
if self.serial.is_open is True:
self.serial.close()
raise Exception(msg)
self.serial_queue.put(b'receive')
self.setup_done.set()
buf = b''
while self.running.is_set():
ch = self.serial.read()
if ch == b'':
continue
elif ch != b'\n' and ch != '\n':
buf += ch
else:
m = re.search(self.RCV_REGEX, str(buf))
if m:
packet = a2b_hex(m.group(1)[:-4])
rssi = int(m.group(2))
lqi = int(m.group(3))
timestamp = int(m.group(4)) & 0xffffffff
channel = int(channel)
queue.put(self.pcap_packet(packet, self.dlt, channel, rssi, lqi, self.correct_time(timestamp)))
buf = b''
except (serialutil.SerialException, serialutil.SerialTimeoutException) as e:
self.logger.error("Cannot communicate with serial device: {} reason: {}".format(dev, e))
finally:
self.setup_done.set() # In case it wasn't set before.
if self.running.is_set(): # Another precaution.
self.stop_sig_handler()
def fifo_writer(self, fifo, queue):
"""
Thread responsible for writing packets into pcap file/fifo from queue.
"""
with open(fifo, 'wb', 0 ) as fh:
fh.write(self.pcap_header())
fh.flush()
while self.running.is_set():
try:
packet = queue.get(block=True, timeout=1)
try:
fh.write(packet)
fh.flush()
except IOError:
pass
except Queue.Empty:
pass
def extcap_capture(self, fifo, dev, channel, metadata=None, control_in=None, control_out=None):
"""
Main method responsible for starting all other threads. In case of standalone execution this method will block
until SIGTERM/SIGINT and/or stop_sig_handler disables the loop via self.running event.
"""
if len(self.threads):
raise RuntimeError("Old threads were not joined properly")
packet_queue = Queue.Queue()
self.channel = channel
self.dev = dev
self.running.set()
if metadata == "ieee802154-tap":
# For Wireshark 3.0 and later
self.dlt = Nrf802154Sniffer.DLT_IEEE802_15_4_TAP
elif metadata == "user":
# For Wireshark 2.4 and 2.6
self.dlt = Nrf802154Sniffer.DLT_USER0
else:
self.dlt = Nrf802154Sniffer.DLT_IEEE802_15_4_NOFCS
# TODO: Add toolbar with channel selector (channel per interface?)
if control_in:
self.threads.append(threading.Thread(target=self.control_reader, args=(control_in,)))
self.threads.append(threading.Thread(target=self.serial_reader, args=(self.dev, self.channel, packet_queue), name="serial_reader"))
self.threads.append(threading.Thread(target=self.serial_writer, name="serial_writer"))
self.threads.append(threading.Thread(target=self.fifo_writer, args=(fifo, packet_queue), name="fifo_writer"))
for thread in self.threads:
thread.start()
while is_standalone and self.running.is_set():
time.sleep(1)
@staticmethod
def parse_args():
"""
        Helper method to make the standalone script work in the console and in Wireshark.
"""
parser = ArgumentParser(description="Extcap program for the nRF Sniffer for 802.15.4")
parser.add_argument("--extcap-interfaces", help="Provide a list of interfaces to capture from", action="store_true")
parser.add_argument("--extcap-interface", help="Provide the interface to capture from")
parser.add_argument("--extcap-dlts", help="Provide a list of dlts for the given interface", action="store_true")
parser.add_argument("--extcap-config", help="Provide a list of configurations for the given interface", action="store_true")
parser.add_argument("--extcap-reload-option", help="Reload elements for the given option")
parser.add_argument("--capture", help="Start the capture routine", action="store_true" )
parser.add_argument("--fifo", help="Use together with capture to provide the fifo to dump data to")
parser.add_argument("--extcap-capture-filter", help="Used together with capture to provide a capture filter")
parser.add_argument("--extcap-control-in", help="Used to get control messages from toolbar")
parser.add_argument("--extcap-control-out", help="Used to send control messages to toolbar")
parser.add_argument("--channel", help="IEEE 802.15.4 capture channel [11-26]")
parser.add_argument("--metadata", help="Meta-Data type to use for captured packets")
result, unknown = parser.parse_known_args()
if result.capture and not result.extcap_interface:
parser.error("--extcap-interface is required if --capture is present")
return result
def __str__(self):
return "{} ({}) channel {}".format(type(self).__name__, self.dev, self.channel)
def __repr__(self):
return self.__str__()
if is_standalone:
args = Nrf802154Sniffer.parse_args()
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)
sniffer_comm = Nrf802154Sniffer()
if args.extcap_interfaces:
print(sniffer_comm.extcap_interfaces())
if args.extcap_dlts:
print(sniffer_comm.extcap_dlts())
if args.extcap_config:
if args.extcap_reload_option and len(args.extcap_reload_option) > 0:
option = args.extcap_reload_option
else:
option = ''
print(sniffer_comm.extcap_config(option))
if args.capture and args.fifo:
channel = args.channel if args.channel else 11
signal.signal(signal.SIGINT, sniffer_comm.stop_sig_handler)
signal.signal(signal.SIGTERM, sniffer_comm.stop_sig_handler)
try:
sniffer_comm.extcap_capture(args.fifo, args.extcap_interface, channel, args.metadata, args.extcap_control_in, args.extcap_control_out)
except KeyboardInterrupt as e:
sniffer_comm.stop_sig_handler()
|
online.py
|
'''
Online link spider test
'''
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import next
import unittest
from unittest import TestCase
import time
import datetime
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import scrapy
import redis
from redis.exceptions import ConnectionError
import json
import threading, time
from crawling.spiders.link_spider import LinkSpider
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from kafka import KafkaConsumer
class CustomSpider(LinkSpider):
'''
Overridden link spider for testing
'''
name = "test-spider"
class TestLinkSpider(TestCase):
example_feed = "{\"allowed_domains\":null,\"allow_regex\":null,\""\
"crawlid\":\"abc12345\",\"url\":\"http://dmoztools.net/\",\"expires\":0,\""\
"ts\":1461549923.7956631184,\"priority\":1,\"deny_regex\":null,\""\
"cookie\":null,\"attrs\":null,\"appid\":\"test\",\"spiderid\":\""\
"test-spider\",\"useragent\":null,\"deny_extensions\":null,\"maxdepth\":0, \"domain_max_pages\":0}"
example_feed_max = "{\"allowed_domains\":[\"dmoztools.net\"],\"allow_regex\":null,\""\
"crawlid\":\"abc1234567\",\"url\":\"http://dmoztools.net/\",\"expires\":0,\""\
"ts\":1461549923.7956631184,\"priority\":1,\"deny_regex\":null,\""\
"cookie\":null,\"attrs\":null,\"appid\":\"test\",\"spiderid\":\""\
"test-spider\",\"useragent\":null,\"deny_extensions\":null,\"maxdepth\":3, \"domain_max_pages\":4}"
def setUp(self):
self.settings = get_project_settings()
self.settings.set('KAFKA_TOPIC_PREFIX', "demo_test")
# set up redis
self.redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],
port=self.settings['REDIS_PORT'],
db=self.settings['REDIS_DB'],
password=self.settings['REDIS_PASSWORD'],
decode_responses=True)
try:
self.redis_conn.info()
except ConnectionError:
print("Could not connect to Redis")
# plugin is essential to functionality
sys.exit(1)
# clear out older test keys if any
keys = self.redis_conn.keys("test-spider:*")
for key in keys:
self.redis_conn.delete(key)
        # set up a kafka consumer to read potential results
self.consumer = KafkaConsumer(
"demo_test.crawled_firehose",
bootstrap_servers=self.settings['KAFKA_HOSTS'],
group_id="demo-id",
auto_commit_interval_ms=10,
consumer_timeout_ms=5000,
auto_offset_reset='earliest',
value_deserializer=lambda m: m.decode('utf-8')
)
time.sleep(1)
def test_crawler_process(self):
runner = CrawlerRunner(self.settings)
d = runner.crawl(CustomSpider)
d.addBoth(lambda _: reactor.stop())
# add crawl to redis
key = "test-spider:dmoztools.net:queue"
self.redis_conn.zadd(key, {self.example_feed: -80})
self.redis_conn.zadd(key, {self.example_feed_max: -90})
        # run the spider, giving it total_time (60) seconds to see the urls,
        # crawl them and send the results to kafka. Then we kill the reactor
total_time = 60
def thread_func():
time.sleep(total_time)
reactor.stop()
thread = threading.Thread(target=thread_func)
thread.start()
reactor.run()
message_count = 0
max_message_count = 0
start_time = datetime.datetime.now()
        # give the consumer up to total_time seconds to consume all pages
while (datetime.datetime.now() - start_time).total_seconds() < total_time:
try:
m = None
m = next(self.consumer)
except StopIteration as e:
pass
if m is None:
pass
else:
the_dict = json.loads(m.value)
if the_dict is not None:
if the_dict['appid'] == 'test' \
and the_dict['crawlid'] == 'abc1234567':
max_message_count += 1
elif the_dict['appid'] == 'test' \
and the_dict['crawlid'] == 'abc12345':
message_count += 1
self.assertEqual(message_count, 1)
self.assertEqual(max_message_count, 4)
def tearDown(self):
keys = self.redis_conn.keys('stats:crawler:*:test-spider:*')
keys = keys + self.redis_conn.keys('test-spider:*')
for key in keys:
self.redis_conn.delete(key)
# if for some reason the tests fail, we end up falling behind on
# the consumer
for m in self.consumer:
pass
self.consumer.close()
if __name__ == '__main__':
unittest.main()
|
main.py
|
import addict
import os
import schedule
from mesoshttp.client import MesosClient
from multiprocessing import Process, Queue
from multiprocessing.queues import Empty, Full
from scheduler import config, espa, logger, task, util
log = logger.get_logger()
def get_products_to_process(cfg, espa, work_list):
max_scheduled = cfg.get('product_scheduled_max')
products = cfg.get('product_frequency')
request_count = cfg.get('product_request_count')
if espa.mesos_tasks_disabled():
log.debug("mesos tasks disabled, not requesting products to process")
return True
# qsize() is approximate https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue.qsize
if work_list.qsize() < max_scheduled:
# pull the first item off the product types list
product_type = products.pop(0)
# get products to process for the product_type
units = espa.get_products_to_process([product_type], request_count).get("products")
# put that product type at the back of the list
products.append(product_type)
        # if no units of work were returned...
if not units:
log.info("No work to do for product_type: {}".format(product_type))
else:
log.info("Work to do for product_type: {}, count: {}, appending to work list".format(product_type, len(units)))
for u in units:
try:
# add the units of work to the workList
work_list.put_nowait(u)
# update retrieved products in espa to scheduled status
espa.set_to_scheduled(u)
except Full:
log.error("work_list queue is full!")
except Exception as e:
log.error("problem scheduling a task! unit: {} \n error: {}".format(u, e))
else:
log.info("Max number of tasks scheduled, not requesting more products to process")
return True
def scheduled_tasks(cfg, espa_api, work_list):
product_frequency = cfg.get('product_request_frequency')
handler_frequency = cfg.get('handle_orders_frequency')
log.debug("calling get_products_to_process with frequency: {} minutes".format(product_frequency))
log.debug("calling handle_orders with frequency: {} minutes".format(handler_frequency))
schedule.every(product_frequency).minutes.do(get_products_to_process, cfg=cfg, espa=espa_api, work_list=work_list)
schedule.every(handler_frequency).minutes.do(espa_api.handle_orders)
while True:
schedule.run_pending()
class ESPAFramework(object):
def __init__(self, cfg, espa_api, worklist):
master = cfg.get('mesos_master')
principal = cfg.get('mesos_principal')
secret = cfg.get('mesos_secret')
self.workList = worklist
self.runningList = {}
self.max_cpus = cfg.get('max_cpu')
self.required_cpus = cfg.get('task_cpu')
self.required_memory = cfg.get('task_mem')
self.required_disk = cfg.get('task_disk')
self.task_image = cfg.get('task_image')
self.refuse_seconds = cfg.get('offer_refuse_seconds')
self.request_count = cfg.get('product_request_count')
self.products = cfg.get('product_frequency')
self.healthy_states = ["TASK_STAGING", "TASK_STARTING", "TASK_RUNNING", "TASK_FINISHED"]
self.espa = espa_api
self.cfg = cfg
self.client = MesosClient(mesos_urls=[master], frameworkName='ESPA Mesos Framework')
self.client.verify = False
self.client.set_credentials(principal, secret)
self.client.on(MesosClient.SUBSCRIBED, self.subscribed)
self.client.on(MesosClient.OFFERS, self.offer_received)
self.client.on(MesosClient.UPDATE, self.status_update)
# put some work on the queue
get_products_to_process(cfg, self.espa, self.workList)
def _getResource(self, res, name):
for r in res:
if r['name'] == name:
return r['scalar']['value']
return 0.0
def _updateResource(self, res, name, value):
if value <= 0:
return
for r in res:
if r['name'] == name:
r['scalar']['value'] -= value
return
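    # Added illustrative note (not part of the original module; values hypothetical):
    # these helpers expect the resource list of a Mesos offer, e.g.
    #   resources = [{'name': 'cpus', 'scalar': {'value': 4.0}},
    #                {'name': 'mem',  'scalar': {'value': 8192.0}},
    #                {'name': 'disk', 'scalar': {'value': 20480.0}}]
    #   self._getResource(resources, 'mem')           # -> 8192.0
    #   self._updateResource(resources, 'cpus', 1.0)  # 'cpus' scalar becomes 3.0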
def subscribed(self, driver):
log.warning('SUBSCRIBED')
self.driver = driver
def core_limit_reached(self):
running_count = len(self.runningList)
task_core_count = self.required_cpus
core_utilization = running_count * task_core_count
resp = False
log.debug("Number of cores being used: {}".format(core_utilization))
if core_utilization >= self.max_cpus:
log.debug("Max number of cores being used. Max = {}".format(self.max_cpus))
resp = True
return resp
def accept_offer(self, offer):
accept = True
resources = offer.get('resources')
if self.required_cpus != 0:
cpu = self._getResource(resources, "cpus")
if self.required_cpus > cpu:
accept = False
if self.required_memory != 0:
mem = self._getResource(resources, "mem")
if self.required_memory > mem:
accept = False
if self.required_disk != 0:
disk = self._getResource(resources, "disk")
if self.required_disk > disk:
accept = False
        if accept:
self._updateResource(resources, "cpus", self.required_cpus)
self._updateResource(resources, "mem", self.required_memory)
self._updateResource(resources, "disk", self.required_disk)
return accept
def decline_offer(self, offer):
options = {'filters': {'refuse_seconds': self.refuse_seconds}}
log.debug("declining offer: {} with options: {}".format(offer, options))
try:
offer.decline(options)
except Exception as error:
log.error("Exception encountered declining offer: {}, error: {}".format(offer, error))
raise
return True
def offer_received(self, offers):
response = addict.Dict()
response.offers.length = len(offers)
response.offers.accepted = 0
log.debug("Received {} new offers...".format(response.offers.length))
# check to see if Mesos tasks are enabled
if self.espa.mesos_tasks_disabled():
# decline the offers to free up the resources
log.debug("mesos tasks disabled, declining {} offers".format(len(offers)))
for offer in offers:
self.decline_offer(offer)
response.tasks.enabled = False
return response
else:
response.tasks.enabled = True
# check to see if core limit has been reached
if self.core_limit_reached():
# decline the offers to free up the resources
log.debug("Core utilization limit reached, declining {} offers".format(len(offers)))
for offer in offers:
self.decline_offer(offer)
response.tasks.enabled = False
return response
else:
response.tasks.enabled = True
for offer in offers:
mesos_offer = offer.get_offer()
if self.accept_offer(mesos_offer):
log.debug("Acceptable offer, checking for work to do")
try:
work = self.workList.get(False) # will raise multiprocessing.Empty if no objects present
orderid = work.get('orderid')
scene = work.get('scene')
task_id = "{}_@@@_{}".format(orderid, scene)
new_task = task.build(task_id, mesos_offer, self.task_image, self.required_cpus,
self.required_memory, self.required_disk, work, self.cfg)
log.debug("New Task definition: {}".format(new_task))
offer.accept([new_task])
self.espa.update_status(scene, orderid, 'tasked')
response.offers.accepted += 1
except Empty:
log.debug("Work queue is empty, declining offer")
self.decline_offer(offer)
except Exception as e:
log.error("Exception creating new task. offer: {}, exception: {}\n declining offer".format(offer, e))
self.decline_offer(offer)
else:
log.debug("Unacceptable offer, declining")
self.decline_offer(offer)
log.debug("resourceOffer response: {}".format(response))
return response
def status_update(self, update):
# possible state values
# http://mesos.apache.org/api/latest/java/org/apache/mesos/Protos.TaskState.html
task_id = update['status']['task_id']['value']
orderid, scene = task_id.split("_@@@_")
state = update['status']['state']
response = addict.Dict()
response.task_id = task_id
response.state = state
if state in self.healthy_states:
log.debug("status update for: {} new status: {}".format(task_id, state))
response.status = "healthy"
if state == "TASK_RUNNING":
response.list.name = "running"
if task_id not in self.runningList:
self.runningList[task_id] = util.right_now()
response.list.status = "new"
else:
response.list.status = "current"
if state == "TASK_FINISHED":
try:
                    del self.runningList[task_id]
except KeyError:
log.debug("Received TASK_FINISHED update for {}, which wasn't in the runningList".format(task_id))
else: # something abnormal happened
log.error("abnormal task state for: {}, full update: {}".format(task_id, update))
response.status = "unhealthy"
self.espa.set_scene_error(scene, orderid, update)
if task_id in self.runningList:
                del self.runningList[task_id]
return response
def main():
cfg = config.config()
espa_api = espa.api_connect(cfg)
work_list = Queue() # multiprocessing Queue
framework = ESPAFramework(cfg, espa_api, work_list)
# Scheduled requests for espa processing work, and handle-orders call
scheduled_process = Process(target=scheduled_tasks, args=(cfg, espa_api, work_list,))
try:
scheduled_process.start()
framework.client.register()
except Exception as err:
log.error("espa scheduler encountered an error, killing scheduled processes. tearing down framework. error: {}".format(err))
framework.client.tearDown()
scheduled_process.kill()
if __name__ == '__main__':
main()
|
web_server.py
|
#!/usr/bin/python3
# file: multiprocess_web_server.py
# Created by Guang at 19-7-19
# description:
# -*- coding: utf-8 -*-
import multiprocessing
import socket
import re
import time
from frame import frame_app
class WSGIServer(object):
def __init__(self, ip, port):
        # 1. create the socket
        self.listen_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listen_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # 2. bind the ip and port
        self.local_addr = (ip, port)
        self.listen_server.bind(self.local_addr)
        # 3. switch the socket from active to passive (start listening)
self.listen_server.listen(128)
def service_client(self, new_socket):
"""为这个客户端返回数据"""
# 1.接收浏览器发送过来的请求, 即HTTP请求
# GET / HTTP/1.1
request = new_socket.recv(1024).decode('utf-8')
# print("-" * 100)
# print(request)
request_lines = request.splitlines() # 当客户端主动关闭, 会收到空字符串并解阻塞; 这里会生成空列表
if not request_lines:
return
file_name = ""
ret = re.match(r'[^/]+(/[^ ]*)', request_lines[0])
if ret:
file_name = ret.group(1)
# print("*" * 50, file_name)
if file_name == "/":
file_name = "/index.html"
        # 2. return data in HTTP format
        # treat resources ending with .py as dynamic resources
if not file_name.endswith(".py"):
try:
f = open("./html" + file_name, 'rb')
except Exception as e:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "----------file not found --------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
                # 2.1 prepare the data to send to the browser -- header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
                # 2.2 prepare the data to send to the browser -- body
                # response += "hahaha"
                # send the response header to the browser
                new_socket.send(response.encode("utf-8"))
                # send the response body to the browser
                new_socket.send(html_content)
else:
            # load the dynamic resource
print(file_name)
header = "HTTP/1.1 200 OK\r\n"
env = dict()
# env {"FILE_PATH": "/index.py"}
env["FILE_PATH"] = file_name
            body = frame_app.application(env, self.set_response_header)
for temp in self.response_headers:
print(temp)
header += "%s: %s\r\n" % temp
header += "\r\n"
response = header + body
new_socket.send(response.encode("utf-8"))
        # must close the socket here as well (the underlying file descriptor is duplicated in the child process)
new_socket.close()
    def set_response_header(self, status, response_headers):
self.status = status
self.response_headers = [("Server", "mini_web_server/1.0")]
self.response_headers += response_headers
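    # Added note (sketch, not part of the original server): this method plays the
    # role of WSGI's start_response callable; the framework is expected to invoke
    # it roughly as (hypothetical values)
    #   self.set_response_header("200 OK", [("Content-Type", "text/html; charset=utf-8")])
    # after which self.response_headers holds the Server header plus the
    # framework-provided headers that service_client() serializes into the reply.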
def runserver(self):
while True:
            # 4. wait for a new client connection
            new_socket, client_addr = self.listen_server.accept()
            # 5. serve this client
            p = multiprocessing.Process(target=self.service_client, args=(new_socket, ))
            p.start()
            # a process-based concurrent server must also call new_socket.close() here;
            # reason: the file descriptor (fd) is duplicated in the child process
            new_socket.close()
        # close the listening socket
self.listen_server.close()
if __name__ == '__main__':
ip = ''
port = 8888
wsgi_server = WSGIServer(ip, port)
wsgi_server.runserver()
|
drs.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import sys
import signal
import zmq
import conf
import logging
import atexit
import random
import json
import time
from time import sleep
from math import sqrt
from colorsys import rgb_to_hsv
from urllib import urlencode
from urllib2 import urlopen, URLError
from collections import deque
from threading import Thread
from enum import Enum
from ev3dev import *
from ev3dev_utils import *
from graph import *
#from graph import indexof, contains, indexof_many, get_min_dest_direction, filter_graph, add_unknown_edges_to_graph, explored
# [TODO] are all the email addresses correct?
__authors__ = ["Marco Squarcina <squarcina at dais.unive.it>",
"Enrico Steffinlongo <enrico.steffinlongo at unive.it>",
"Francesco Di Giacomo <fdigiacom at gmail.com>",
"Michele Schiavinato <mschiavi at dais.unive.it>",
"Alan Del Piccolo <alan.delpiccolo at gmail.com>",
"Filippo Cavallin <840031 at stud.unive.it>",
"Eyasu Zemene Mequanint <eyasu201011 at gmail.com>"]
__status__ = "Development"
# global variables
# instances of motors
motor_left = large_motor(OUTPUT_D)
motor_right = large_motor(OUTPUT_B)
# instances of sensors
col_sensor = color_sensor()
ir_sensor = infrared_sensor()
## other variables
## [MERGE] moved in update()
#ir_buffer = [[deque(), deque()] for _ in range(4)]
#ir_medians = [[None, None] for _ in range(4)]
# mean between the value of the line and the plane
mid_value = (conf.line_value + conf.plane_value)/2
# queue of the last samples taken by the color sensor
last_hsvs = deque()
# zmq context definitions
context = zmq.Context()
# possible states
State = Enum('State', ('begin explore_node_init explore_node '
'explore_edge_init explore_edge_before_marker '
'explore_edge explore_edge_after_marker escaping_init '
'escaping waiting_for_clearance moving_init '
'moving_before_marker moving moving_after_marker idling'))
# function definitions
def message_server():
sock = context.socket(zmq.REP)
sock.bind("tcp://0.0.0.0:{}".format(conf.robot_port))
# log incoming messages and reply back
# [TODO] define a poison pill to kill this thread
while True:
message = sock.recv()
sock.send("Echoed: [{}]".format(message))
logging.info(message)
def reset(signal = None, frame = None):
"""Stop the motors. [FIXME] is it possible to break?"""
motor_left.reset()
motor_right.reset()
if signal:
sys.exit(1)
def stop_motors():
motor_left.stop()
motor_right.stop()
def start_motors():
motor_left.run()
motor_right.run()
def wait_launch():
"""Block until the game is started (click play on the web interface)."""
url_to_check = "http://{}:{}/started".format(
conf.web_server_ip, conf.web_server_port)
started = False
while not started:
try:
f = urlopen(url_to_check)
started = True if f.read() == '1' else False
except URLError:
logging.error('Unable to connect to the web server, proceeding')
break
sleep(0.5)
def json_translate(data):
return {int(k): v for k, v in data.iteritems()}
def greet():
"""Say hello before starting the protocol."""
# set the second parameter to False for non-blocking call
sound.speak("Hello, I am the robot number {}".format(conf.robot_id), True)
def follow_line(value, pulses = conf.base_pulses):
"""Adjust the speed of the two motors to keep up with the line tracking."""
start_motors()
error = value - mid_value
correction = int(conf.proportional_const * error)
motor_left.pulses_per_second_setpoint = pulses + correction
motor_right.pulses_per_second_setpoint = pulses - correction
def initialize():
# explicitly set the color sensor in RGB mode
col_sensor.mode = 'RGB-RAW'
# explicitly set the infrared sensor in SEEK mode
ir_sensor.mode = infrared_sensor.mode_ir_seeker
# prepare the motors
motor_left.regulation_mode = motor.mode_on
motor_right.regulation_mode = motor.mode_on
# set motors ready to run
start_motors()
def get_hsv_colors():
"""Return the Hue, Saturation, Value triple of the sampled color assuming
that the color sensor is in RAW-RGB mode."""
hsv = rgb_to_hsv(*[col_sensor.value(i)/1022 for i in range(col_sensor.num_values())])
if len(last_hsvs) >= conf.n_col_samples:
last_hsvs.popleft()
last_hsvs.append(hsv)
return hsv
'''
def avoid_collision():
# query the ir sensor in SEEK mode to avoid collisions
seek = [ir_sensor.value(i) for i in range(ir_sensor.num_values())]
for robot_id in range(4):
# remove the heads
if len(ir_buffer[robot_id][0]) >= conf.n_ir_samples:
ir_buffer[robot_id][0].popleft()
ir_buffer[robot_id][1].popleft()
# update the angle
ir_buffer[robot_id][0].append(seek[robot_id*2])
# update the distance
ir_buffer[robot_id][1].append(abs(seek[robot_id*2+1]))
# recompute the median
ir_medians[robot_id][0] = median(ir_buffer[robot_id][0])
ir_medians[robot_id][1] = median(ir_buffer[robot_id][1])
if ir_medians[robot_id][1] < 20:
# [TODO] handle collisions
pass
'''
#[MERGE] first part of avoid_collision used to sample the ir values
def update_ir_queue(ir_buffer):
# query the ir sensor in SEEK mode to avoid collisions
seek = [ir_sensor.value(i) for i in range(ir_sensor.num_values())]
for robot_id in range(4):
# remove the heads
if len(ir_buffer[robot_id][0]) >= conf.n_ir_samples:
ir_buffer[robot_id][0].popleft()
ir_buffer[robot_id][1].popleft()
# update the angle
ir_buffer[robot_id][0].append(seek[robot_id*2])
# update the distance
ir_buffer[robot_id][1].append(abs(seek[robot_id*2+1]))
#[MERGE] second part of avoid_collision used to check medians of ir_buffer to check for inbound robots
def get_seen_robots(ir_buffer):
ir_medians = [[None, None] for _ in range(4)]
for robot_id in range(4):
# recompute the median
ir_medians[robot_id][0] = median(ir_buffer[robot_id][0])
ir_medians[robot_id][1] = median(ir_buffer[robot_id][1])
#[MERGE] added minimum distance in conf.py
seen_bots = [i for i in range(4) if (ir_medians[i][1] <= conf.collision_distance and i != conf.robot_id)]
#seen_bots = indexof_many(lambda d: d <= conf.collision_distance, ir_medians[1])
#seen_bot = filter(lambda id: id != conf.robot_id, seen_bots)
assert len(seen_bots) < 2, "WTF? We are colliding with more than one bot??? Consider better invariants! IROS!"
return seen_bots
def identify_color(hsv_color):
"""Return the string id of the color closer to the provide HSV triple."""
# compute the distances among the acquired color and all known colors
distances = {k : color_distance(v, hsv_color) for k, v in conf.hsv_colors.iteritems()}
# return the closest one
return min(distances, key=distances.get)
def on_border():
"""Use the saturation mean to see if we fall on a border."""
saturation = mean([hsv[1] for hsv in last_hsvs])
return saturation > conf.border_saturation_thr
'''
def choose_random_direction(edges):
direction = random.choice([i for i in range(4) if edges[i]])
return direction
'''
def move_to_edge(current_orientation, new_orientation):
rotate(current_orientation, (new_orientation - current_orientation) % 4)
def rotate(starting_direction, direction = -1):
"""Rotate within a node.
This function can be used to identify all the out edges starting from the
current node or, when a direction is provided, to perform a rotation until
the given direction is reached. Return the list of discovered edges in the
first case, else nothing."""
print("Rotate args: starting_direction: {}, direction: {}".format(starting_direction, direction))
# if the direction is 0 we are already in the right place, there's nothing
# to do
if direction == 0:
return
# reset position
reset_motor_position()
# start with a queue made only of white values
for _ in range(conf.n_col_samples):
last_hsvs.popleft()
last_hsvs.append((0, 0, conf.plane_value))
# ... and obviously assume that the previous color is white
prev_color = 'white'
color = 'white'
# list of edges to be returned in case we are in discovery mode
edges = [False for _ in range(4)]
# start rotating at half of the maximum allowed speed
motor_left.pulses_per_second_setpoint = conf.slow_pulses
motor_right.pulses_per_second_setpoint = -conf.slow_pulses
while True:
# leave if a 360 degrees rotation has been done
if motor_left.position > conf.full_rotation_degrees:
break
# update the queue of sampled color values
get_hsv_colors()
# update the current color according to the sampled value
mean_value = median([hsv[2] for hsv in last_hsvs])
if mean_value < conf.line_value + 0.05:
color = 'black'
if mean_value > conf.plane_value - 0.05:
color = 'white'
# from white we just fallen on a black line
if prev_color != color and color == 'black':
#cur_direction = get_orientation(0)
#print("cur_direction: {}".format(cur_direction))
cur_direction = int(round(motor_left.position / (conf.full_rotation_degrees//4)))
if cur_direction == direction:
# arrived at destination, it's time to leave ;)
break
elif cur_direction <= 3:
# keep trace of the new edge just found
edges[cur_direction] = True
logging.info("FOUND EDGE")
elif motor_left.position > conf.full_rotation_degrees:
break
else:
# this is the 5th edge, we are back in the starting position on
# a node with 4 edges, we should stop here
break
prev_color = color
new_edges = [edges[(i-starting_direction) % 4] for i in range(4)]
print("starting_direction: {}, edges: {}, new_edges: {}".format(starting_direction, edges, new_edges))
return new_edges if direction == -1 else None
def cross_bordered_area(marker=True):
"""Cross a bordered colored region and return the color."""
color = conf.Color.unknown
# assume that we are on a border
local_state = 'border'
if not marker:
# if we are on a node just go straight until the end is reached because
# we have already sampled the color in the previous marker
local_state = 'sampled'
run_for(motor_left, ever=True, power=conf.slow_pulses)
run_for(motor_right, ever=True, power=conf.slow_pulses)
count = 0
while True:
# sample color
get_hsv_colors()
if local_state == 'border':
# halt!!!
#stop_motors()
# slightly move forward so that we are exactly over the color
# (run_for is not a blocking call, pay attention we need to sleep)
run_for(motor_left, power=conf.slow_pulses, degrees=27)
run_for(motor_right, power=conf.slow_pulses, degrees=27)
sleep(3)
#stop_motors()
#start_motors()
logging.info("Start sampling")
local_state = 'inside'
# start moving again
#run_for(motor_left, ever=True, power=conf.slow_pulses//2)
#run_for(motor_right, ever=True, power=conf.slow_pulses//2)
elif local_state == 'inside':
# time to pick up some samples to identify the color
count += 1
if count >= conf.n_col_samples:
mean_hsv_color = median(list(last_hsvs))
color = conf.Color[identify_color(mean_hsv_color)]
local_state = 'sampled'
logging.info([color, mean_hsv_color])
run_for(motor_left, power=conf.slow_pulses, ever=True)
run_for(motor_right, power=conf.slow_pulses, ever=True)
logging.info("Esco")
sleep(2)
sleep(0.01)
elif local_state == 'sampled':
# determine the end of the bordered area using the saturation
if not on_border():
return color
else:
raise Exception("Uh?")
def turn_around():
"""Change direction to avoid collisions and tell if a marker is found."""
marker_found = False
reset_motor_position()
# start with a queue made only of white values
for _ in range(conf.n_col_samples):
last_hsvs.popleft()
last_hsvs.append((0, 0, conf.plane_value))
while True:
get_hsv_colors()
# check if we are on a marker, this is kind of a code duplication, but
# it's much faster than computing the mean of the same list two times
# in a row
_, saturation, value = mean(last_hsvs)
if saturation > conf.border_saturation_thr:
marker_found = True
if motor_left.position > conf.full_rotation_degrees//2:
# we are performing the rotation over the marker
logging.info("1: {}".format(motor_left.position))
break
elif motor_left.position > conf.full_rotation_degrees*0.38 and value < mid_value:
# we performed the turn_around and we are back on track
logging.info("2: {}".format(motor_left.position))
break
elif motor_left.position < conf.full_rotation_degrees*0.75:
# clockwise rotation
run_for(motor_left, power=conf.slow_pulses, ever=True)
run_for(motor_right, power=-conf.slow_pulses, ever=True)
#motor_left.pulses_per_second_setpoint = conf.slow_pulses
#motor_right.pulses_per_second_setpoint = -conf.slow_pulses
else:
raise Exception("Lost the track")
while on_border():
get_hsv_colors()
motor_left.pulses_per_second_setpoint = conf.slow_pulses
motor_right.pulses_per_second_setpoint = conf.slow_pulses
return marker_found
def retire_from_marker():
run_for(motor_left, power=conf.slow_pulses, degrees=-150)
run_for(motor_right, power=conf.slow_pulses, degrees=-150)
sleep(4)
def mean(data):
"""Compute the mean of the provided data."""
n = len(data)
try:
return [float(sum(l))/len(l) for l in zip(*data)]
except TypeError:
return sum(data)/n
def median(data):
"""Compute the median of the provided data, used for ir smoothing."""
data = sorted(data)
n = len(data)
if n == 0:
raise Exception('No median for an empty list')
if n%2 == 1:
return data[n//2]
else:
i = n//2
return (data[i-1] + data[i])/2
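# Added usage note (examples derived from the two helpers above, not part of the
# original module): mean() averages component-wise over sequences of tuples (e.g.
# HSV samples) and element-wise over plain numbers, while median() is used for
# scalar smoothing:
#   mean([1, 2, 3])                  -> 2.0
#   mean([(0, 0, 1), (1, 1, 0)])     -> [0.5, 0.5, 0.5]
#   median([5, 1, 3])                -> 3
#   median([4, 1, 3, 2])             -> 2.5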
def color_distance(a, b):
"""Compute the euclidean distance of 2 values.
This function also accounts for the heuristic corrections of the two
colors. Color near to red can be recognized with hue component near to 0 or
1 (due to cylindrical hsv color space). On the other hand, the height of
the color sensor wrt the surface involves heavily on the value component,
so we reduce the value by a constant multiplicative factor."""
# red correction on hue component and value reduction
a, b = [(0 if (x[0] >= 0.9) else x[0], x[1], x[2]*0.3) for x in a, b]
# euclidean distance of all components (hue, saturation, value)
return sqrt(sum((a - b)**2 for a, b in zip(a, b)))
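# Added worked example (not part of the original module): with the red correction,
# two nearly-red samples that sit on opposite ends of the hue circle compare as
# close, e.g.
#   a = (0.95, 0.8, 0.5)   # hue >= 0.9 -> treated as hue 0
#   b = (0.02, 0.8, 0.5)
#   color_distance(a, b)   # -> sqrt(0.02**2) = 0.02 (the 0.3-scaled values cancel out)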
def get_orientation(old_orientation):
delta_motors = motor_left.position - motor_right.position
orientation = int(round(delta_motors / conf.turn_rotation_difference) + old_orientation) % 4
print(delta_motors, old_orientation, orientation)
return orientation
# [TODO] the implementation of this trivial function is left to the willing programmer (Ok, I'll help you! :>")
def solve_collision(seen_robots, current_node, orientation):
if conf.robot_id != 0:
graph[current_node][orientation] = None
# [TODO] the server should also tell us if we need to explore the node (since
# it's a new undiscovered node) or not
# [TODO] when dest_orient and edge_len are -1, we just discard these values and
# check if the bot can enter the node
# [MERGE] it gives to the bot even the list of all bots positions
# [MERGE] the permission to enter in such node can be deduced using other_position
# returned values are: (the updated graph, the position of all bots, the permission to enter in destination)
def marker_update(destination_node, destination_orientation, edge_length, exploring):
data = {'robot': conf.robot_id,
'destination_node': destination_node.value,
'destination_orientation': destination_orientation,
'edge_length': edge_length,
'exploring': exploring}
url_to_check = "http://{}:{}/marker_update".format(
conf.web_server_ip, conf.web_server_port)
response_list = []
sent = False
while not sent:
try:
f = urlopen(url_to_check, urlencode(data))
response_list = json.loads(f.read())
sent = True
except URLError:
logging.error('Unable to connect to the web server, proceeding')
sleep(0.5)
response_list[0] = json_translate(response_list[0])
return response_list
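# Added note (inferred from how update() unpacks the response, not from server
# documentation): the server appears to reply with a 4-element JSON list
#   [graph, bot_positions, has_to_explore, can_enter]
# where the graph's keys are re-cast to int by json_translate() before returning.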
# return updated graph and bot_positions
def outupdate(graph, current_node, direction):
edges = [1 if e != None else 0 for e in graph[current_node.value]]
data = {'robot': conf.robot_id,
'direction': direction,
'n': edges[0],
'e': edges[1],
's': edges[2],
'w': edges[3]}
url_to_check = "http://{}:{}/outupdate".format(
conf.web_server_ip, conf.web_server_port)
response_list = []
sent = False
while not sent:
try:
f = urlopen(url_to_check, urlencode(data))
response_list = json.loads(f.read())
sent = True
except URLError:
logging.error('Unable to connect to the web server, proceeding')
sleep(0.5)
response_list[0] = json_translate(response_list[0])
return response_list
def reset_motor_position():
motor_left.position = 0
motor_right.position = 0
def get_motor_position():
return (motor_left.position + motor_right.position) / 2
def get_complementary_orientation(orientation):
"""Used to find the outcoming orientation given the incoming one (e.g. N ->
S; E-> W; S -> N; W -> E)."""
return (orientation + 2) % 4
def update(debug=False):
"""OMG our huge state machine!!!!!!! x_X."""
state = State.begin
if debug:
state = State.explore_edge
old_state = State.begin
orientation = conf.robot_id
current_node = Color.unknown
# current edge is a 3-tuple: starting node, starting orientation,
# destination node (or unknown)
current_edge = None
has_to_explore = False
graph = dict()
# list containing the last visited node of each robot (even itself)
bot_positions = []
# list of last sampled ir measurements
ir_buffer = [[deque(), deque()] for _ in range(4)]
# tuple of the states where the bot should follow the line
moving_states = (State.begin, State.explore_edge_before_marker,
State.explore_edge, State.explore_edge_after_marker,
State.escaping, State.moving_before_marker, State.moving,
State.moving_after_marker)
while True:
if state != old_state:
logging.info("{} -> {}".format(old_state, state))
old_state = state
#logging.info(state)
# we sample every tick the ir values even if it is not used in current
# state
update_ir_queue(ir_buffer)
# update the global color queue every tick as before
hue, saturation, value = get_hsv_colors()
# if we are in a moving state we follow the line, this is correct since
# all the high level moving calls are blocking
if state in moving_states:
follow_line(value)
# BEGIN OF THE STATE MACHINE UPDATE
# Begin before a marker, update the vertex infos.
# NEXT_STATE: EXPLORE_EDGE_AFTER_MARKER.
if state == State.begin:
if on_border():
stop_motors()
orientation = get_orientation(orientation)
current_node = cross_bordered_area(marker=True)
stop_motors()
response = marker_update(current_node, get_complementary_orientation(orientation), -1, True)
if len(response) == 0:
raise Exception('Empty list returned by marker_update')
graph, bot_positions, has_to_explore, _ = response
state = State.explore_edge_after_marker
# Receive the updated graph, identify the node, explore the node if it is unexplored
# by rotating around and counting the edges under the color sensor.
# NEXT STATE: EXPLORE_NODE
elif state == State.explore_node_init:
cross_bordered_area(marker=False)
sleep(0.5)
if has_to_explore:
has_to_explore = False
edges = rotate(orientation)
                # local graph updated. Modifications committed to the server in
                # outupdate, called from explore_edge_init
graph = add_unknown_edges_to_graph(graph, current_node.value, edges)
state = State.explore_node
        # Find the direction to reach the closest unexplored edge. If the edge is adjacent to
# the current node then start exploring it, otherwise move to the node in the minimum path.
# If there is no unexplored reachable edge switch to idle mode.
# NEXT STATES: IDLING, MOVING_INIT, EXPLORE_EDGE_INIT
elif state == State.explore_node:
filtered_graph = filter_graph(graph, conf.robot_id, bot_positions)
directions = get_min_dest_direction(filtered_graph, current_node.value)
if directions == None:
state = State.idling
else:
dest = random.choice(directions)
current_edge = (current_node.value, dest[1], dest[0])
print("Dest: {}".format(dest))
if dest[0] == Color.unknown.value:
state = State.explore_edge_init
else:
state = State.moving_init
# Update the graph infos on the server when exiting the node. Rotate
# and align with the edge to explore.
# Start moving on the edge.
# NEXT_STATE: EXPLORE_EDGE_BEFORE_MARKER
elif state == State.explore_edge_init:
sleep(1)
# [TODO] not merged... update position and direction of the bot,
# update the graph on the server. Maybe gets a new graph
stop_motors()
graph, bot_positions = outupdate(graph, current_node, current_edge[1])
start_motors()
print("current edge {}".format(current_edge))
move_to_edge(orientation, current_edge[1])
# always update orientation on turns
orientation = current_edge[1]
state = State.explore_edge_before_marker
#START!!!
# Try to spot a robot. If one exists solve the collision (in this case
# the robot always has the right of way) and start waiting until the
# other robot has turned around. If the position is on a marker and no
# robot has been spotted move past the marker.
# NEXT STATE: EXPLORE_EDGE
elif state == State.explore_edge_before_marker:
seen_robots = get_seen_robots(ir_buffer)
if len(seen_robots) > 0:
stop_motors()
#solve_collision(seen_robots, current_node, orientation)
                state = State.waiting_for_clearance
if on_border():
stop_motors()
sleep(1)
cross_bordered_area(marker=False)
reset_motor_position()
state = State.explore_edge
# Try to spot a robot. If one exists solve the collision and starts
        # escaping. If no collision exists and it reaches a marker, see if the
# destination is locked. If it is locked update the edge infos and
# escape. Otherwise lock the destination and unlock the starting node.
# NEXT_STATES: ESCAPING_INIT, EXPLORE_EDGE_AFTER_MARKER
elif state == State.explore_edge:
seen_robots = get_seen_robots(ir_buffer)
if len(seen_robots) > 0:
stop_motors()
solve_collision(seen_robots, current_node, orientation)
state = State.escaping_init
elif on_border():
# we reached the end of the edge
stop_motors()
edge_length = int(get_motor_position())
orientation = get_orientation(orientation)
marker_color = cross_bordered_area(marker=True)
stop_motors()
response = marker_update(marker_color, get_complementary_orientation(orientation), edge_length, True)
if len(response) == 0:
raise Exception('Empty list returned by marker_update')
graph, bot_positions, has_to_explore, can_enter = response
if can_enter:
current_node = marker_color
state = State.explore_edge_after_marker
else:
retire_from_marker()
state = State.escaping_init
# If we find a node we release the lock on the current edge and we
# start the node exploration.
# NEXT_STATE: EXPLORE_NODE_INIT
elif state == State.explore_edge_after_marker:
if on_border():
state = State.explore_node_init
# Start turning. If there is a waiting mate we notify that the way is
# clear. If we find a marker while turning we simply go back and we run
# the standard escape code.
# NEXT_STATES: EXPLORE_EDGE_AFTER_MARKER, ESCAPING
elif state == State.escaping_init:
start_motors()
found_marker = turn_around()
stop_motors()
# always update orientation on turns
orientation = get_complementary_orientation(orientation)
#if waiting_mate != None:
# notify_clearance(waiting_mate) # to be removed if waiting_for_clearance only sleeps for some seconds
if found_marker:
state = State.explore_edge_after_marker
else:
state = State.escaping
print(state)
# We wait until we are on a marker. We identify it and we change state
# to notify we are past the marker.
# NEXT_STATE: EXPLORE_EDGE_AFTER_MARKER
elif state == State.escaping:
if on_border():
stop_motors()
# we have just visited this marker, so even if we are on a
# marker we want to get past of it
cross_bordered_area(marker=False)
# we do not check locks because it's not released yet
state = State.explore_edge_after_marker
# We update graph infos. We move towards the edge.
# NEXT_STATE: MOVING_BEFORE_MARKER
elif state == State.moving_init:
stop_motors()
graph, bot_positions = outupdate(graph, current_node, current_edge[1])
start_motors()
move_to_edge(orientation, current_edge[1])
orientation = current_edge[1]
state = State.moving_before_marker
# We wait until we are on the marker. We start moving.
# NEXT_STATE: MOVING
elif state == State.moving_before_marker:
if on_border():
# we have just visited this marker, so even if we are on a
# marker we want to get past of it
cross_bordered_area(marker=False)
reset_motor_position()
state = State.moving
# If we are on a node we start exploring it. If we are on a marker and
        # it is locked, we escape. Otherwise we release the lock just as for the edge
# exploration.
# NEXT_STATES: ESCAPING_INIT, EXPLORE_EDGE_AFTER_MARKER
elif state == State.moving:
if on_border():
stop_motors()
orientation = get_orientation(orientation)
marker_color = cross_bordered_area(marker = True)
#assert marker_color.value == current_edge[2], 'Unexpected color marker {} found, expecting color {}'.format(marker_color, current_edge[2])
stop_motors()
# using edge_update to notify to the server. The server can
# discard the information, or use the position to correct
# weight [TODO] we'll decide later on
response = marker_update(marker_color, -1, -1, False)
if len(response) == 0:
raise Exception('Empty list returned by marker_update')
graph, bot_positions, _, can_enter = response
if can_enter:
current_node = marker_color
state = State.explore_edge_after_marker
else:
retire_from_marker()
state = State.escaping_init
        # We wait for 10 seconds (keeping the ir queue updated) and then we
        # resume the exploration
# NEXT_STATE: EXPLORE_EDGE_BEFORE_MARKER
elif state == State.waiting_for_clearance:
stop_motors()
t = time.time()
while time.time() - t < 10:
update_ir_queue(ir_buffer)
sleep(0.01)
state = State.explore_edge_before_marker
# We wait for 5 seconds and then we poll the node to see if we can
# reach an unexplored edge.
# NEXT_STATE: EXPLORE_NODE
elif state == State.idling:
stop_motors()
t = time.time()
while time.time() - t < 5:
update_ir_queue(ir_buffer)
sleep(0.01)
state = State.explore_node
# Enrico did something wrong because my code is always bug free.
else:
raise Exception("Undefined state...")
def main():
# register anti-panic handlers
signal.signal(signal.SIGINT, reset)
atexit.register(reset)
# configure how logging should be done
logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] (%(threadName)-10s) %(message)s', )
greet()
# parse command line options
if len(sys.argv) > 1 and sys.argv[1] == '--wait':
# wait the protocol to be started
wait_launch()
# create a thread for reading incoming zmq messages
server = Thread(name='MessageServer', target=message_server)
server.setDaemon(True)
server.start()
# [TODO] create the socket for sending messages
initialize()
update()
reset()
# [TODO] join the MessageServer thread
sys.exit(0)
if __name__ == '__main__':
main()
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
tests/runner asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from enum import Enum
from functools import wraps
from subprocess import PIPE, STDOUT
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import stat
import sys
import tempfile
import time
import unittest
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import get_canonical_temp_dir, try_delete
from tools.utils import MACOS, WINDOWS
from tools import shared, line_endings, building, config
def path_from_root(*pathelems):
"""Construct a path relative to the emscripten root directory."""
return os.path.join(__rootpath__, *pathelems)
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger("runner")
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# TODO(sbc): Remove this check for the legacy name once its been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0')) or shared.DEBUG
TEST_ROOT = path_from_root('tests')
WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools', 'webidl_binder'))
if EMTEST_VERBOSE:
logging.root.setLevel(logging.DEBUG)
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
def test_file(*path_components):
"""Construct a path relative to the emscripten "tests" directory."""
return os.path.join(TEST_ROOT, *path_components)
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
def needs_dylink(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dylink()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
def decorated(self):
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
f(self)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
  # Setting a value to None means clearing the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
def ensure_dir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
def make_executable(name):
os.chmod(name, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
# The core test modes
core_test_modes = [
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
'strict',
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
'asan',
'lsan',
'wasm2ss',
'posixtest',
'posixtest_browser',
]
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dylink(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dynamic linking with memory growth (without wasm)')
if not self.is_wasm():
self.skipTest('no dynamic linking support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic linking support in asan yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.node_args = []
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = config.JS_ENGINES.copy()
self.wasm_engines = config.WASM_ENGINES.copy()
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
          # Even when EMTEST_SAVE_DIR is set we still try to start with an empty directory as many tests
# expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key, default=None):
return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
    if value is None:
      self.clear_setting(key)
      return
    self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
elif type(value) == str:
ret += ['-s', f'{key}={value}']
else:
ret += ['-s', f'{key}={json.dumps(value)}']
return ret
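  # Illustrative sketch (hypothetical settings values): with
  #   self.settings_mods == {'EXIT_RUNTIME': 1, 'ASSERTIONS': 2, 'EXPORT_NAME': 'Foo'}
  # serialize_settings() would return
  #   ['-s', 'EXIT_RUNTIME', '-s', 'ASSERTIONS=2', '-s', 'EXPORT_NAME=Foo']
  # i.e. a bare `-s KEY` for the common value 1, and `KEY=value` otherwise.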
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
    # use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
es_check_env = os.environ.copy()
es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
try:
shared.run_process(es_check + ['es5', os.path.abspath(filename)], stderr=PIPE, env=es_check_env)
except subprocess.CalledProcessError as e:
print(e.stderr)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False,
post_build=None, js_outfile=True):
suffix = '.js' if js_outfile else '.wasm'
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
compiler = [EMXX]
else:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove
      # this if this issue is fixed.
compiler = [EMCC, '-nostdlib++']
if force_c:
compiler.append('-xc')
dirname, basename = os.path.split(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + include for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and not self.uses_es6:
self.verify_es5(output)
if post_build:
post_build(output)
if js_outfile and self.uses_memory_init_file():
src = open(output).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_js(self, filename, engine=None, args=[], output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
error = None
if not engine:
engine = config.JS_ENGINES[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
if EMTEST_VERBOSE:
print(f"Running '{filename}' under '{shared.shlex_join(engine)}'")
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
if error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(open(file1, 'rb').read(),
open(file2, 'rb').read())
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init={}, cache_name_extra='', native=False):
if make_args is None:
make_args = ['-j', str(shared.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
if configure is not None:
# Avoid += so we don't mutate the default arg
configure = configure + configure_args
return build_library(name, build_dir, output_dir, generated_libs, configure,
make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
delete_contents(self.get_dir())
if EMSCRIPTEN_TEMP_DIR:
delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than error'ing.
    # In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
self.fail('subprocess exited with non-zero return code(%d): `%s`' %
(e.returncode, shared.shlex_join(cmd)))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
  # exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
  # when run under browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
create_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-s', 'SIDE_MODULE'] + self.get_emcc_args()
cmdv += linkto
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE')
original_args = self.emcc_args.copy()
extra_args = ['libb' + so, 'libc' + so]
self.emcc_args += extra_args
do_run(r'''
#ifdef __cplusplus
extern "C" {
#endif
void bfunc();
void cfunc();
#ifdef __cplusplus
}
#endif
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.emcc_args = original_args
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = self.js_engines
for engine in js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
with open(filename, 'w') as f:
f.write(src)
self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
self._build_and_run(filename, open(expected_output_filename).read(), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = test_file(*path)
outfile = shared.unsuffixed(srcfile) + '.out'
expected = open(outfile).read()
self._build_and_run(srcfile, expected, **kwargs)
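  # Hedged usage sketch (hypothetical test bodies, not from this file): most
  # tests funnel through the helpers above, e.g.
  #
  #   self.do_run(r'''
  #     #include <stdio.h>
  #     int main() { printf("hello\n"); }
  #   ''', 'hello', force_c=True)
  #
  #   self.do_run_in_out_file_test('core', 'test_hello_argc.c')  # expects a matching .out file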
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
js_engines=None, post_build=None, libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
self.build(filename, libraries=libraries, includes=includes, post_build=post_build,
force_c=force_c)
js_file = shared.unsuffixed(os.path.basename(filename)) + '.js'
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.unsuffixed(js_file) + '.wasm.c'
executable = shared.unsuffixed(js_file) + '.exe'
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + test_file('third_party', 'freetype', 'include'),
'-I' + test_file('third_party', 'poppler', 'include')
]
freetype = self.get_freetype_library()
    # Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=['cmake', '.'],
make=['cmake', '--build', '.'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(test_file('browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
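# Rough request-flow sketch (an informal summary of the handler above, not new
# behavior; the paths are the ones the code already serves):
#
#   browser                         harness server
#   GET /run_harness            ->  serves browser_harness.html
#   GET /check (polled)         ->  '(wait)', or 'COMMAND:<url>' when a test is queued
#   GET <test page and assets>  ->  plain file serving from the test directory
#   GET /report_result?<value>  ->  result is pushed onto out_queue for the test runner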
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
  # Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
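# Hedged usage sketch (hypothetical call site): a browser test that only needs
# the JS-side helpers could compile with
#   self.compile_btest(['test.c', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
# while the default Reporting.FULL also force-includes report_result.h and links
# in report_result.cpp (see compile_btest below).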
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open('reftest.js', 'w') as out:
with open(test_file('browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
    # Inject support code for reporting results. This includes a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-s', 'IN_TEST_HARNESS']
if reporting != Reporting.NONE:
      # For basic reporting we inject JS helper functions to report results back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. REPORT_RESULT macro) is required
        # also compile in report_result.cpp and force-include report_result.h
args += ['-I' + TEST_ROOT,
'-include', test_file('report_result.h'),
test_file('report_result.cpp')]
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
    with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
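  # Hedged usage sketch (hypothetical test): a program whose main() simply
  # returns can be verified through its exit code alone, with no REPORT_RESULT
  # in the C source:
  #
  #   self.btest_exit(test_file('hello_world.c'), assert_returncode=0)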
def btest(self, filename, expected=None, reference=None,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
original_args = args.copy()
if not os.path.exists(filename):
filename = test_file(filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING']
outfile = 'test.html'
args += [filename, '-o', outfile]
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in original_args and (also_asmjs or self.also_asmjs):
print('WASM=0')
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING'], message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = test_file(name.replace('_native', ''))
project_dir = os.path.join(build_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
  shutil.copytree(source_dir, project_dir)  # Sometimes useful in debugging to comment this out, along with the two lines above
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = building.get_building_env(cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
if configure[0] == 'cmake':
configure = [EMCMAKE] + configure
else:
configure = [EMCONFIGURE] + configure
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
with open(os.path.join(project_dir, 'configure_out')) as f:
print('-- configure stdout --')
print(f.read())
print('-- end configure stdout --')
with open(os.path.join(project_dir, 'configure_err')) as f:
print('-- configure stderr --')
print(f.read())
print('-- end configure stderr --')
raise
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
      print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
return generated_libs
def check_js_engines():
working_engines = [e for e in config.JS_ENGINES if jsrun.check_engine(e)]
if len(working_engines) < len(config.JS_ENGINES):
    print('Not all the JS engines in JS_ENGINES appear to work.')
exit(1)
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
skipped = False
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
suite = getattr(m, suite_name, None)
if suite:
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
skipped = True
break
assert skipped, "Not able to skip test " + test
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
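# Illustrative sketch (hypothetical arguments): after args_for_random_tests()
# strips the leading 'random', the remainder parses as, e.g.
#   ''         -> (1, default_core_test_mode, core_test_modes)
#   '7'        -> (7, default_core_test_mode, core_test_modes)
#   'other3'   -> (3, 'other', ['other'])
#   'browser2' -> (2, 'browser', ['browser'])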
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other', 'test_posixtest')
if not EMTEST_SAVE_DIR and not DEBUG:
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_testsuite.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_testsuite.ParallelTestSuite(len(tests))
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('tests', nargs='*')
return parser.parse_args()
def main(args):
options = parse_args(args)
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
utils.py
|
# From http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
import cv2
import datetime
from threading import Thread
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
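# Hedged usage sketch (hypothetical frame loop, not part of the original module):
#
#   fps = FPS().start()
#   while have_more_frames():   # some loop that grabs and processes frames
#       fps.update()
#   fps.stop()
#   print("elapsed: {:.2f}s, approx. FPS: {:.2f}".format(fps.elapsed(), fps.fps()))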
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
print("WebcamVideoStream: update: self.stopped")
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
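# Hedged usage sketch (hypothetical capture device 0, not part of the original module):
#
#   vs = WebcamVideoStream(src=0, width=640, height=480).start()
#   frame = vs.read()   # non-blocking: returns the most recently grabbed frame
#   ...                 # process frames in the main thread
#   vs.stop()           # ask the background update() thread to exit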
|
horizont.py
|
import logging
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import threading
class Hud:
# ============================================
def __init__(self):
self.state = [1000,1000,1000,1000,0.0,0.0,0.0]
plt.ion()
self.fig, self.ax1 = plt.subplots(1, 1)
self.lines = {}
#self.thread = threading.Thread(target=self._run)
#self.thread.daemon = True
self.stopThread = False
# ===========================================
def start(self):
self._initPlot()
#self.thread.start()
# ===========================================
def stop(self):
self.stopThread = True
# ===========================================
def _run(self):
        while not self.stopThread:
self.update()
time.sleep(0.01)
# ===========================================
def _initPlot(self):
xah, xaht, xhth = self._linePlots()
self.lines = {}
self.lines["engines"] = self.ax1.text(-0.35, 0.8, str(self.state[:4]), color="w")
self.lines["line_xah0"] = self.ax1.plot(xah[:, 0], xah[:, 1], "w", lw=3)[0]
for k in range(xhth.shape[0]):
self.lines["line_xhth{}".format(k)] = self.ax1.plot(
[xhth[k], xhth[k]], [0.1, -0.1], "w"
)[0]
self.lines["blue"] = self.ax1.fill_between(
[xaht[0, 0], xaht[-1, 0]],
[1, 1],
[xaht[0, 1], xaht[-1, 1]],
facecolor="blue",
)
self.lines["brown"] = self.ax1.fill_between(
[xaht[0, 0], xaht[-1, 0]],
[-1, -1],
[xaht[0, 1], xaht[-1, 1]],
facecolor="brown",
)
self.ax1.axis([-1, 1, -1, 1])
self.ax1.axis("off")
plt.draw()
plt.pause(0.01)
self.fig.canvas.draw()
# ============================================
def _linePlots(self):
PHI = np.deg2rad(self.state[4:7])
xah, xaht = np.zeros((6, 2)), np.zeros((6, 2))
xah[0, :] = [-2, 0]
xah[1, :] = [-0.15, 0]
xah[2, :] = [-0.1, -0.2]
xah[3, :] = [0.1, -0.2]
xah[4, :] = [0.15, 0]
xah[5, :] = [2, 0]
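        # Presumably state[4:7] are roll/pitch/yaw in degrees (an assumption from
        # context): the loop below rotates the horizon line by the first angle,
        # shifts it vertically by the sine of the second, and xhth offsets the
        # heading ticks by the third.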
for k in range(xah.shape[0]):
xaht[k, 0] = xah[k, 0] * np.cos(-PHI[0]) + xah[k, 1] * np.sin(-PHI[0])
xaht[k, 1] = (
                xah[k, 0] * np.sin(PHI[0])
                + xah[k, 1] * np.cos(-PHI[0])
                + np.sin(PHI[1])
)
xhth = np.linspace(-180, 180, 180) - np.rad2deg(PHI[2])
return xah, xaht, xhth
# ============================================
def update(self):
xah, xaht, xhth = self._linePlots()
self.lines["line_xah0"].set_data(xah[:, 0], xah[:, 1])
for k in range(xhth.shape[0]):
self.lines["line_xhth{}".format(k)].set_data([xhth[k], xhth[k]], [0.1, -0.1])
self.lines["engines"].set_text(str(self.state[:4]))
self.lines["blue"].remove()
self.lines["brown"].remove()
self.lines["blue"] = self.ax1.fill_between(
[xaht[0, 0], xaht[-1, 0]],
[1, 1],
[xaht[0, 1], xaht[-1, 1]],
facecolor="blue",
)
self.lines["brown"] = self.ax1.fill_between(
[xaht[0, 0], xaht[-1, 0]],
[-1, -1],
[xaht[0, 1], xaht[-1, 1]],
facecolor="brown",
)
self.fig.canvas.draw()
# ============================================
if __name__ == "__main__":
hud = Hud()
hud.start()
print(np.random.rand(3))
while True:
hud.state = [1000,1000,1000,1000]+np.random.rand(3).tolist()
hud.update()
time.sleep(0.01)
|
coordinates_for_DrugBank.py
|
from openforcefield.utils import utils
from openeye import oechem
from openeye import oeomega
import multiprocessing
import sys
def genConfs(c_mol, ofsff, ofsTri, index):
# set omega settings
omega = oeomega.OEOmega()
omega.SetMaxConfs(1)
omega.SetIncludeInput(False)
omega.SetEnergyWindow(15.0)
strict_stereo = True
omega.SetStrictStereo(strict_stereo)
omega.SetSampleHydrogens(True)
omega.SetStrictAtomTypes(True)
mol = oechem.OEMol(c_mol)
status = omega(mol)
if status:
# change title
mol.SetTitle(f'DrugBank_{index}')
# save forcefield type
mol1 = oechem.OEMol(mol)
oechem.OETriposAtomNames(mol1)
oechem.OEWriteConstMolecule(ofsff, mol1)
# save Tripos atom types
mol2 = oechem.OEMol(mol)
oechem.OETriposAtomTypeNames(mol2)
        oechem.OEWriteConstMolecule(ofsTri, mol2)
    # A plain return value is discarded by multiprocessing.Process, so report
    # the conformer-generation status to the parent via the process exit code.
    sys.exit(0 if status else 1)
flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
in_file = utils.get_data_file_path('molecules/DrugBank_atyped.oeb')
ff_out = 'DrugBank_ff.mol2'
tripos_out = 'DrugBank_tripos.mol2'
failed_out = 'DrugBank_no3D.mol2'
# open files
ofsff = oechem.oemolostream()
ofsff.SetFlavor(oechem.OEFormat_MOL2, flavor)
ofsff.open(ff_out)
ofsTri = oechem.oemolostream()
ofsTri.SetFlavor(oechem.OEFormat_MOL2, flavor)
ofsTri.open(tripos_out)
ofsFail = oechem.oemolostream()
ofsFail.SetFlavor(oechem.OEFormat_MOL2, flavor)
ofsFail.open(failed_out)
success = 0
time_out = 0
conf_fail = 0
index = 0
ifs = oechem.oemolistream(in_file)
ifs.SetFlavor(oechem.OEFormat_MOL2, flavor)
c_mol = oechem.OECreateOEGraphMol()
while oechem.OEReadMolecule(ifs, c_mol):
index += 1
# process molecules individually, storing less
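    # Run conformer generation in its own process so a hung omega call can be
    # abandoned: join(24) gives each molecule at most 24 seconds before the
    # parent records a timeout and terminates the child.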
p = multiprocessing.Process(target=genConfs, args=(c_mol,ofsff, ofsTri, index,))
p.start()
p.join(24)
if p.is_alive():
print(f"TIMED OUT {oechem.OECreateIsoSmiString(c_mol)}")
oechem.OEWriteConstMolecule(ofsFail, oechem.OEMol(c_mol))
time_out += 1
p.terminate()
p.join()
    elif p.exitcode == 0:
        # genConfs exited cleanly with status 0, so a conformer was written
        success += 1
p.terminate()
p.join()
else:
print(f"CONF FAIL {oechem.OECreateIsoSmiString(c_mol)}")
oechem.OEWriteConstMolecule(ofsFail, oechem.OEMol(c_mol))
conf_fail += 1
p.terminate()
p.join()
# Print data
print(f"Success {success} out of {index}")
print(f"{time_out} timed out")
print(f"{conf_fail} failed during conformation generation")
# close files
ofsff.close()
ofsTri.close()
ofsFail.close()
|
regrtest.py
|
#! /usr/bin/env python
"""
Usage:
python -m test.regrtest [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -tt -Wd -3 -m test.regrtest [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- re-run failed tests in verbose mode immediately
-q/--quiet -- no output unless one or more tests fail
-S/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-F/--forever -- run the specified tests in a loop, until an error happens
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
    subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
xpickle - Test pickle and cPickle against Python 2.4, 2.5 and 2.6 to
test backwards compatibility. These tests take a long time
to run.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
"""
import StringIO
import getopt
import json
import os
import random
import re
import shutil
import sys
import time
import traceback
import warnings
import unittest
import tempfile
import imp
import platform
import sysconfig
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.itervalues():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Windows, Tkinter, and resetting the environment after each test don't
# mix well. To alleviate test failures due to Tcl/Tk not being able to
# find its library, get the necessary environment massage done once early.
if sys.platform == 'win32':
try:
import FixTk
except Exception:
pass
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
from test import test_support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui',
'xpickle')
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(code, msg=''):
print __doc__
if msg: print msg
sys.exit(code)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
test_support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'slaveargs=', 'forever', 'header'])
except getopt.error, msg:
usage(2, msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-q', '--quiet'):
quiet = True;
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-s', '--single'):
single = True
elif o in ('-S', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
coverdir = os.path.join(os.getcwd(), a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print a, huntrleaks
usage(2, '-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
elif o in ('-M', '--memlimit'):
test_support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage(1, 'Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except BaseException, e:
result = INTERRUPTED, e.__class__.__name__
print # Force a newline (just in case)
print json.dumps(result)
sys.exit(0)
else:
print >>sys.stderr, ("No handler for option {}. Please "
"report this as a bug at http://bugs.python.org.").format(o)
sys.exit(1)
if single and fromfile:
usage(2, "-s and -f don't go together!")
if use_mp and trace:
usage(2, "-T and -j don't go together!")
if use_mp and findleaks:
usage(2, "-l and -j don't go together!")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print 'No GC available, disabling findleaks.'
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(test_support.SAVEDCWD, fromfile))
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single or tests or args):
# Print basic platform information
print "==", platform.python_implementation(), \
" ".join(sys.version.split())
print "== ", platform.platform(aliased=True), \
"%s-endian" % sys.byteorder
print "== ", os.getcwd()
print "Testing with flags:", sys.flags
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
selected = selected[:1]
try:
next_single_test = alltests[alltests.index(selected[0])+1]
except IndexError:
next_single_test = None
if randomize:
random.seed(random_seed)
print "Using random seed", random_seed
random.shuffle(selected)
if trace:
import trace
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
trace=False, count=True)
test_times = []
test_support.use_resources = use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print "Multiprocess option requires thread support"
sys.exit(2)
from Queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
def tests_and_args():
for test in tests:
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources)
)
yield (test, args_tuple)
pending = tests_and_args()
opt_args = test_support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
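        # Each worker re-invokes regrtest in a child process with --slaveargs;
        # the child prints its result tuple as JSON on its last stdout line,
        # and work() below parses that line back into a result object.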
def work():
# A worker thread.
try:
while True:
try:
test, args_tuple = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
# -E is needed by some tests, e.g. test_import
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'))
stdout, stderr = popen.communicate()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
if stdout:
print stdout
if stderr:
print >>sys.stderr, stderr
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
assert result[1] == 'KeyboardInterrupt'
raise KeyboardInterrupt # What else?
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.close()
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
                # If we're tracing code coverage, we don't exit with a failure
                # status based on the test result.
tracer.runctx('runtest(test, verbose, quiet)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks)
accumulate_result(test, result)
if verbose3 and result[0] == FAILED:
print "Re-running test %r in verbose mode" % test
runtest(test, True, quiet, huntrleaks)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print "Warning: test created", len(gc.garbage),
print "uncollectable object(s)."
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
test_support.unload(module)
if interrupted:
# print a newline after ^C
print
print "Test suite interrupted by signal SIGINT."
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print count(len(omitted), "test"), "omitted:"
printlist(omitted)
if good and not quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if print_slow:
test_times.sort(reverse=True)
print "10 slowest tests:"
for time, test in test_times[:10]:
print "%s: %.1fs" % (test, time)
if bad:
bad = set(bad) - set(environment_changed)
if bad:
print count(len(bad), "test"), "failed:"
printlist(bad)
if environment_changed:
print "{} altered the execution environment:".format(
count(len(environment_changed), "test"))
printlist(environment_changed)
if skipped and not quiet:
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
else:
print "Those skips are all expected on", plat + "."
else:
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
if verbose2 and bad:
print "Re-running failed tests in verbose mode"
for test in bad:
print "Re-running test %r in verbose mode" % test
sys.stdout.flush()
try:
test_support.verbose = True
ok = runtest(test, True, quiet, huntrleaks)
except KeyboardInterrupt:
# print a newline separate from the ^C
print
break
except:
raise
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
]
NOTTESTS = {
'test_support',
'test_future1',
'test_future2',
}
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
modname, ext = os.path.splitext(name)
if modname[:5] == "test_" and ext == ".py" and modname not in others:
tests.append(modname)
return stdtests + sorted(tests)
def runtest(test, verbose, quiet,
huntrleaks=False, use_resources=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
test_times -- a list of (time, test_name) pairs
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
Returns one of the test result constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
test_support.verbose = verbose # Tell tests to be moderately quiet
if use_resources is not None:
test_support.use_resources = use_resources
try:
return runtest_inner(test, verbose, quiet, huntrleaks)
finally:
cleanup_test_droppings(test, verbose)
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'asyncore.socket_map',
'files',
)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_test_support_TESTFN(self):
if os.path.isfile(test_support.TESTFN):
result = 'f'
elif os.path.isdir(test_support.TESTFN):
result = 'd'
else:
result = None
return result
def restore_test_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(test_support.TESTFN):
os.unlink(test_support.TESTFN)
elif os.path.isdir(test_support.TESTFN):
shutil.rmtree(test_support.TESTFN)
def get_files(self):
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir(os.curdir))
def restore_files(self, saved_value):
fn = test_support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
test_support.unlink(fn)
elif os.path.isdir(fn):
test_support.rmtree(fn)
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet:
print >>sys.stderr, (
"Warning -- {} was modified by {}".format(
name, self.testname))
if self.verbose > 1:
print >>sys.stderr, (
" Before: {}\n After: {} ".format(
original, current))
# XXX (ncoghlan): for most resources (e.g. sys.path) identity
# matters at least as much as value. For others (e.g. cwd),
# identity is irrelevant. Should we add a mechanism to check
# for substitution in the cases where it matters?
return False
def runtest_inner(test, verbose, quiet, huntrleaks=False):
test_support.unload(test)
if verbose:
capture_stdout = None
else:
capture_stdout = StringIO.StringIO()
test_time = 0.0
refleak = False # True if the test leaked references.
try:
save_stdout = sys.stdout
try:
if capture_stdout:
sys.stdout = capture_stdout
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# Old tests run to completion simply as a side-effect of
# being imported. For tests based on unittest or doctest,
# explicitly invoke their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
if huntrleaks:
refleak = dash_R(the_module, test, indirect_test,
huntrleaks)
test_time = time.time() - start_time
finally:
sys.stdout = save_stdout
except test_support.ResourceDenied, msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest, msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except test_support.TestFailed, msg:
print >>sys.stderr, "test", test, "failed --", msg
sys.stderr.flush()
return FAILED, test_time
except:
type, value = sys.exc_info()[:2]
print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value
sys.stderr.flush()
if verbose:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
# Except in verbose mode, tests should not print anything
if verbose or huntrleaks:
return PASSED, test_time
output = capture_stdout.getvalue()
if not output:
return PASSED, test_time
print "test", test, "produced unexpected output:"
print "*" * 70
print output
print "*" * 70
sys.stdout.flush()
return FAILED, test_time
def cleanup_test_droppings(testname, verbose):
import stat
import gc
# First kill any dangling references to open files etc.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (test_support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print "%r left behind %s %r" % (testname, kind, name)
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception, msg:
print >> sys.stderr, ("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg))
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copy_reg, _abcoll, _pyio
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copy_reg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
modules = _abcoll, _pyio
for abc in [getattr(mod, a) for mod in modules for a in mod.__all__]:
# XXX isinstance(abc, ABCMeta) leads to infinite recursion
if not hasattr(abc, '_abc_registry'):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
imp.reload(the_module)
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(test_support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount]
dash_R_cleanup(fs, ps, pic, zdc, abcs)
for i in range(repcount):
rc_before = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
dash_R_cleanup(fs, ps, pic, zdc, abcs)
rc_after = sys.gettotalrefcount()
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print >> sys.stderr
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print >> sys.stderr, msg
with open(fname, "a") as refrep:
print >> refrep, msg
refrep.flush()
return True
return False
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copy_reg
import _strptime, linecache
dircache = test_support.import_module('dircache', deprecated=True)
import urlparse, urllib, urllib2, mimetypes, doctest
import struct, filecmp
from distutils.dir_util import _path_created
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copy_reg.dispatch_table.clear()
copy_reg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc, registry in abcs.items():
abc._abc_registry = registry.copy()
abc._abc_cache.clear()
abc._abc_negative_cache.clear()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urlparse.clear_cache()
urllib.urlcleanup()
urllib2.install_opener(None)
dircache.reset()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash.
gc.collect()
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks)
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = {
'win32':
"""
test__locale
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_mhlib
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_spwd
test_threadsignals
test_timing
test_wait3
test_wait4
""",
'linux2':
"""
test_bsddb185
test_curses
test_dl
test_largefile
test_kqueue
test_ossaudiodev
""",
'unixware7':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'openunix8':
"""
test_bsddb
test_bsddb185
test_dl
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'sco_sv3':
"""
test_asynchat
test_bsddb
test_bsddb185
test_dl
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'riscos':
"""
test_asynchat
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_commands
test_crypt
test_dbm
test_dl
test_fcntl
test_fork1
test_epoll
test_gdbm
test_grp
test_largefile
test_locale
test_kqueue
test_mmap
test_openpty
test_poll
test_popen2
test_pty
test_pwd
test_strop
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
test_timing
""",
'darwin':
"""
test__locale
test_bsddb
test_bsddb3
test_curses
test_epoll
test_gdb
test_gdbm
test_largefile
test_locale
test_kqueue
test_minidom
test_ossaudiodev
test_poll
""",
'sunos5':
"""
test_bsddb
test_bsddb185
test_curses
test_dbm
test_epoll
test_kqueue
test_gdbm
test_gzip
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_bsddb
test_bsddb185
test_curses
test_dl
test_epoll
test_gdbm
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
""",
'atheos':
"""
test_bsddb185
test_curses
test_dl
test_gdbm
test_epoll
test_largefile
test_locale
test_kqueue
test_mhlib
test_mmap
test_poll
test_popen2
test_resource
""",
'cygwin':
"""
test_bsddb185
test_bsddb3
test_curses
test_dbm
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
""",
'os2emx':
"""
test_audioop
test_bsddb185
test_bsddb3
test_commands
test_curses
test_dl
test_epoll
test_kqueue
test_largefile
test_mhlib
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
""",
'freebsd4':
"""
test_bsddb
test_bsddb3
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
""",
'aix5':
"""
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_dl
test_epoll
test_gdbm
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
""",
'openbsd3':
"""
test_ascii_formatd
test_bsddb
test_bsddb3
test_ctypes
test_dl
test_epoll
test_gdbm
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
'netbsd3':
"""
test_ascii_formatd
test_bsddb
test_bsddb185
test_bsddb3
test_ctypes
test_curses
test_dl
test_epoll
test_gdbm
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
_expectations['freebsd7'] = _expectations['freebsd4']
_expectations['freebsd8'] = _expectations['freebsd4']
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = set(s.split())
# expected to be skipped on every platform, even Linux
self.expected.add('test_linuxaudiodev')
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.maxint == 9223372036854775807L:
self.expected.add('test_imageop')
if sys.platform != "darwin":
MAC_ONLY = ["test_macos", "test_macostools", "test_aepack",
"test_plistlib", "test_scriptpackages",
"test_applesingle"]
for skip in MAC_ONLY:
self.expected.add(skip)
elif len(u'\0'.encode('unicode-internal')) == 4:
self.expected.add("test_macostools")
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = ["test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite", "test_msilib"]
for skip in WIN_ONLY:
self.expected.add(skip)
if sys.platform != 'irix':
IRIX_ONLY = ["test_imageop", "test_al", "test_cd", "test_cl",
"test_gl", "test_imgfile"]
for skip in IRIX_ONLY:
self.expected.add(skip)
if sys.platform != 'sunos5':
self.expected.add('test_sunaudiodev')
self.expected.add('test_nis')
if not sys.py3kwarning:
self.expected.add('test_py3kwarn')
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
if __name__ == '__main__':
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
if not os.path.exists(TEMPDIR):
os.mkdir(TEMPDIR)
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
# Run the tests in a context manager that temporary changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from test_support.SAVEDCWD.
with test_support.temp_cwd(TESTCWD, quiet=True):
main()
|
master.py
|
"""
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
"""
import collections
import copy
import ctypes
import functools
import logging
import multiprocessing
import os
import re
import signal
import stat
import sys
import threading
import time
import salt.acl
import salt.auth
import salt.client
import salt.client.ssh.client
import salt.crypt
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.engines
import salt.exceptions
import salt.ext.tornado.gen
import salt.key
import salt.log.setup
import salt.minion
import salt.payload
import salt.pillar
import salt.runner
import salt.serializers.msgpack
import salt.state
import salt.transport.server
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
import salt.wheel
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.ext.tornado.stack_context import StackContext
from salt.transport import iter_transport_opts
from salt.utils.ctx import RequestContext
from salt.utils.debug import (
enable_sigusr1_handler,
enable_sigusr2_handler,
inspect_stack,
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
log = logging.getLogger(__name__)
class SMaster:
"""
Create a simple salt-master, this will generate the top-level master
"""
secrets = (
{}
) # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
"""
Create a salt master server instance
:param dict opts: The salt options dictionary
"""
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state["opts"]
self.master_key = state["master_key"]
self.key = state["key"]
SMaster.secrets = state["secrets"]
def __getstate__(self):
return {
"opts": self.opts,
"master_key": self.master_key,
"key": self.key,
"secrets": SMaster.secrets,
}
def __prep_key(self):
"""
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
"""
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingProcess):
"""
A generalized maintenance process which performs maintenance routines.
"""
def __init__(self, opts, **kwargs):
"""
Create a maintenance instance
:param dict opts: The salt options
"""
super().__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts["loop_interval"])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["opts"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"opts": self.opts,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def _post_fork_init(self):
"""
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
"""
# Load Runners
ropts = dict(self.opts)
ropts["quiet"] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(
self.opts, runner_client.functions_dict(), returners=self.returners
)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
if self.opts["maintenance_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Maintenance niceness to %d", self.opts["maintenance_niceness"]
)
os.nice(self.opts["maintenance_niceness"])
self.presence_events = False
if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != "tcp":
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
"""
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
# update git_pillar on first loop
last_git_pillar_update = 0
git_pillar_update_interval = self.opts.get("git_pillar_update_interval", 0)
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
if (now - last_git_pillar_update) >= git_pillar_update_interval:
last_git_pillar_update = now
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
"""
Evaluate accepted keys and create a msgpack file
which contains a list
"""
if self.opts["key_cache"] == "sched":
keys = []
# TODO DRY from CKMinions
if self.opts["transport"] in ("zeromq", "tcp"):
acc = "minions"
else:
acc = "accepted"
for fn_ in os.listdir(os.path.join(self.opts["pki_dir"], acc)):
if not fn_.startswith(".") and os.path.isfile(
os.path.join(self.opts["pki_dir"], acc, fn_)
):
keys.append(fn_)
log.debug("Writing master key cache")
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(
os.path.join(self.opts["pki_dir"], acc, ".key_cache"), mode="wb"
) as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
"""
        Rotate the master AES key when a rotation is due
"""
to_rotate = False
dfn = os.path.join(self.opts["cachedir"], ".dfn")
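        # The ".dfn" dropfile signals that the AES key should be rotated; it is
        # only honored when it is owner-read-only, otherwise it is discarded.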
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error("Found dropfile with incorrect permissions, ignoring...")
os.remove(dfn)
except os.error:
pass
if self.opts.get("publish_session"):
if now - self.rotate >= self.opts["publish_session"]:
to_rotate = True
if to_rotate:
log.info("Rotating master AES key")
for secret_key, secret_map in SMaster.secrets.items():
                # The lock should be unnecessary, since nothing else should be
                # modifying the secret here
with secret_map["secret"].get_lock():
secret_map["secret"].value = salt.utils.stringutils.to_bytes(
secret_map["reload"]()
)
self.event.fire_event(
{"rotate_{}_key".format(secret_key): True}, tag="key"
)
self.rotate = now
if self.opts.get("ping_on_rotate"):
# Ping all minions to get them to pick up the new key
log.debug("Pinging all connected minions " "due to key rotation")
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
"""
Update git pillar
"""
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc: # pylint: disable=broad-except
log.error("Exception caught while updating git_pillar", exc_info=True)
def handle_schedule(self):
"""
Evaluate the scheduler
"""
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc: # pylint: disable=broad-except
log.error("Exception %s occurred in scheduled job", exc)
self.schedule.cleanup_subprocesses()
def handle_presence(self, old_present):
"""
Fire presence events if enabled
"""
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {"new": list(new), "lost": list(lost)}
self.event.fire_event(data, tagify("change", "presence"))
data = {"present": list(present)}
self.event.fire_event(data, tagify("present", "presence"))
old_present.clear()
old_present.update(present)
class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
"""
A process from which to update any dynamic fileserver backends
"""
def __init__(self, opts, **kwargs):
super().__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["opts"], log_queue=state["log_queue"],
)
def __getstate__(self):
return {
"opts": self.opts,
"log_queue": self.log_queue,
}
def fill_buckets(self):
"""
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
"""
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
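        # Resulting shape, with hypothetical backends and intervals, e.g.:
        #   {60: {("gitfs", update_func): ["repo1", "repo2"]},
        #    43200: {("roots", update_func): None}}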
for backend in self.fileserver.backends():
fstr = "{}.update".format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug("No update function for the %s filserver backend", backend)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in update_intervals[backend].items():
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
"An update_interval of 0 is not supported, "
"falling back to %s",
interval,
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = "{}_update_interval".format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
"%s key missing from configuration. Falling back to "
"default interval of %d seconds",
interval_key,
interval,
)
self.buckets.setdefault(interval, OrderedDict())[
(backend, update_func)
] = None
def update_fileserver(self, interval, backends):
"""
Threading target which handles all updates for a given wait interval
"""
def _do_update():
log.debug(
"Performing fileserver updates for items with an update "
"interval of %d",
interval,
)
for backend, update_args in backends.items():
backend_name, update_func = backend
try:
if update_args:
log.debug(
"Updating %s fileserver cache for the following "
"targets: %s",
backend_name,
update_args,
)
args = (update_args,)
else:
log.debug("Updating %s fileserver cache", backend_name)
args = ()
update_func(*args)
except Exception as exc: # pylint: disable=broad-except
log.exception(
"Uncaught exception while updating %s fileserver " "cache",
backend_name,
)
log.debug(
"Completed fileserver updates for items with an update "
"interval of %d, waiting %d seconds",
interval,
interval,
)
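        # The condition below is never notified; wait(interval) simply acts as an
        # interruptible sleep between update passes.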
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
def run(self):
"""
Start the update threads
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
if (
self.opts["fileserver_update_niceness"]
and not salt.utils.platform.is_windows()
):
log.info(
"setting FileServerUpdate niceness to %d",
self.opts["fileserver_update_niceness"],
)
os.nice(self.opts["fileserver_update_niceness"])
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver, args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
class Master(SMaster):
"""
The salt master server
"""
def __init__(self, opts):
"""
Create a salt master server instance
        :param dict opts: The salt options
"""
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
"You have a version of ZMQ less than ZMQ 3.2! There are "
"known connection keep-alive issues with ZMQ < 3.2 which "
"may result in loss of contact with minions. Please "
"upgrade your ZMQ!"
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
            # Unclear what to do with infinity... macOS reports RLIM_INFINITY as the
            # hard limit, but raising to anything above the soft limit fails...
mof_h = mof_s
log.info(
"Current values for max open files soft/hard setting: %s/%s", mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts["max_open_files"]
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
"The value for the 'max_open_files' setting, %s, is higher "
"than the highest value the user running salt is allowed to "
"set (%s). Defaulting to %s.",
mof_c,
mof_h,
mof_h,
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info("Raising max open files value to %s", mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
"New values for max open files soft/hard values: %s/%s",
mof_s,
mof_h,
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
"Failed to raise max open files setting to %s. If this "
"value is too low, the salt-master will most likely fail "
"to run properly.",
mof_c,
)
def _pre_flight(self):
"""
Run pre flight checks. If anything in this method fails then the master
should not start up.
"""
errors = []
critical_errors = []
try:
os.chdir("/")
except OSError as err:
errors.append("Cannot change to root directory ({})".format(err))
if self.opts.get("fileserver_verify_config", True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
"Failed to load fileserver backends, the configured backends "
"are: {}".format(", ".join(self.opts["fileserver_backend"]))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append("{}".format(exc))
if not self.opts["fileserver_backend"]:
errors.append("No fileserver backends are configured")
# Check to see if we need to create a pillar cache dir
if self.opts["pillar_cache"] and not os.path.isdir(
os.path.join(self.opts["cachedir"], "pillar_cache")
):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts["cachedir"], "pillar_cache"))
except OSError:
pass
if self.opts.get("git_pillar_verify_config", True):
try:
git_pillars = [
x
for x in self.opts.get("ext_pillar", [])
if "git" in x and not isinstance(x["git"], str)
]
except TypeError:
git_pillars = []
critical_errors.append(
"Invalid ext_pillar configuration. It is likely that the "
"external pillar type was not specified for one or more "
"external pillars."
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts["ext_pillar"] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo["git"],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical("Master failed pre flight checks, exiting\n")
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
"""
Turn on the master server components
"""
self._pre_flight()
log.info("salt-master is starting as user '%s'", salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets["aes"] = {
"secret": multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
),
),
"reload": salt.crypt.Crypticle.generate_key_string,
}
log.info("Creating master process manager")
            # Since the children have their own ProcessManager, allow more time for the kill.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info("Creating master publisher process")
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager, kwargs={"log_queue": log_queue})
pub_channels.append(chan)
log.info("Creating master event publisher process")
self.process_manager.add_process(
salt.utils.event.EventPublisher, args=(self.opts,)
)
if self.opts.get("reactor"):
if isinstance(self.opts["engines"], list):
rine = False
for item in self.opts["engines"]:
if "reactor" in item:
rine = True
break
if not rine:
self.opts["engines"].append({"reactor": {}})
else:
if "reactor" not in self.opts["engines"]:
log.info("Enabling the reactor engine")
self.opts["engines"]["reactor"] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info("Creating master maintenance process")
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get("event_return"):
log.info("Creating master event return process")
self.process_manager.add_process(
salt.utils.event.EventReturn, args=(self.opts,)
)
ext_procs = self.opts.get("ext_processes", [])
for proc in ext_procs:
log.info("Creating ext_processes process: %s", proc)
try:
mod = ".".join(proc.split(".")[:-1])
cls = proc.split(".")[-1]
                    _tmp = __import__(mod, globals(), locals(), [cls], 0)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception: # pylint: disable=broad-except
log.error("Error creating ext_processes process: %s", proc)
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts["con_cache"]:
log.info("Creating master concache process")
self.process_manager.add_process(
salt.utils.master.ConnectedCache, args=(self.opts,)
)
# workaround for issue #16315, race condition
log.debug("Sleeping for two seconds to let concache rest")
time.sleep(2)
log.info("Creating master request server process")
kwargs = {}
if salt.utils.platform.is_windows():
kwargs["log_queue"] = log_queue
kwargs[
"log_queue_level"
] = salt.log.setup.get_multiprocessing_logging_level()
kwargs["secrets"] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name="ReqServer",
)
self.process_manager.add_process(FileserverUpdate, args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts["discovery"]:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(
salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts["discovery"]["port"],
listen_ip=self.opts["interface"],
answer={
"mapping": self.opts["discovery"].get("mapping", {})
},
).run
)
else:
log.error("Unable to load SSDP: asynchronous IO is not available.")
if sys.version_info.major == 2:
log.error(
'You are using Python 2, please install "trollius" module to enable SSDP discovery.'
)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
class ReqServer(salt.utils.process.SignalHandlingProcess):
"""
Starts up the master request server, minions send results to this
interface.
"""
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
"""
Create a request server
:param dict opts: The salt options dictionary
        :param dict key: The user starting the server and the AES key
        :param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
"""
super().__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["opts"],
state["key"],
state["mkey"],
secrets=state["secrets"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"opts": self.opts,
"key": self.key,
"mkey": self.master_key,
"secrets": self.secrets,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super()._handle_signals(signum, sigframe)
def __bind(self):
"""
Binds the reply server
"""
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
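        # Remove any stale ".dfn" drop file (used to signal AES key rotation) that
        # a previous master run may have left behind.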
dfn = os.path.join(self.opts["cachedir"], ".dfn")
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
        # Wait for kill should be less than the parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager", wait_for_kill=1
)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != "tcp":
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs["log_queue"] = self.log_queue
kwargs["log_queue_level"] = self.log_queue_level
if self.opts["req_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting ReqServer_ProcessManager niceness to %d",
self.opts["req_server_niceness"],
)
os.nice(self.opts["req_server_niceness"])
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts["worker_threads"])):
name = "MWorker-{}".format(ind)
self.process_manager.add_process(
MWorker,
args=(self.opts, self.master_key, self.key, req_channels, name),
kwargs=kwargs,
name=name,
)
self.process_manager.run()
def run(self):
"""
Start up the ReqServer
"""
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, "process_manager"):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
class MWorker(salt.utils.process.SignalHandlingProcess):
"""
The worker multiprocess instance to manage the backend operations for the
salt master.
"""
def __init__(self, opts, mkey, key, req_channels, name, **kwargs):
"""
Create a salt master worker process
:param dict opts: The salt options
        :param dict mkey: The user running the salt master and the RSA key
        :param dict key: The user running the salt master and the AES key
:rtype: MWorker
:return: Master worker
"""
kwargs["name"] = name
self.name = name
super().__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
super().__init__(
log_queue=state["log_queue"], log_queue_level=state["log_queue_level"]
)
self.opts = state["opts"]
self.req_channels = state["req_channels"]
self.mkey = state["mkey"]
self.key = state["key"]
self.k_mtime = state["k_mtime"]
SMaster.secrets = state["secrets"]
def __getstate__(self):
return {
"opts": self.opts,
"req_channels": self.req_channels,
"mkey": self.mkey,
"key": self.key,
"k_mtime": self.k_mtime,
"secrets": SMaster.secrets,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, "req_channels", ()):
channel.close()
self.clear_funcs.destroy()
super()._handle_signals(signum, sigframe)
def __bind(self):
"""
Bind to the local port
"""
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(
self._handle_payload, io_loop=self.io_loop
) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@salt.ext.tornado.gen.coroutine
def _handle_payload(self, payload):
"""
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
"""
key = payload["enc"]
load = payload["load"]
ret = {"aes": self._handle_aes, "clear": self._handle_clear}[key](load)
raise salt.ext.tornado.gen.Return(ret)
def _post_stats(self, start, cmd):
"""
Calculate the master stats and fire events with stat info
"""
end = time.time()
duration = end - start
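        # Incremental mean: the caller already incremented self.stats[cmd]["runs"],
        # so the previous mean is weighted by (runs - 1) before folding in this run.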
self.stats[cmd]["mean"] = (
self.stats[cmd]["mean"] * (self.stats[cmd]["runs"] - 1) + duration
) / self.stats[cmd]["runs"]
if end - self.stat_clock > self.opts["master_stats_event_iter"]:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event(
{
"time": end - self.stat_clock,
"worker": self.name,
"stats": self.stats,
},
tagify(self.name, "stats"),
)
self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = end
def _handle_clear(self, load):
"""
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
"""
log.trace("Clear payload received with command %s", load["cmd"])
cmd = load["cmd"]
method = self.clear_funcs.get_method(cmd)
if not method:
return {}, {"fun": "send_clear"}
if self.opts["master_stats"]:
start = time.time()
self.stats[cmd]["runs"] += 1
ret = method(load), {"fun": "send_clear"}
if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def _handle_aes(self, data):
"""
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
"""
if "cmd" not in data:
log.error("Received malformed command %s", data)
return {}
cmd = data["cmd"]
log.trace("AES payload received with command %s", data["cmd"])
method = self.aes_funcs.get_method(cmd)
if not method:
return {}, {"fun": "send"}
if self.opts["master_stats"]:
start = time.time()
self.stats[cmd]["runs"] += 1
def run_func(data):
return self.aes_funcs.run_func(data["cmd"], data)
with StackContext(
functools.partial(RequestContext, {"data": data, "opts": self.opts})
):
ret = run_func(data)
if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def run(self):
"""
Start a Master Worker
"""
salt.utils.process.appendproctitle(self.name)
# if we inherit req_server level without our own, reset it
if not salt.utils.platform.is_windows():
enforce_mworker_niceness = True
if self.opts["req_server_niceness"]:
if salt.utils.user.get_user() == "root":
log.info(
"%s decrementing inherited ReqServer niceness to 0", self.name
)
                    log.info("Current niceness: %s", os.nice(0))
os.nice(-1 * self.opts["req_server_niceness"])
else:
log.error(
"%s unable to decrement niceness for MWorker, not running as root",
self.name,
)
enforce_mworker_niceness = False
# else set what we're explicitly asked for
if enforce_mworker_niceness and self.opts["mworker_niceness"]:
log.info(
"setting %s niceness to %i",
self.name,
self.opts["mworker_niceness"],
)
os.nice(self.opts["mworker_niceness"])
self.clear_funcs = ClearFuncs(self.opts, self.key,)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
class TransportMethods:
"""
Expose methods to the transport layer, methods with their names found in
the class attribute 'expose_methods' will be exposed to the transport layer
via 'get_method'.
"""
expose_methods = ()
def get_method(self, name):
"""
Get a method which should be exposed to the transport layer
"""
if name in self.expose_methods:
try:
return getattr(self, name)
except AttributeError:
log.error("Requested method not exposed: %s", name)
else:
log.error("Requested method not exposed: %s", name)
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(TransportMethods):
"""
Set up functions that are available when the load is encrypted with AES
"""
expose_methods = (
"verify_minion",
"_master_tops",
"_master_opts",
"_mine_get",
"_mine",
"_mine_delete",
"_mine_flush",
"_file_recv",
"_pillar",
"_minion_event",
"_handle_minion_event",
"_return",
"_syndic_return",
"minion_runner",
"pub_ret",
"minion_pub",
"minion_publish",
"revoke_auth",
"_serve_file",
"_file_find",
"_file_hash",
"_file_hash_and_stat",
"_file_list",
"_file_list_emptydirs",
"_dir_list",
"_symlink_list",
"_file_envs",
)
def __init__(self, opts):
"""
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
"""
self.opts = opts
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts["conf_file"])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts, states=False, rend=False, ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
"""
Set the local file objects from the file server interface
"""
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
"""
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
"""
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts["pki_dir"], "minions", id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except OSError:
log.warning(
"Salt minion claiming to be %s attempted to communicate with "
"master, but key could not be read and verification was denied.",
id_,
)
return False
except (ValueError, IndexError, TypeError) as err:
            log.error('Unable to load public key "%s": %s', pub_path, err)
            return False
try:
if salt.crypt.public_decrypt(pub, token) == b"salt":
return True
except ValueError as err:
log.error("Unable to decrypt token: %s", err)
log.error(
"Salt minion claiming to be %s has attempted to communicate with "
"the master and could not be verified",
id_,
)
return False
def verify_minion(self, id_, token):
"""
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
"""
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
"""
        Verify that the passed information authorizes a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
"""
# Verify that the load is valid
if "peer" not in self.opts:
return False
if not isinstance(self.opts["peer"], dict):
return False
if any(
key not in clear_load for key in ("fun", "arg", "tgt", "ret", "tok", "id")
):
return False
# If the command will make a recursive publish don't run
if clear_load["fun"].startswith("publish."):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load["id"], clear_load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
"Minion id %s is not who it says it is and is attempting "
"to issue a peer command",
clear_load["id"],
)
return False
clear_load.pop("tok")
perms = []
for match in self.opts["peer"]:
if re.match(match, clear_load["id"]):
# This is the list of funcs/modules!
if isinstance(self.opts["peer"][match], list):
perms.extend(self.opts["peer"][match])
if "," in clear_load["fun"]:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load["fun"] = clear_load["fun"].split(",")
arg_ = []
for arg in clear_load["arg"]:
arg_.append(arg.split())
clear_load["arg"] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load["fun"],
clear_load["arg"],
clear_load["tgt"],
clear_load.get("tgt_type", "glob"),
publish_validate=True,
)
def __verify_load(self, load, verify_keys):
"""
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
"""
if any(key not in load for key in verify_keys):
return False
if "tok" not in load:
log.error(
"Received incomplete call from %s for '%s', missing '%s'",
load["id"],
inspect_stack()["co_name"],
"tok",
)
return False
if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning("Minion id %s is not who it says it is!", load["id"])
return False
if "tok" in load:
load.pop("tok")
return load
def _master_tops(self, load):
"""
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
def _master_opts(self, load):
"""
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
"""
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts["file_roots"] = file_roots
mopts["top_file_merging_strategy"] = self.opts["top_file_merging_strategy"]
mopts["env_order"] = self.opts["env_order"]
mopts["default_top"] = self.opts["default_top"]
if load.get("env_only"):
return mopts
mopts["renderer"] = self.opts["renderer"]
mopts["failhard"] = self.opts["failhard"]
mopts["state_top"] = self.opts["state_top"]
mopts["state_top_saltenv"] = self.opts["state_top_saltenv"]
mopts["nodegroups"] = self.opts["nodegroups"]
mopts["state_auto_order"] = self.opts["state_auto_order"]
mopts["state_events"] = self.opts["state_events"]
mopts["state_aggregate"] = self.opts["state_aggregate"]
mopts["jinja_env"] = self.opts["jinja_env"]
mopts["jinja_sls_env"] = self.opts["jinja_sls_env"]
mopts["jinja_lstrip_blocks"] = self.opts["jinja_lstrip_blocks"]
mopts["jinja_trim_blocks"] = self.opts["jinja_trim_blocks"]
return mopts
def _mine_get(self, load):
"""
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
"""
load = self.__verify_load(load, ("id", "tgt", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
"""
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
"""
load = self.__verify_load(load, ("id", "data", "tok"))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
"""
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
"""
load = self.__verify_load(load, ("id", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
"""
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
"""
        Allow minions to send files to the master; the files are stored in the
        master file cache
"""
if any(key not in load for key in ("id", "path", "loc")):
return False
if not isinstance(load["path"], list):
return False
if not self.opts["file_recv"]:
return False
if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
file_recv_max_size = 1024 * 1024 * self.opts["file_recv_max_size"]
if "loc" in load and load["loc"] < 0:
log.error("Invalid file pointer: load[loc] < 0")
return False
if len(load["data"]) + load.get("loc", 0) > file_recv_max_size:
log.error(
"file_recv_max_size limit of %d MB exceeded! %s will be "
"truncated. To successfully push this file, adjust "
"file_recv_max_size to an integer (in MB) large enough to "
"accommodate it.",
file_recv_max_size,
load["path"],
)
return False
if "tok" not in load:
log.error(
"Received incomplete call from %s for '%s', missing '%s'",
load["id"],
inspect_stack()["co_name"],
"tok",
)
return False
if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning("Minion id %s is not who it says it is!", load["id"])
return {}
load.pop("tok")
# Join path
sep_path = os.sep.join(load["path"])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
        # has been normalized.
if os.path.isabs(normpath) or "../" in load["path"]:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts["cachedir"], "minions", load["id"], "files", normpath
)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts["cachedir"]):
log.warning(
"Attempt to write received file outside of master cache "
"directory! Requested path: %s. Access denied.",
cpath,
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load["loc"] != 0:
mode = "ab"
else:
mode = "wb"
with salt.utils.files.fopen(cpath, mode) as fp_:
if load["loc"]:
fp_.seek(load["loc"])
fp_.write(salt.utils.stringutils.to_bytes(load["data"]))
return True
def _pillar(self, load):
"""
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
"""
if any(key not in load for key in ("id", "grains")):
return False
if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
load["grains"]["id"] = load["id"]
pillar = salt.pillar.get_pillar(
self.opts,
load["grains"],
load["id"],
load.get("saltenv", load.get("env")),
ext=load.get("ext"),
pillar_override=load.get("pillar_override", {}),
pillarenv=load.get("pillarenv"),
extra_minion_data=load.get("extra_minion_data"),
)
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get("minion_data_cache", False):
self.masterapi.cache.store(
"minions/{}".format(load["id"]),
"data",
{"grains": load["grains"], "pillar": data},
)
if self.opts.get("minion_data_cache_events") is True:
self.event.fire_event(
{"Minion data cache refresh": load["id"]},
tagify(load["id"], "refresh", "minion"),
)
return data
def _minion_event(self, load):
"""
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
"""
Act on specific events from minions
"""
id_ = load["id"]
if load.get("tag", "") == "_salt_error":
log.error(
"Received minion error from [%s]: %s", id_, load["data"]["message"]
)
for event in load.get("events", []):
event_data = event.get("data", {})
if "minions" in event_data:
jid = event_data.get("jid")
if not jid:
continue
minions = event_data["minions"]
try:
salt.utils.job.store_minions(
self.opts, jid, minions, mminion=self.mminion, syndic_id=id_
)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
"Could not add minion(s) %s for job %s: %s", minions, jid, exc
)
def _return(self, load):
"""
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
"""
if self.opts["require_minion_sign_messages"] and "sig" not in load:
log.critical(
"_return: Master is requiring minions to sign their "
"messages, but there is no signature in this payload from "
"%s.",
load["id"],
)
return False
if "sig" in load:
log.trace("Verifying signed event publish from minion")
sig = load.pop("sig")
this_minion_pubkey = os.path.join(
self.opts["pki_dir"], "minions/{}".format(load["id"])
)
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(
this_minion_pubkey, serialized_load, sig
):
log.info("Failed to verify event signature from minion %s.", load["id"])
if self.opts["drop_messages_signature_fail"]:
log.critical(
"drop_messages_signature_fail is enabled, dropping "
"message from %s",
load["id"],
)
return False
else:
log.info(
"But 'drop_message_signature_fail' is disabled, so message is still accepted."
)
load["sig"] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion
)
except salt.exceptions.SaltCacheError:
log.error("Could not store job information for load: %s", load)
def _syndic_return(self, load):
"""
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
"""
loads = load.get("load")
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ("return", "jid", "id")):
continue
# if we have a load, save it
if load.get("load"):
fstr = "{}.save_load".format(self.opts["master_job_cache"])
self.mminion.returners[fstr](load["jid"], load["load"])
# Register the syndic
syndic_cache_path = os.path.join(
self.opts["cachedir"], "syndics", load["id"]
)
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, "w") as wfh:
wfh.write("")
# Format individual return loads
for key, item in load["return"].items():
ret = {"jid": load["jid"], "id": key}
ret.update(item)
if "master_id" in load:
ret["master_id"] = load["master_id"]
if "fun" in load:
ret["fun"] = load["fun"]
if "arg" in load:
ret["fun_args"] = load["arg"]
if "out" in load:
ret["out"] = load["out"]
if "sig" in load:
ret["sig"] = load["sig"]
self._return(ret)
def minion_runner(self, clear_load):
"""
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
"""
load = self.__verify_load(clear_load, ("fun", "arg", "id", "tok"))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
"""
Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
"""
load = self.__verify_load(load, ("jid", "id", "tok"))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(self.opts["cachedir"], "publish_auth")
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(load["jid"]))
with salt.utils.files.fopen(jid_fn, "r") as fp_:
if not load["id"] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load["jid"])
def minion_pub(self, clear_load):
"""
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
        :param dict clear_load: The minion payload
"""
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
"""
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
        .. code-block:: bash
            peer:
              foo.example.com:
                - test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
"""
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
"""
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
"""
load = self.__verify_load(load, ("id", "tok"))
if not self.opts.get("allow_minion_key_revoke", False):
log.warning(
"Minion %s requested key revoke, but allow_minion_key_revoke "
"is set to False",
load["id"],
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
"""
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
"""
# Don't honor private functions
if func.startswith("__"):
# TODO: return some error? Seems odd to return {}
return {}, {"fun": "send"}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
"Master function call %s took %s seconds", func, time.time() - start
)
except Exception: # pylint: disable=broad-except
ret = ""
log.error("Error in function %s:\n", func, exc_info=True)
else:
log.error(
"Received function %s which is unavailable on the master, "
"returning False",
func,
)
return False, {"fun": "send"}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == "_return":
return ret, {"fun": "send"}
if func == "_pillar" and "id" in load:
if load.get("ver") != "2" and self.opts["pillar_version"] == 1:
# Authorized to return old pillar proto
return ret, {"fun": "send"}
return ret, {"fun": "send_private", "key": "pillar", "tgt": load["id"]}
# Encrypt the return
return ret, {"fun": "send"}
def destroy(self):
self.masterapi.destroy()
if self.local is not None:
self.local.destroy()
self.local = None
class ClearFuncs(TransportMethods):
"""
    Set up functions that are safe to execute when commands are sent to the master
without encryption and authentication
"""
# These methods will be exposed to the transport layer by
# MWorker._handle_clear
expose_methods = (
"ping",
"publish",
"get_token",
"mk_token",
"wheel",
"runner",
)
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
# Make a client
self.local = salt.client.get_local_client(self.opts["conf_file"])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts, states=False, rend=False, ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
"""
Send a master control function back to the runner system
"""
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
return {"error": error}
# Authorize
username = auth_check.get("username")
if auth_type != "user":
runner_check = self.ckminions.runner_check(
auth_check.get("auth_list", []),
clear_load["fun"],
clear_load.get("kwarg", {}),
)
if not runner_check:
return {
"error": {
"name": err_name,
"message": 'Authentication failure of type "{}" occurred for '
"user {}.".format(auth_type, username),
}
}
elif isinstance(runner_check, dict) and "error" in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if "user" in clear_load:
username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop("fun")
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(
fun, clear_load.get("kwarg", {}), username, local=True
)
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred while introspecting %s: %s", fun, exc)
return {
"error": {
"name": exc.__class__.__name__,
"args": exc.args,
"message": str(exc),
}
}
def wheel(self, clear_load):
"""
Send a master control function back to the wheel system
"""
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
return {"error": error}
# Authorize
username = auth_check.get("username")
if auth_type != "user":
wheel_check = self.ckminions.wheel_check(
auth_check.get("auth_list", []),
clear_load["fun"],
clear_load.get("kwarg", {}),
)
if not wheel_check:
return {
"error": {
"name": err_name,
"message": 'Authentication failure of type "{}" occurred for '
"user {}.".format(auth_type, username),
}
}
elif isinstance(wheel_check, dict) and "error" in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if "user" in clear_load:
username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop("fun")
tag = tagify(jid, prefix="wheel")
data = {
"fun": "wheel.{}".format(fun),
"jid": jid,
"tag": tag,
"user": username,
}
self.event.fire_event(data, tagify([jid, "new"], "wheel"))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data["return"] = ret["return"]
data["success"] = ret["success"]
self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
return {"tag": tag, "data": data}
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred while introspecting %s: %s", fun, exc)
data["return"] = "Exception occurred in wheel {}: {}: {}".format(
fun, exc.__class__.__name__, exc,
)
data["success"] = False
self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
return {"tag": tag, "data": data}
def mk_token(self, clear_load):
"""
        Create and return an authentication token. The clear load needs to
contain the eauth key and the needed authentication creds.
"""
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ""
return token
def get_token(self, clear_load):
"""
Return the name associated with a token or False if the token is invalid
"""
if "token" not in clear_load:
return False
return self.loadauth.get_tok(clear_load["token"])
def publish(self, clear_load):
"""
        This method sends out publications to the minions; it can only be used
by the LocalClient.
"""
extra = clear_load.get("kwargs", {})
publisher_acl = salt.acl.PublisherACL(self.opts["publisher_acl_blacklist"])
if publisher_acl.user_is_blacklisted(
clear_load["user"]
) or publisher_acl.cmd_is_blacklisted(clear_load["fun"]):
log.error(
"%s does not have permissions to run %s. Please contact "
"your local administrator if you believe this is in "
"error.\n",
clear_load["user"],
clear_load["fun"],
)
return {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
# Retrieve the minions list
delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter
)
minions = _res.get("minions", list())
missing = _res.get("missing", list())
ssh_minions = _res.get("ssh_minions", False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == "user":
auth_check = self.loadauth.check_authentication(
clear_load, auth_type, key=key
)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get("auth_list", [])
err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type)
if auth_check.get("error"):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != "user" or (auth_type == "user" and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load["fun"],
clear_load["arg"],
clear_load["tgt"],
clear_load.get("tgt_type", "glob"),
minions=minions,
# always accept find_job
whitelist=["saltutil.find_job"],
)
if not authorized:
# Authorization error occurred. Do not continue.
if (
auth_type == "eauth"
and not auth_list
and "username" in extra
and "eauth" in extra
):
log.debug(
'Auth configuration for eauth "%s" and user "%s" is empty',
extra["eauth"],
extra["username"],
)
log.warning(err_msg)
return {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
# Perform some specific auth_type tasks after the authorization check
if auth_type == "token":
username = auth_check.get("username")
clear_load["user"] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == "eauth":
# The username we are attempting to auth with
clear_load["user"] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get("order_masters"):
# Check for no minions
if not minions:
return {
"enc": "clear",
"load": {
"jid": None,
"minions": minions,
"error": "Master could not resolve minions for target {}".format(
clear_load["tgt"]
),
},
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {"enc": "clear", "load": {"error": "Master failed to assign jid"}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
"enc": "clear",
"load": {"jid": clear_load["jid"], "minions": minions, "missing": missing},
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if "token" in clear_load:
auth_type = "token"
err_name = "TokenAuthenticationError"
sensitive_load_keys = ["token"]
elif "eauth" in clear_load:
auth_type = "eauth"
err_name = "EauthAuthenticationError"
sensitive_load_keys = ["username", "password"]
else:
auth_type = "user"
err_name = "UserAuthenticationError"
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
"""
Return a jid for this publication
"""
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load["jid"] if clear_load.get("jid") else None
nocache = extra.get("nocache", False)
# Retrieve the jid
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache, passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
"Failed to allocate a jid. The requested returner '{}' "
"could not be loaded.".format(fstr.split(".")[0])
)
log.error(msg)
return {"error": msg}
return jid
def _send_pub(self, load):
"""
Take a load and send it across the network to connected minions
"""
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, "_ssh_client"):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
"""
Take a load and send it across the network to ssh minions
"""
if self.opts["enable_ssh_minions"] is True and ssh_minions is True:
log.debug("Send payload to ssh minions")
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
"""
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
"""
clear_load["jid"] = jid
delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({"minions": minions}, clear_load["jid"])
new_job_load = {
"jid": clear_load["jid"],
"tgt_type": clear_load["tgt_type"],
"tgt": clear_load["tgt"],
"user": clear_load["user"],
"fun": clear_load["fun"],
"arg": clear_load["arg"],
"minions": minions,
"missing": missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load["jid"], "new"], "job"))
if self.opts["ext_job_cache"]:
fstr = "{}.save_load".format(self.opts["ext_job_cache"])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(
self.mminion.returners[fstr]
)
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if "minions" not in arg_spec.args:
log.critical(
"The specified returner used for the external job cache "
"'%s' does not have a 'minions' kwarg in the returner's "
"save_load function.",
self.opts["ext_job_cache"],
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
"The specified returner used for the external job cache "
'"%s" does not have a save_load function!',
self.opts["ext_job_cache"],
)
if save_load_func:
try:
self.mminion.returners[fstr](
clear_load["jid"], clear_load, minions=minions
)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified returner threw a stack trace:\n", exc_info=True
)
# always write out to the master job caches
try:
fstr = "{}.save_load".format(self.opts["master_job_cache"])
self.mminion.returners[fstr](clear_load["jid"], clear_load, minions)
except KeyError:
log.critical(
"The specified returner used for the master job cache "
'"%s" does not have a save_load function!',
self.opts["master_job_cache"],
)
except Exception: # pylint: disable=broad-except
log.critical("The specified returner threw a stack trace:\n", exc_info=True)
# Set up the payload
payload = {"enc": "aes"}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
"fun": clear_load["fun"],
"arg": clear_load["arg"],
"tgt": clear_load["tgt"],
"jid": clear_load["jid"],
"ret": clear_load["ret"],
}
# if you specified a master id, lets put that in the load
if "master_id" in self.opts:
load["master_id"] = self.opts["master_id"]
# if someone passed us one, use that
if "master_id" in extra:
load["master_id"] = extra["master_id"]
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load["delimiter"] = delimiter
if "id" in extra:
load["id"] = extra["id"]
if "tgt_type" in clear_load:
load["tgt_type"] = clear_load["tgt_type"]
if "to" in clear_load:
load["to"] = clear_load["to"]
if "kwargs" in clear_load:
if "ret_config" in clear_load["kwargs"]:
load["ret_config"] = clear_load["kwargs"].get("ret_config")
if "metadata" in clear_load["kwargs"]:
load["metadata"] = clear_load["kwargs"].get("metadata")
if "module_executors" in clear_load["kwargs"]:
load["module_executors"] = clear_load["kwargs"].get("module_executors")
if "executor_opts" in clear_load["kwargs"]:
load["executor_opts"] = clear_load["kwargs"].get("executor_opts")
if "ret_kwargs" in clear_load["kwargs"]:
load["ret_kwargs"] = clear_load["kwargs"].get("ret_kwargs")
if "user" in clear_load:
log.info(
"User %s Published command %s with jid %s",
clear_load["user"],
clear_load["fun"],
clear_load["jid"],
)
load["user"] = clear_load["user"]
else:
log.info(
"Published command %s with jid %s", clear_load["fun"], clear_load["jid"]
)
log.debug("Published command details %s", load)
return load
def ping(self, clear_load):
"""
Send the load back to the sender.
"""
return clear_load
def destroy(self):
if self.masterapi is not None:
self.masterapi.destroy()
self.masterapi = None
if self.local is not None:
self.local.destroy()
self.local = None
infer.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model import dnn_model_define
import paddle
import paddle.fluid as fluid
import os
import time
import numpy as np
import multiprocessing as mp
import sys
from paddle.distributed.fleet.dataset import TreeIndex
paddle.enable_static()
class Reader():
def __init__(self, item_nums):
self.item_nums = item_nums
def line_process(self, line):
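        # Expected line format (an assumption based on the parsing below); the first
        # tab-separated field is not used here:
        #   <field0>\t<gt_id1,gt_id2,...>\t<slot_i:feasign>\t<slot_j:feasign>...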
history_ids = [0] * (self.item_nums)
features = line.strip().split("\t")
groundtruth = [int(ff) for ff in features[1].split(',')]
for item in features[2:]:
slot, feasign = item.split(":")
slot_id = int(slot.split("_")[1])
history_ids[slot_id - 1] = int(feasign)
return groundtruth, history_ids
def dataloader(self, file_list):
"DataLoader Pyreader Generator"
def reader():
for file in file_list:
with open(file, 'r') as f:
for line in f:
                        groundtruth, output_list = self.line_process(line)
                        yield groundtruth, output_list
return reader
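# Usage sketch (hypothetical file name), assuming the tab-separated line format
# described in line_process above:
#   r = Reader(item_nums=69)
#   for groundtruth, history_ids in r.dataloader(["part-0"])():
#       ...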
def net_input(item_nums=69):
user_input = [
paddle.static.data(
name="item_" + str(i + 1), shape=[None, 1], dtype="int64")
for i in range(item_nums)
]
item = paddle.static.data(name="unit_id", shape=[None, 1], dtype="int64")
return user_input + [item]
def mp_run(data, process_num, func, *args):
""" run func with multi process
"""
level_start = time.time()
partn = int(max(len(data) / process_num, 1))
start = 0
p_idx = 0
ps = []
manager = mp.Manager()
res = manager.dict()
while start < len(data):
local_data = data[start:start + partn]
start += partn
p = mp.Process(target=func, args=(res, local_data, p_idx) + args)
ps.append(p)
p.start()
p_idx += 1
for p in ps:
p.join()
for p in ps:
p.terminate()
total_precision_rate = 0.0
total_recall_rate = 0.0
total_nums = 0
for i in range(p_idx):
print(i)
total_recall_rate += res["{}_recall".format(i)]
total_precision_rate += res["{}_precision".format(i)]
total_nums += res["{}_nums".format(i)]
print("global recall rate: {} / {} = {}".format(
total_recall_rate, total_nums, total_recall_rate / float(total_nums)))
print("global precision rate: {} / {} = {}".format(
total_precision_rate, total_nums, total_precision_rate / float(
total_nums)))
return p_idx
def load_tree_info(name, path, topk=200):
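    """Load the tree index and build id<->code lookup maps.
    Returns the id->code map, the code->id map, the tree's branching factor, and
    the node ids of the first layer that contains more than ``topk`` nodes (used
    as the initial candidate set during retrieval).
    """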
tree = TreeIndex(name, path)
all_codes = []
first_layer_code = None
for i in range(tree.height()):
layer_codes = tree.get_layer_codes(i)
        if len(layer_codes) > topk and first_layer_code is None:
first_layer_code = layer_codes
all_codes += layer_codes
all_ids = tree.get_nodes(all_codes)
id_code_map = {}
code_id_map = {}
for i in range(len(all_codes)):
        node_id = all_ids[i].id()  # avoid shadowing the builtin id()
        code = all_codes[i]
        id_code_map[node_id] = code
        code_id_map[code] = node_id
print(len(all_codes), len(all_ids), len(id_code_map), len(code_id_map))
first_layer = tree.get_nodes(first_layer_code)
first_layer = [node.id() for node in first_layer]
return id_code_map, code_id_map, tree.branch(), first_layer
def infer(res_dict, filelist, process_idx, init_model_path, id_code_map,
code_id_map, branch, first_layer_set, config):
print(process_idx, filelist, init_model_path)
item_nums = config.get("hyper_parameters.item_nums", 69)
topk = config.get("hyper_parameters.topk", 200)
node_nums = config.get("hyper_parameters.sparse_feature_num")
node_emb_size = config.get("hyper_parameters.node_emb_size")
input = net_input(item_nums)
embedding = paddle.nn.Embedding(
node_nums,
node_emb_size,
sparse=True,
weight_attr=paddle.framework.ParamAttr(
name="tdm.bw_emb.weight",
initializer=paddle.nn.initializer.Normal(std=0.001)))
user_feature = input[0:item_nums]
user_feature_emb = list(map(embedding, user_feature)) # [(bs, emb)]
unit_id_emb = embedding(input[-1])
dout = dnn_model_define(user_feature_emb, unit_id_emb)
softmax_prob = paddle.nn.functional.softmax(dout)
positive_prob = paddle.slice(softmax_prob, axes=[1], starts=[1], ends=[2])
prob_re = paddle.reshape(positive_prob, [-1])
_, topk_i = paddle.topk(prob_re, k=topk)
topk_node = paddle.index_select(input[-1], topk_i)
with open("main_program", 'w') as f:
f.write(str(paddle.static.default_main_program()))
exe = paddle.static.Executor(fluid.CPUPlace())
exe.run(paddle.static.default_startup_program())
print("begin to load parameters")
#fluid.io.load_persistables(exe, dirname=init_model_path)
paddle.static.load(paddle.static.default_main_program(),
init_model_path + '/rec_static')
print("end load parameters")
reader_instance = Reader(item_nums)
reader = reader_instance.dataloader(filelist)
total_recall_rate = 0.0
total_precision_rate = 0.0
total_nums = 0
child_info = dict()
    for groundtruth, user_input in reader():
total_nums += 1
recall_result = []
candidate = first_layer_set
idx = 8
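        # Layer-wise beam search over the tree (as implemented below): score the
        # current candidates with the model, keep the top-k, expand their children
        # into the next candidate set, and collect childless nodes (leaves) as
        # recall results until topk items have been retrieved.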
while (len(recall_result) < topk):
idx += 1
feed_dict = {}
            for i in range(1, item_nums + 1):  # one placeholder per user-history slot
feed_dict['item_' + str(i)] = np.ones(
shape=[len(candidate), 1],
dtype='int64') * user_input[i - 1]
feed_dict['unit_id'] = np.array(
candidate, dtype='int64').reshape(-1, 1)
res = exe.run(program=paddle.static.default_main_program(),
feed=feed_dict,
fetch_list=[topk_node.name])
topk_node_res = res[0].reshape([-1]).tolist()
candidate = []
for i in range(len(topk_node_res)):
node = topk_node_res[i]
if node not in child_info:
child_info[node] = []
node_code = id_code_map[node]
for j in range(1, branch + 1):
child_code = node_code * branch + j
if child_code in code_id_map:
child_info[node].append(code_id_map[child_code])
if len(child_info[node]) == 0:
recall_result.append(node)
else:
candidate = candidate + child_info[node]
recall_result = recall_result[:topk]
        intersec = list(set(recall_result).intersection(set(groundtruth)))
        total_recall_rate += float(len(intersec)) / float(len(groundtruth))
total_precision_rate += float(len(intersec)) / float(
len(recall_result))
if (total_nums % 100 == 0):
print("global recall rate: {} / {} = {}".format(
total_recall_rate, total_nums, total_recall_rate / float(
total_nums)))
print("global precision rate: {} / {} = {}".format(
total_precision_rate, total_nums, total_precision_rate / float(
total_nums)))
res_dict["{}_recall".format(process_idx)] = total_recall_rate
res_dict["{}_precision".format(process_idx)] = total_precision_rate
res_dict["{}_nums".format(process_idx)] = total_nums
print("process idx:{}, global recall rate: {} / {} = {}".format(
process_idx, total_recall_rate, total_nums, total_recall_rate / float(
total_nums)))
print("process idx:{}, global precision rate: {} / {} = {}".format(
process_idx, total_precision_rate, total_nums, total_precision_rate /
float(total_nums)))
if __name__ == '__main__':
utils_path = "{}/tools/utils/static_ps".format(
os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd()))))
sys.path.append(utils_path)
print(utils_path)
import common
yaml_helper = common.YamlHelper()
config = yaml_helper.load_yaml(sys.argv[1])
test_files_path = "../demo_data/test_data"
filelist = [
"{}/{}".format(test_files_path, x) for x in os.listdir(test_files_path)
]
print(filelist)
init_model_path = sys.argv[2]
print(init_model_path)
tree_name = config.get("hyper_parameters.tree_name")
tree_path = config.get("hyper_parameters.tree_path")
print("tree_name: {}".format(tree_name))
print("tree_path: {}".format(tree_path))
id_code_map, code_id_map, branch, first_layer_set = load_tree_info(
tree_name, tree_path)
mp_run(filelist, 12, infer, init_model_path, id_code_map, code_id_map,
branch, first_layer_set, config)
|
time_elapsed.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'time_elapsed2.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
# from Frame_recording import Screen
from threading import Thread
import settings
import subprocess
import warnings
import os
import signal
warnings.filterwarnings('ignore')
TICK_TIME = 2**6  # timer tick interval in milliseconds (64 ms)
rThread = 0
eyeGaze = 0
class recordThread(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
def __del__(self):
self.wait()
    def run(self):
        # store the handle in the module-level global so the process can be referenced later
        global rThread
        rThread = subprocess.Popen(["python3", "../text_extraction/Frame_recording.py"])
class gazeThread(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
def __del__(self):
self.wait()
    def run(self):
        # store the handle in the module-level global so the process can be referenced later
        global eyeGaze
        eyeGaze = subprocess.Popen(["./opengazer"], stdout=subprocess.PIPE)
with open("gaze_PID", "w") as f:
f.write(str(eyeGaze.pid))
gaze_points = eyeGaze.communicate()[0]
with open("gaze_points.csv", "w") as f:
f.write(gaze_points.decode('utf-8'))
class Ui_Dialog_time_elapsed(Qt.QMainWindow, object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(248, 151)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/images/I5nk-Pen-icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
Dialog.setStyleSheet("background-image: url(:/images/shutterstock_1023246931_364607.jpg);\n"
"background-position: center center;\n"
"background-repeat: no-repeat;")
self.gridLayout_3 = QtWidgets.QGridLayout(Dialog)
self.gridLayout_3.setObjectName("gridLayout_3")
self.frame = QtWidgets.QFrame(Dialog)
self.frame.setStyleSheet("background-image: url(:/images/transparent-photoshop-background-grid-260nw-1023662581.jpg);")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.gridLayout = QtWidgets.QGridLayout(self.frame)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.frame)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.toolButton = QtWidgets.QToolButton(self.frame)
self.toolButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/images/images.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon1)
self.toolButton.setObjectName("toolButton")
self.gridLayout.addWidget(self.toolButton, 0, 1, 1, 1)
self.lcdNumber = QtWidgets.QLCDNumber(self.frame)
font = QtGui.QFont()
font.setKerning(True)
self.lcdNumber.setFont(font)
self.lcdNumber.setCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.lcdNumber.setStyleSheet("color: red;")
self.lcdNumber.setFrameShape(QtWidgets.QFrame.Box)
self.lcdNumber.setSmallDecimalPoint(False)
self.lcdNumber.setDigitCount(10)
self.lcdNumber.setObjectName("lcdNumber")
self.gridLayout.addWidget(self.lcdNumber, 1, 0, 1, 2)
self.gridLayout_3.addWidget(self.frame, 0, 0, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
# self.sc = Screen()
self.toolButton.clicked.connect(self.recording)
self.record_Thread = recordThread()
self.gaze_thread = gazeThread()
self.dialog = Dialog
self.timer = Qt.QTimer()
self.timer.setInterval(TICK_TIME)
self.timer.timeout.connect(self.tick)
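        # The QTimer fires every TICK_TIME milliseconds; tick() accumulates the
        # elapsed time in seconds and display() renders it as M:SS.ss on the LCD.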
self.notRecording()
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Time Elapsed"))
self.label.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:14pt;\">Not Recording</span></p></body></html>"))
self.toolButton.setText(_translate("Dialog", "Start Recording"))
def display(self):
self.lcdNumber.display("%d:%05.2f"%(self.time//60, self.time%60))
@Qt.pyqtSlot()
def tick(self):
self.time += TICK_TIME/1000
self.display()
def recording(self):
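        # "main_config" appears to act as a simple flag file shared with the external
        # recorder processes: the first two entries are set to "yes" here to start
        # recording and back to "no" in stopedRecording() (an assumption based on how
        # the file is used in this class).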
with open("main_config", "r") as f:
buffer = f.read().split()
with open("main_config", "w") as f:
buffer[0] = "yes"
buffer[1] = "yes"
f.write("\n".join(buffer))
self.gaze_thread.start()
self.record_Thread.start()
self.timer.start()
_translate = QtCore.QCoreApplication.translate
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/images/images_stop.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon1)
self.toolButton.clicked.disconnect()
self.label.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:12pt; color:#ff0000;\">Recording......</span></p></body></html>"))
self.toolButton.clicked.connect(self.stopedRecording)
# self.threadTimer = Thread(target=self.sc.start_recording, args=())
# self.threadTimer.start()
# threadRecorder = Thread(target= self.record, args=())
# threadRecorder.start()
# self.sc.start_recording()
# self.record()
# def record(self):
# while True:
# if self.sc.new_frame:
# print('Recording!')
# self.sc.capture_Frames()
# # time.sleep(0.1)
@Qt.pyqtSlot()
def stopedRecording(self):
settings.stop_recording()
self.timer.stop()
# global rThread
# os.killpg(os.getpgid(rThread.pid), signal.SIGTERM)
# global gazeThread
# os.killpg(os.getpgid(gazeThread.pid), signal.SIGTERM)
with open("main_config", "r") as f:
buffer = f.read().split()
with open("main_config", "w") as f:
buffer[0] = "no"
buffer[1] = "no"
f.write("\n".join(buffer))
# self.threadTimer.join()
# self.sc.on_stop_recording()
QtWidgets.QDialog.accept(self.dialog)
#QtWidgets.QDialog.close(self.dialog)
@Qt.pyqtSlot()
def notRecording(self):
self.time = 0
self.display()
# def getSummary(self):
# return self.sc.summary
import time_elapsed_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog_time_elapsed()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
test_server.py
|
import os
from http import HTTPStatus
from multiprocessing.managers import DictProxy
from pathlib import Path
from unittest.mock import Mock, ANY
import requests
import time
import uuid
import urllib.parse
from typing import List, Text, Type, Generator, NoReturn, Dict, Optional
from contextlib import ExitStack
from _pytest import pathlib
from _pytest.monkeypatch import MonkeyPatch
from aioresponses import aioresponses
import pytest
from freezegun import freeze_time
from mock import MagicMock
from multiprocessing import Process, Manager
import rasa
import rasa.constants
import rasa.shared.constants
import rasa.shared.utils.io
import rasa.utils.io
import rasa.server
from rasa.core import utils
from rasa.core.tracker_store import InMemoryTrackerStore
from rasa.shared.core import events
from rasa.core.agent import Agent
from rasa.core.channels import (
channel,
CollectingOutputChannel,
RestInput,
SlackInput,
CallbackInput,
)
from rasa.core.channels.slack import SlackBot
from rasa.shared.core.constants import ACTION_SESSION_START_NAME, ACTION_LISTEN_NAME
from rasa.shared.core.domain import Domain, SessionConfig
from rasa.shared.core.events import (
Event,
UserUttered,
SlotSet,
BotUttered,
ActionExecuted,
SessionStarted,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.model import unpack_model
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.utils.endpoints import EndpointConfig
from sanic import Sanic
from sanic.testing import SanicASGITestClient
from tests.nlu.utilities import ResponseTest
from tests.utilities import json_of_latest_request, latest_request
from ruamel.yaml import StringIO
# a couple of event instances that we can use for testing
test_events = [
Event.from_parameters(
{
"event": UserUttered.type_name,
"text": "/goodbye",
"parse_data": {
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"entities": [],
},
}
),
BotUttered("Welcome!", {"test": True}),
SlotSet("cuisine", 34),
SlotSet("cuisine", "34"),
SlotSet("location", None),
SlotSet("location", [34, "34", None]),
]
# sequence of events expected at the beginning of trackers
session_start_sequence: List[Event] = [
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicASGITestClient:
return rasa_server_without_api.asgi_client
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicASGITestClient:
return rasa_server.asgi_client
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicASGITestClient:
return rasa_nlu_server.asgi_client
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicASGITestClient:
return rasa_core_server.asgi_client
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicASGITestClient:
return rasa_server_secured.asgi_client
async def test_root(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_root_without_enable_api(rasa_app_without_api: SanicASGITestClient):
_, response = await rasa_app_without_api.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_root_secured(rasa_secured_app: SanicASGITestClient):
_, response = await rasa_secured_app.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_version(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/version")
content = response.json()
assert response.status == HTTPStatus.OK
assert content.get("version") == rasa.__version__
assert (
content.get("minimum_compatible_version")
== rasa.constants.MINIMUM_COMPATIBLE_VERSION
)
async def test_status(rasa_app: SanicASGITestClient, trained_rasa_model: Text):
_, response = await rasa_app.get("/status")
model_file = response.json()["model_file"]
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert os.path.isfile(model_file)
assert model_file == trained_rasa_model
async def test_status_nlu_only(
rasa_app_nlu: SanicASGITestClient, trained_nlu_model: Text
):
_, response = await rasa_app_nlu.get("/status")
model_file = response.json()["model_file"]
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert "model_file" in response.json()
assert model_file == trained_nlu_model
async def test_status_secured(rasa_secured_app: SanicASGITestClient):
_, response = await rasa_secured_app.get("/status")
assert response.status == HTTPStatus.UNAUTHORIZED
async def test_status_not_ready_agent(rasa_app: SanicASGITestClient):
rasa_app.app.agent = None
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.CONFLICT
@pytest.fixture
def shared_statuses() -> DictProxy:
return Manager().dict()
@pytest.fixture
def background_server(
shared_statuses: DictProxy, tmpdir: pathlib.Path
) -> Generator[Process, None, None]:
# Create a fake model archive which the mocked train function can return
fake_model = Path(tmpdir) / "fake_model.tar.gz"
fake_model.touch()
fake_model_path = str(fake_model)
# Fake training function which blocks until we tell it to stop blocking
# If we can send a status request while this is blocking, we can be sure that the
# actual training is also not blocking
def mocked_training_function(*_, **__) -> Text:
# Tell the others that we are now blocking
shared_statuses["started_training"] = True
# Block until somebody tells us to not block anymore
while shared_statuses.get("stop_training") is not True:
time.sleep(1)
return fake_model_path
def run_server() -> NoReturn:
rasa.train = mocked_training_function
from rasa import __main__
import sys
sys.argv = ["rasa", "run", "--enable-api"]
__main__.main()
server = Process(target=run_server)
yield server
server.terminate()
@pytest.fixture()
def training_request(
shared_statuses: DictProxy, tmp_path: Path
) -> Generator[Process, None, None]:
def send_request() -> None:
payload = {}
project_path = Path("examples") / "formbot"
for file in [
"domain.yml",
"config.yml",
Path("data") / "rules.yml",
Path("data") / "stories.yml",
Path("data") / "nlu.yml",
]:
full_path = project_path / file
            # Read the files in as dictionaries so that keys which are specified in
            # multiple files (such as 'version') don't clash.
content = rasa.shared.utils.io.read_yaml_file(full_path)
payload.update(content)
concatenated_payload_file = tmp_path / "concatenated.yml"
rasa.shared.utils.io.write_yaml(payload, concatenated_payload_file)
payload_as_yaml = concatenated_payload_file.read_text()
response = requests.post(
"http://localhost:5005/model/train",
data=payload_as_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"force_training": True},
)
shared_statuses["training_result"] = response.status_code
train_request = Process(target=send_request)
yield train_request
train_request.terminate()
# For unknown reasons this test cannot be run in PyCharm (it results in
# segfaults), so we skip it in that case; the test will still get run on CI.
# It also doesn't run on Windows because of Process-related calls and an
# attempt to start/terminate a process. We will investigate this case further
# later: https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
background_server: Process, shared_statuses: DictProxy, training_request: Process
):
background_server.start()
def is_server_ready() -> bool:
try:
return (
requests.get("http://localhost:5005/status").status_code
== HTTPStatus.OK
)
except Exception:
return False
# wait until server is up before sending train request and status test loop
start = time.time()
while not is_server_ready() and time.time() - start < 60:
time.sleep(1)
assert is_server_ready()
training_request.start()
# Wait until the blocking training function was called
start = time.time()
while (
shared_statuses.get("started_training") is not True and time.time() - start < 60
):
time.sleep(1)
# Check if the number of currently running trainings was incremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == HTTPStatus.OK
assert response.json()["num_active_training_jobs"] == 1
# Tell the blocking training function to stop
shared_statuses["stop_training"] = True
start = time.time()
while shared_statuses.get("training_result") is None and time.time() - start < 60:
time.sleep(1)
assert shared_statuses.get("training_result")
# Check that the training worked correctly
assert shared_statuses["training_result"] == HTTPStatus.OK
# Check if the number of currently running trainings was decremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == HTTPStatus.OK
assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse(rasa_app: SanicASGITestClient, response_test: ResponseTest):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
rjs = response.json()
assert response.status == HTTPStatus.OK
assert all(prop in rjs for prop in ["entities", "intent", "text"])
assert rjs["entities"] == response_test.expected_response["entities"]
assert rjs["text"] == response_test.expected_response["text"]
assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse?emulation_mode=wit",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=dialogflow",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=luis",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse_with_different_emulation_mode(
rasa_app: SanicASGITestClient, response_test: ResponseTest
):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
assert response.status == HTTPStatus.OK
async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient):
_, response = await rasa_app_core.post("/model/parse", json={"text": "hello"})
assert response.status == HTTPStatus.OK
rjs = response.json()
assert all(prop in rjs for prop in ["entities", "intent", "text"])
async def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicASGITestClient):
_, response = await rasa_app_nlu.post(
"/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_stack_success(
rasa_app: SanicASGITestClient,
default_domain_path: Text,
default_stories_file: Text,
default_stack_config: Text,
default_nlu_data: Text,
tmp_path: Path,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
stories_file = stack.enter_context(open(default_stories_file))
nlu_file = stack.enter_context(open(default_nlu_data))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=stories_file.read(),
nlu=nlu_file.read(),
)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.OK
assert response.headers["filename"] is not None
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_nlu_success(
rasa_app: SanicASGITestClient,
default_stack_config: Text,
default_nlu_data: Text,
default_domain_path: Text,
tmp_path: Path,
):
domain_data = rasa.shared.utils.io.read_yaml_file(default_domain_path)
config_data = rasa.shared.utils.io.read_yaml_file(default_stack_config)
nlu_data = rasa.shared.utils.io.read_yaml_file(default_nlu_data)
# combine all data into our payload
payload = {
key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items()
}
data = StringIO()
rasa.shared.utils.io.write_yaml(payload, data)
_, response = await rasa_app.post(
"/model/train",
data=data.getvalue(),
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_core_success(
rasa_app: SanicASGITestClient,
default_stack_config: Text,
default_stories_file: Text,
default_domain_path: Text,
tmp_path: Path,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(open(default_stories_file))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.OK
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_with_retrieval_events_success(
rasa_app: SanicASGITestClient, default_stack_config: Text, tmp_path: Path
):
with ExitStack() as stack:
domain_file = stack.enter_context(
open("data/test_domains/default_retrieval_intents.yml")
)
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(
open("data/test_stories/stories_retrieval_intents.md")
)
responses_file = stack.enter_context(open("data/test_responses/default.md"))
nlu_file = stack.enter_context(
open("data/test_nlu/default_retrieval_intents.md")
)
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
responses=responses_file.read(),
nlu=nlu_file.read(),
)
_, response = await rasa_app.post("/model/train", json=payload, timeout=60 * 5)
assert response.status == HTTPStatus.OK
assert_trained_model(response.body, tmp_path)
def assert_trained_model(response_body: bytes, tmp_path: Path) -> None:
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response_body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
@pytest.mark.parametrize(
"payload",
[
{"config": None, "stories": None, "nlu": None, "domain": None, "force": True},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"force": False,
"save_to_default_model_directory": True,
},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"save_to_default_model_directory": False,
},
],
)
def test_deprecation_warnings_json_payload(payload: Dict):
with pytest.warns(FutureWarning):
rasa.server._validate_json_training_payload(payload)
async def test_train_with_yaml(rasa_app: SanicASGITestClient, tmp_path: Path):
training_data = """
stories:
- story: My story
steps:
- intent: greet
- action: utter_greet
rules:
- rule: My rule
steps:
- intent: greet
- action: utter_greet
intents:
- greet
nlu:
- intent: greet
examples: |
- hi
- hello
responses:
utter_greet:
- text: Hi
language: en
policies:
- name: RulePolicy
pipeline:
- name: KeywordIntentClassifier
"""
_, response = await rasa_app.post(
"/model/train",
data=training_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert_trained_model(response.body, tmp_path)
async def test_train_with_invalid_yaml(rasa_app: SanicASGITestClient):
invalid_yaml = """
rules:
rule my rule
"""
_, response = await rasa_app.post(
"/model/train",
data=invalid_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
@pytest.mark.parametrize(
"headers, expected",
[({}, False), ({"force_training": False}, False), ({"force_training": True}, True)],
)
def test_training_payload_from_yaml_force_training(headers: Dict, expected: bool):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request)
assert payload.get("force_training") == expected
@pytest.mark.parametrize(
"headers, expected",
[
({}, rasa.shared.constants.DEFAULT_MODELS_PATH),
({"save_to_default_model_directory": False}, ANY),
(
{"save_to_default_model_directory": True},
rasa.shared.constants.DEFAULT_MODELS_PATH,
),
],
)
def test_training_payload_from_yaml_save_to_default_model_directory(
headers: Dict, expected: Text
):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request)
assert payload.get("output")
assert payload.get("output") == expected
async def test_train_missing_config(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config=None)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_missing_training_data(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config="config data")
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_internal_error(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config="config data", nlu="nlu data")
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_evaluate_stories(
rasa_app: SanicASGITestClient, default_stories_file: Text
):
stories = rasa.shared.utils.io.read_file(default_stories_file)
_, response = await rasa_app.post("/model/test/stories", data=stories)
assert response.status == HTTPStatus.OK
js = response.json()
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert not js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_stories_not_ready_agent(
rasa_app_nlu: SanicASGITestClient, default_stories_file: Text
):
stories = rasa.shared.utils.io.read_file(default_stories_file)
_, response = await rasa_app_nlu.post("/model/test/stories", data=stories)
assert response.status == HTTPStatus.CONFLICT
async def test_evaluate_stories_end_to_end(
rasa_app: SanicASGITestClient, end_to_end_story_file: Text
):
stories = rasa.shared.utils.io.read_file(end_to_end_story_file)
_, response = await rasa_app.post("/model/test/stories?e2e=true", data=stories)
assert response.status == HTTPStatus.OK
js = response.json()
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert js["is_end_to_end_evaluation"]
assert js["actions"] != []
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_intent(rasa_app: SanicASGITestClient, default_nlu_data: Text):
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_intent_on_just_nlu_model(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_intent_with_model_param(
rasa_app: SanicASGITestClient, trained_nlu_model, default_nlu_data: Text
):
_, response = await rasa_app.get("/status")
previous_model_file = response.json()["model_file"]
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app.post(
f"/model/test/intents?model={trained_nlu_model}",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
_, response = await rasa_app.get("/status")
assert previous_model_file == response.json()["model_file"]
async def test_predict(rasa_app: SanicASGITestClient):
data = {
"Events": {
"value": [
{"event": "action", "name": "action_listen"},
{
"event": "user",
"text": "hello",
"parse_data": {
"entities": [],
"intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
},
]
}
}
_, response = await rasa_app.post(
"/model/predict",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json()
assert response.status == HTTPStatus.OK
assert "scores" in content
assert "tracker" in content
assert "policy" in content
@freeze_time("2018-01-01")
async def test_requesting_non_existent_tracker(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/conversations/madeupid/tracker")
content = response.json()
assert response.status == HTTPStatus.OK
assert content["paused"] is False
assert content["slots"] == {"name": None}
assert content["sender_id"] == "madeupid"
assert content["events"] == [
{
"event": "action",
"name": "action_session_start",
"policy": None,
"confidence": 1,
"timestamp": 1514764800,
},
{"event": "session_started", "timestamp": 1514764800},
{
"event": "action",
INTENT_NAME_KEY: "action_listen",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
},
]
assert content["latest_message"] == {
"text": None,
"intent": {},
"entities": [],
"message_id": None,
"metadata": {},
}
@pytest.mark.parametrize("event", test_events)
async def test_pushing_event(rasa_app: SanicASGITestClient, event: Event):
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = event.as_dict()
# Remove timestamp so that a new one is assigned on the server
serialized_event.pop("timestamp")
time_before_adding_events = time.time()
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json() is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(f"/conversations/{sender_id}/tracker")
tracker = tracker_response.json()
assert tracker is not None
assert len(tracker.get("events")) == 4
deserialized_events = [Event.from_parameters(event) for event in tracker["events"]]
# there is an initial session start sequence at the beginning of the tracker
assert deserialized_events[:3] == session_start_sequence
assert deserialized_events[3] == event
assert deserialized_events[3].timestamp > time_before_adding_events
async def test_push_multiple_events(rasa_app: SanicASGITestClient):
conversation_id = str(uuid.uuid1())
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json() is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json()
assert tracker is not None
# there is an initial session start sequence at the beginning
assert [
Event.from_parameters(event) for event in tracker.get("events")
] == session_start_sequence + test_events
@pytest.mark.parametrize(
"params", ["?execute_side_effects=true&output_channel=callback", ""]
)
async def test_pushing_event_while_executing_side_effects(
rasa_server: Sanic, params: Text
):
input_channel = CallbackInput(EndpointConfig("https://example.com/callback"))
channel.register([input_channel], rasa_server, "/webhooks/")
rasa_app = rasa_server.asgi_client
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = test_events[1].as_dict()
with aioresponses() as mocked:
mocked.post(
"https://example.com/callback",
repeat=True,
headers={"Content-Type": "application/json"},
)
await rasa_app.post(
f"{conversation}/tracker/events{params}",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
r = latest_request(mocked, "post", "https://example.com/callback")
if not params:
assert r is None
else:
message_received = json_of_latest_request(r)
assert message_received.get("recipient_id") == sender_id
assert message_received.get("text") == serialized_event.get("text")
async def test_post_conversation_id_with_slash(rasa_app: SanicASGITestClient):
conversation_id = str(uuid.uuid1())
id_len = len(conversation_id) // 2
conversation_id = conversation_id[:id_len] + "/+-_\\=" + conversation_id[id_len:]
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": "application/json"},
)
assert response.json() is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json()
assert tracker is not None
# there is a session start sequence at the start
assert [
Event.from_parameters(event) for event in tracker.get("events")
] == session_start_sequence + test_events
async def test_put_tracker(rasa_app: SanicASGITestClient):
data = [event.as_dict() for event in test_events]
_, response = await rasa_app.put(
"/conversations/pushtracker/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json()
assert response.status == HTTPStatus.OK
assert len(content["events"]) == len(test_events)
assert content["sender_id"] == "pushtracker"
_, tracker_response = await rasa_app.get("/conversations/pushtracker/tracker")
tracker = tracker_response.json()
assert tracker is not None
evts = tracker.get("events")
assert events.deserialise_events(evts) == test_events
async def test_sorted_predict(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "sortedpredict")
_, response = await rasa_app.post("/conversations/sortedpredict/predict")
scores = response.json()["scores"]
sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"]))
assert scores == sorted_scores
async def _create_tracker_for_sender(app: SanicASGITestClient, sender_id: Text) -> None:
data = [event.as_dict() for event in test_events[:3]]
_, response = await app.put(
f"/conversations/{sender_id}/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
async def test_get_tracker_with_jwt(rasa_secured_app: SanicASGITestClient):
# token generated with secret "core" and algorithm HS256
# on https://jwt.io/
# {"user": {"username": "testadmin", "role": "admin"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
"m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
"QRre7IWTuIDrCn5AIw"
}
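    # A token like the one above could also be generated programmatically, e.g. with
    # PyJWT (illustrative sketch, assuming the secret "core"):
    #   import jwt
    #   jwt.encode({"user": {"username": "testadmin", "role": "admin"}}, "core", algorithm="HS256")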
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
# {"user": {"username": "testuser", "role": "user"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
"2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
"HJHOxxC_w7TtwCrs"
}
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.FORBIDDEN
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
def test_list_routes(default_agent: Agent):
app = rasa.server.create_app(default_agent, auth_token=None)
routes = utils.list_routes(app)
assert set(routes.keys()) == {
"hello",
"version",
"status",
"retrieve_tracker",
"append_events",
"replace_events",
"retrieve_story",
"execute_action",
"trigger_intent",
"predict",
"add_message",
"train",
"evaluate_stories",
"evaluate_intents",
"tracker_predict",
"parse",
"load_model",
"unload_model",
"get_domain",
}
async def test_unload_model_error(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "model_file" in response.json() and response.json()["model_file"] is not None
_, response = await rasa_app.delete("/model")
assert response.status == HTTPStatus.NO_CONTENT
async def test_get_domain(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get(
"/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE}
)
content = response.json()
assert response.status == HTTPStatus.OK
assert "config" in content
assert "intents" in content
assert "entities" in content
assert "slots" in content
assert "responses" in content
assert "actions" in content
async def test_get_domain_invalid_accept_header(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/domain")
assert response.status == HTTPStatus.NOT_ACCEPTABLE
async def test_load_model(rasa_app: SanicASGITestClient, trained_core_model: Text):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
old_fingerprint = response.json()["fingerprint"]
data = {"model_file": trained_core_model}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.NO_CONTENT
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert old_fingerprint != response.json()["fingerprint"]
async def test_load_model_from_model_server(
rasa_app: SanicASGITestClient, trained_core_model: Text
):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
old_fingerprint = response.json()["fingerprint"]
endpoint = EndpointConfig("https://example.com/model/trained_core_model")
with open(trained_core_model, "rb") as f:
with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
headers = {}
fs = os.fstat(f.fileno())
headers["Content-Length"] = str(fs[6])
mocked.get(
"https://example.com/model/trained_core_model",
content_type="application/x-tar",
body=f.read(),
)
data = {"model_server": {"url": endpoint.url}}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.NO_CONTENT
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert old_fingerprint != response.json()["fingerprint"]
import rasa.core.jobs
rasa.core.jobs.__scheduler = None
async def test_load_model_invalid_request_body(rasa_app: SanicASGITestClient):
_, response = await rasa_app.put("/model")
assert response.status == HTTPStatus.BAD_REQUEST
async def test_load_model_invalid_configuration(rasa_app: SanicASGITestClient):
data = {"model_file": "some-random-path"}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "test_execute")
data = {INTENT_NAME_KEY: "utter_greet"}
_, response = await rasa_app.post("/conversations/test_execute/execute", json=data)
assert response.status == HTTPStatus.OK
parsed_content = response.json()
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_execute_with_missing_action_name(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_missing_action_name"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"wrong-key": "utter_greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute_with_not_existing_action(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_not_existing_action"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"name": "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_trigger_intent(rasa_app: SanicASGITestClient):
data = {INTENT_NAME_KEY: "greet"}
_, response = await rasa_app.post(
"/conversations/test_trigger/trigger_intent", json=data
)
assert response.status == HTTPStatus.OK
parsed_content = response.json()
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_trigger_intent_with_missing_intent_name(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_missing_action_name"
data = {"wrong-key": "greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_trigger_intent_with_not_existing_intent(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_not_existing_intent"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == HTTPStatus.NOT_FOUND
@pytest.mark.parametrize(
"input_channels, output_channel_to_use, expected_channel",
[
(None, "slack", CollectingOutputChannel),
([], None, CollectingOutputChannel),
([RestInput()], "slack", CollectingOutputChannel),
([RestInput()], "rest", CollectingOutputChannel),
(
[RestInput(), SlackInput("test", slack_signing_secret="foobar")],
"slack",
SlackBot,
),
],
)
def test_get_output_channel(
input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type
):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": output_channel_to_use}
actual = rasa.server._get_output_channel(request, None)
assert isinstance(actual, expected_channel)
@pytest.mark.parametrize(
"input_channels, expected_channel",
[
([], CollectingOutputChannel),
([RestInput()], CollectingOutputChannel),
([RestInput(), SlackInput("test", slack_signing_secret="foobar")], SlackBot),
],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": "latest"}
tracker = DialogueStateTracker.from_events(
"default", [UserUttered("text", input_channel="slack")]
)
actual = rasa.server._get_output_channel(request, tracker)
assert isinstance(actual, expected_channel)
def test_app_when_app_has_no_input_channels():
request = MagicMock()
class NoInputChannels:
pass
request.app = NoInputChannels()
actual = rasa.server._get_output_channel(
request, DialogueStateTracker.from_events("default", [])
)
assert isinstance(actual, CollectingOutputChannel)
@pytest.mark.parametrize(
"conversation_events,until_time,fetch_all_sessions,expected",
# conversation with one session
[
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# conversation with multiple sessions
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID, story 1
steps:
- intent: greet
user: |-
hi
- action: utter_greet
- story: some-conversation-ID, story 2
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# conversation with multiple sessions, but setting `all_sessions=false`
# means only the last one is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
False,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# the default for `all_sessions` is `false` - this test checks that
# only the latest session is returned in that case
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
None,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# `until` parameter means only the first session is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
SessionStarted(timestamp=2),
UserUttered("hi", {"name": "greet"}, timestamp=3),
ActionExecuted("utter_greet", timestamp=4),
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=5),
SessionStarted(timestamp=6),
UserUttered("bye bye", {"name": "goodbye"}, timestamp=7),
ActionExecuted("utter_goodbye", timestamp=8),
],
4,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# empty conversation
([], None, True, 'version: "2.0"'),
],
)
async def test_get_story(
rasa_app: SanicASGITestClient,
monkeypatch: MonkeyPatch,
conversation_events: List[Event],
until_time: Optional[float],
fetch_all_sessions: Optional[bool],
expected: Text,
):
conversation_id = "some-conversation-ID"
tracker_store = InMemoryTrackerStore(Domain.empty())
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
tracker_store.save(tracker)
monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
url = f"/conversations/{conversation_id}/story?"
query = {}
if fetch_all_sessions is not None:
query["all_sessions"] = fetch_all_sessions
if until_time is not None:
query["until"] = until_time
_, response = await rasa_app.get(url + urllib.parse.urlencode(query))
assert response.status == HTTPStatus.OK
assert response.content.decode().strip() == expected
async def test_get_story_does_not_update_conversation_session(
rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
conversation_id = "some-conversation-ID"
# domain with short session expiration time of one second
domain = Domain.empty()
domain.session_config = SessionConfig(
session_expiration_time=1 / 60, carry_over_slots=True
)
monkeypatch.setattr(rasa_app.app.agent, "domain", domain)
# conversation contains one session that has expired
now = time.time()
conversation_events = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=now - 10),
SessionStarted(timestamp=now - 9),
UserUttered("hi", {"name": "greet"}, timestamp=now - 8),
ActionExecuted("utter_greet", timestamp=now - 7),
]
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
# the conversation session has expired
assert rasa_app.app.agent.create_processor()._has_session_expired(tracker)
tracker_store = InMemoryTrackerStore(domain)
tracker_store.save(tracker)
monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
_, response = await rasa_app.get(f"/conversations/{conversation_id}/story")
assert response.status == HTTPStatus.OK
# expected story is returned
assert (
response.content.decode().strip()
== """version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet"""
)
# the tracker has the same number of events as were initially added
assert len(tracker.events) == len(conversation_events)
# the last event is still the same as before
assert tracker.events[-1].timestamp == conversation_events[-1].timestamp
@pytest.mark.parametrize(
"initial_tracker_events,events_to_append,expected_events",
[
(
# the tracker is initially empty, and no events are appended
# so we'll just expect the session start sequence with an `action_listen`
[],
[],
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
],
),
(
# the tracker is initially empty, and a user utterance is appended
# we expect a tracker with a session start sequence and a user utterance
[],
[UserUttered("/greet", {"name": "greet", "confidence": 1.0})],
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
],
),
(
# the tracker is initially empty, and a session start sequence is appended
# we'll just expect the session start sequence
[],
[ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
[ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
),
(
# the tracker already contains some events - we can simply append events
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
],
[ActionExecuted("utter_greet")],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
ActionExecuted("utter_greet"),
],
),
],
)
async def test_update_conversation_with_events(
rasa_app: SanicASGITestClient,
monkeypatch: MonkeyPatch,
initial_tracker_events: List[Event],
events_to_append: List[Event],
expected_events: List[Event],
):
conversation_id = "some-conversation-ID"
domain = Domain.empty()
tracker_store = InMemoryTrackerStore(domain)
monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
if initial_tracker_events:
tracker = DialogueStateTracker.from_events(
conversation_id, initial_tracker_events
)
tracker_store.save(tracker)
fetched_tracker = await rasa.server.update_conversation_with_events(
conversation_id, rasa_app.app.agent.create_processor(), domain, events_to_append
)
assert list(fetched_tracker.events) == expected_events
|
automating_by_check_box_csv.py
|
"""
'Automated AC' CSV Test Program v2.5, Copyright 2017 Sam Suri
CSV Test program retrieves live data but does not update ANYTHING in VAN. It shows the user what would happen via
a CSV file. The program should be run prior to running the full program.
"""
import hmac, hashlib, time, json, requests, copy
import threading
import urllib.request
from queue import Queue
from pandas import DataFrame
from json import dumps, loads
from xmljson import yahoo as yh
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import fromstring
# Declaring global constants; these are form dependent and will vary from form to form
# These strings represent the ID of a check box, if checked, this ID is present
SPAN_LIST = ['4583']
HOUSE_LIST = ['4717']
VDR_LIST = ['4713']
NTL_LIST = ['4716']
VOLUNTEER_LIST = ['4713', '4715']
# Integer is constant that will be referenced for loop inserting into Queue 1
NUM_ACTIVIST_CODES = 5
# Declaring functions that make the code responsive to user input on which AC/SV they are applying
# Function asks the user which list they would like to use
def which_list():  # Returns boolean value, name of activist code to output to the CSV file name, list of IDs
print("\nWhat List would you like to use? \n\nPlease enter: span_list, house_list, "
"vdr_list \nntl_list, volunteer_list, or enter 'ALL'): ")
answer = input()
if answer == 'span_list':
return 0, 'span_list', SPAN_LIST
elif answer == 'house_list':
return 0, 'house_list', HOUSE_LIST
elif answer == 'vdr_list':
return 0, 'vdr_list', VDR_LIST
elif answer == 'ntl_list':
return 0, 'ntl_list', NTL_LIST
elif answer == 'volunteer_list':
return 0, 'volunteer_list', VOLUNTEER_LIST
elif answer == 'ALL': # if user chooses to have all applied, returns boolean 1 to allow Queue 1 to be populated
return 1, '', ''
# Function returns JSON data to a variable, used in second VAN API call
def which_list_ac_sv(list_in_use):
if list_in_use == SPAN_LIST:
return {'responses': [{'activistCodeId': 4364346, 'action': 'Apply', 'type': 'ActivistCode'}]}
elif list_in_use == HOUSE_LIST:
return {'responses': [{'activistCodeId': 4364347, 'action': 'Apply', 'type': 'ActivistCode'}]}
elif list_in_use == VDR_LIST:
return {'responses': [{'surveyQuestionId': 244123, 'surveyResponseId': 1024452, 'type': 'SurveyResponse'}]}
elif list_in_use == NTL_LIST:
return {'responses': [{'surveyQuestionId': 244127, 'surveyResponseId': 1024477, 'type': 'SurveyResponse'}]}
elif list_in_use == VOLUNTEER_LIST:
return {'responses': [{'surveyQuestionId': 244118, 'surveyResponseId': 1024432, 'type': 'SurveyResponse'}]}
#BSD Call and Declarations:
api_secret = '' # API secret provided by BSD for user Sam Suri
api_ts = int(time.time()) # API call uses HMAC authentication that incorporates the current time
api_id = '' # API ID provided by BSD for user Sam Suri
api_baseCall = '/page/api/signup/get_signups_by_form_id' # API Call to get list of signups based on form ID
signup_form_id = str(input('Please enter the signup form ID: ')) # prompts the user for input of form ID
# Creates parameters for API call; incorporates API ID, time created, and form ID
api_param = 'api_ver=2&api_id=' + api_id + '&api_ts=' + (str(api_ts)) + '&signup_form_id=' + str(signup_form_id)
all_bool, name_of_list_in_use, list_in_use = which_list() # Calls function which_list(), assigns to three variables
# Creates the string to be signed with HMAC authentication
signing_string = api_id + '\n' + str(api_ts) + '\n' + api_baseCall + '\n' + api_param
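# For reference, the signing string has this newline-separated layout (field names are placeholders, values hypothetical):
#   <api_id>\n<unix timestamp>\n/page/api/signup/get_signups_by_form_id\napi_ver=2&api_id=<id>&api_ts=<ts>&signup_form_id=<form_id>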
# Creates HMAC authentication code using the API secret and 'signing_string'
api_mac = hmac.new(api_secret.encode(), signing_string.encode(), hashlib.sha1).hexdigest()
# Creates full address of API call; inserts API ID, time created, HMAC authentication code, and form ID
api_url = 'http://battletx.bsd.net/page/api/signup/get_signups_by_form_id?api_ver=2&api_id=' + api_id + '&api_ts=' + \
str(api_ts) + '&api_mac=' + api_mac + '&signup_form_id=' + str(signup_form_id)
# Reformatting BSD XML:
api_xml_data = urllib.request.urlopen(api_url).read() # Uses urllib library to read XML data from BSD API URL
doc = dumps(yh.data(fromstring(api_xml_data))) # Parses XML data using xmljson library, parses using yahoo standard
loaded_doc = json.loads(doc) # Deserializes data
# Function iterates over dictionary and checks keys, if keys match strings, count is altered
def indiv_dict_length(items):
    count = 0  # declares temporary count variable, returns it at end of function
    for k, v in items:
if v != {}:
if k == 'firstname':
count += 1
if k == 'lastname':
count += 1
if k == 'email':
count += 1
if k == 'zip':
count += 1
if k == 'phone':
count += 1
return count
# Function checks to see if multiple check boxes have been selected
def mult_check_box_copy(mult_check_box_list, value):
    if isinstance(value, list):  # checks dictionary value to see if it is a list
for i in value: # if so, breaks it down into dictionaries
for k, v in i.items():
if k == 'signup_form_field_id': # if key is encountered, different values are appended to mult_check_box
# appends to list using deep copy, so that there is no possibility of passing by reference
mult_check_box_list.append(copy.deepcopy(v))
return 1 # returns boolean 1 to indicate that the individual has checked multiple check boxes
# Function creates initial data frame using PANDAS library and creates columns
def create_data_frame():
columns = ['First Name', 'Last Name', 'Phone Number', 'Zip Code', 'Email']
df = DataFrame(columns=columns)
return df # returns data frame to a variable
# Function appends to existing dataframe, temporary dictionary is passed in
def append_csv_row(dictionary): # looks for keys and inserts values into data frame
with df_append_lock:
df.loc[len(df)] = [dictionary['firstName'],
dictionary['lastName'],
dictionary['phones'][0]['phoneNumber'],
dictionary['addresses'][0]['zipOrPostalCode'],
dictionary['emails'][0]['email']
]
# Function prints data frame to csv file whose title dynamically includes current date and AC/SV inputted by user
def print_data_frame_to_csv(name_of_list_in_use):
    global df  # declared before use; allows the DataFrame to be cleared after each iteration in Queue 1
    csv_name = 'Contacts Affected on ' + str(time.strftime('%d-%m-%Y')) + ' for ' + name_of_list_in_use + '.csv'
    df.to_csv(csv_name, index=False)  # index is set to False as programs like Excel make this redundant
    df = df.drop(df.index[:])
# Function checks to see if multiple check boxes clicked match any check box in AC/SV list and if there is a match,
# updates both contact and AC/SV in My Campaign
def mult_check_box_compare(mult_check_box_list, temp_dict, list_in_use, signup_date):
for y in list_in_use:
for x in mult_check_box_list:
if x == y:
if (signup_date >= start_date) & (signup_date <= end_date):
append_csv_row(temp_dict) # appends information on user to CSV file
return
# Function checks to see if the single check box clicked matches any of the code in appropriate AC/SV List
# and if there is a match, updates both contact and AC/SV in My Campaign
def single_check_box_compare(dictionary, temp_dict, list_in_use, signup_date):
for k4, v4 in dictionary.items():
if k4 == 'signup_form_field_id':
for y in list_in_use:
if v4 == y:
if (signup_date >= start_date) & (signup_date <= end_date):
append_csv_row(temp_dict) # appends information on user to CSV file
return
#Queue and threading variables and declarations
q1 = Queue(maxsize = NUM_ACTIVIST_CODES) # declares first Queue of size of all activist codes being tested for
q2 = Queue(maxsize = 2000) # declares second Queue of maxsize 2000, max in second Queue is realistically around 1000
number_of_threads_q1 = NUM_ACTIVIST_CODES # threading equals number of AC/SV lists
number_of_threads_q2 = 4 # threads limited to 4 due to processing constraints of current computer, could go up to 10
queue_1_lock = threading.Lock() # lock between each iteration in Queue 1, otherwise 12 threads will run at once
df_append_lock = threading.Lock() # lock ensures that all records are appended to DataFrame
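# Data flow sketch (summarising the design above, not new behaviour): Queue 1 receives (loaded_doc, index) pairs,
# one per AC/SV list; each Queue 1 worker fans the parsed signups out into Queue 2 as (record, list_in_use) tuples,
# and Queue 2 workers filter each record by check box and signup date before appending matching rows to the DataFrame.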
#function allows Queue 1 to insert the name of a list that corresponds to an activist code
def iter_act_codes(iter_num): # range of integers is passed in that iterates over global constant NUM_ACTIVIST_CODES
if iter_num == 0:
return 'span list', SPAN_LIST
elif iter_num == 1:
return 'house list', HOUSE_LIST
elif iter_num == 2:
return 'volunteer list', VOLUNTEER_LIST
elif iter_num == 3:
return 'ntl list', NTL_LIST
elif iter_num == 4:
return 'vdr list', VDR_LIST
# Function starts the first Queue, allows program to run for each Activist Code being used
def execute_queue1(q1):
with queue_1_lock: # enforces a lock on each thread due to processing power of computer
        loaded_doc, i = q1.get()
name_of_list_in_use, list_in_use = iter_act_codes(i) # calls function that returns name of a list
# Breaks down nested dictionary from XML data into useful information
for k, v in loaded_doc.items():
for k1, v1 in v.items():
for k2, v2 in v1.items():
for i in v2: # When lists of dictionaries is reached, outputs each one to second Queue
q2.put((i, list_in_use))
# Tests to see if second Queue is finished
q2.join()
# If Queue is empty, completed data frame is printed to CSV file
print_data_frame_to_csv(name_of_list_in_use)
q1.task_done()
# Function run by Queue 2 worker threads; (record, list) tuples are pulled from the queue, fed by either the main body or Queue 1
def execute_queue2(q2):
while True:
        i, list_in_use = q2.get()  # pulls the next item and breaks the tuple down into separate objects
# initializes numerous lists and boolean values
mult_check_box_list = []
temp_dict = {}
nested_dict = {}
check_button = 0
mult_check_box = 0
signup_date_check = 0
signup_date = ''
# finds out how many fields each person has, assigns them to variables, allows program to know when to move on to next person
temp_dict_length = indiv_dict_length(i.items())
        if temp_dict_length >= 5:  # makes sure that each person has all five fields; any fewer and VAN cannot match the user
for k3, v3 in i.items(): # breaks dictionary into tuple
if v3 != {}: # makes sure that only answered fields are included
if k3 == 'stg_signup_extra':
# deep copies values from v3 into mult_check_box_list, returns 1 if multiple boxes were clicked
mult_check_box = mult_check_box_copy(mult_check_box_list, v3)
check_button = 1 # boolean value is set to 1, means at least one check box has been clicked
nested_dict = v3
if k3 == 'create_dt': # finds date of when signup occurred
signup_date = v3[0:10] # strips out time
signup_date_check = 1 # boolean value is set to 1, program cannot run without info
if k3 == 'firstname':
k3 = 'firstName'
temp_dict[k3] = v3 # if key matches, appends key, value pair to temp_dict
if k3 == 'lastname':
k3 = 'lastName'
temp_dict[k3] = v3 # if key matches, appends key, value pair to temp_dict
if k3 == 'email':
k3 = 'emails'
v3 = [{'email': v3}] # reassigns key to match VAN JSON
temp_dict[k3] = v3 # if key matches, appends key, value pair to temp_dict
if k3 == 'zip':
k3 = 'addresses'
v3 = [{'zipOrPostalCode': v3}] # reassigns key to match VAN JSON
temp_dict[k3] = v3 # if key matches, appends key, value pair to temp_dict
if k3 == 'phone':
k3 = 'phones'
if v3[0] == '1': # formats phone number to match VAN style, checks if country code is present
v3 = v3[0] + '-' + v3[1:4] + '-' + v3[4:7] + '-' + v3[7:]
else:
v3 = '1-' + v3[0:3] + '-' + v3[3:6] + '-' + v3[6:]
v3 = [{'phoneNumber': v3}] # reassigns key to match VAN JSON
temp_dict[k3] = v3
# makes sure that all filled out fields have been added to temp_dict, and that at least one check box has been clicked
if (check_button == 1) & (signup_date_check == 1) & (len(temp_dict) == temp_dict_length):
if mult_check_box == 1: # checks variable to see if multiple check boxes have been clicked
# allows list to be broken down into dictionaries so API calls can be made
mult_check_box_compare(mult_check_box_list, temp_dict, list_in_use, signup_date)
break
else:
# allows single dictionary to proceed so that API calls can be made
single_check_box_compare(nested_dict, temp_dict, list_in_use, signup_date)
break
q2.task_done()
# iterates over number of threads declared for Queue 1
for i in range(number_of_threads_q1):
    worker = threading.Thread(target=execute_queue1, args=(q1,))  # worker executes the Queue 1 function; the queue is passed in
worker.daemon = True
worker.start()
# iterates over number of threads declared for Queue 2
for i in range(number_of_threads_q2):
    worker = threading.Thread(target=execute_queue2, args=(q2,))  # worker executes the Queue 2 function; the queue is passed in
worker.daemon = True
worker.start()
# Creates data frame
df = create_data_frame()
# start_date and end_date variables are created from user input
print('\nSurvey responses will only be updated if signups fall within a specific time period')
start_date = input('Please enter the start date (YYYY-MM-DD): ')
end_date = input('Please enter the end date (YYYY-MM-DD): ')
# last point before program runs, user chooses to proceed
input("The program will now run, please make sure all information is correct and press 'enter' to proceed: ")
# Checks to see if user would like to update user information for all activist codes/survey responses
if all_bool == 1:
for i in range(NUM_ACTIVIST_CODES):
q1.put((loaded_doc, i)) # if so, puts original parsed data in first Queue, threading and Queue becomes nested
# if the user does not want to update based on all activist codes, only uses Queue 2 and threading is not nested
else:
# Breaks down nested dictionary from XML data into useful information
for k, v in loaded_doc.items():
for k1, v1 in v.items():
for k2, v2 in v1.items():
for i in v2: # When lists of dictionaries is reached, outputs each one to second Queue
q2.put((i, list_in_use))
# Tests to see if second Queue is finished
q2.join()
# If Queue is empty, completed data frame is printed to CSV file
print_data_frame_to_csv(name_of_list_in_use)
# Tests to see if first Queue is finished, if Queue 1 is not used, test will pass immediately
q1.join()
input("\nThe program has completed, press 'enter' to quit: ")
|
test_slow_retrieval_attack.py
|
"""
<Program Name>
test_slow_retrieval_attack.py
<Author>
Konstantin Andrianov
<Started>
March 13, 2012
<Copyright>
See LICENSE for licensing information.
<Purpose>
Simulate slow retrieval attack. A simple client update vs. client
update implementing TUF.
  During a slow retrieval attack, the attacker is able to prevent clients from
being aware of interference with receiving updates by responding to client
requests so slowly that automated updates never complete.
NOTE: Currently TUF does not protect against slow retrieval attacks.
NOTE: The interposition provided by 'tuf.interposition' is used to intercept
  all calls made by urllib/urllib2 to certain network locations specified in
the interposition configuration file. Look up interposition.py for more
  information and an illustration of sample contents of the interposition
configuration file. Interposition was meant to make TUF integration with an
existing software updater an easy process. This allows for more flexibility
to the existing software updater. However, if you are planning to solely use
TUF there should be no need for interposition, all necessary calls will be
generated from within TUF.
Note: There is no difference between 'updates' and 'target' files.
"""
import os
import time
import urllib
import random
import subprocess
from multiprocessing import Process
import tuf.tests.system_tests.util_test_tools as util_test_tools
from tuf.interposition import urllib_tuf
class SlowRetrievalAttackAlert(Exception):
pass
def _download(url, filename, tuf=False):
if tuf:
urllib_tuf.urlretrieve(url, filename)
else:
urllib.urlretrieve(url, filename)
def test_slow_retrieval_attack(TUF=False):
WAIT_TIME = 5 # Number of seconds to wait until download completes.
ERROR_MSG = '\tSlow Retrieval Attack was Successful!\n\n'
# Launch the server.
port = random.randint(30000, 45000)
command = ['python', 'slow_retrieval_server.py', str(port)]
server_process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
time.sleep(.1)
try:
# Setup.
root_repo, url, server_proc, keyids = \
util_test_tools.init_repo(tuf=TUF, port=port)
reg_repo = os.path.join(root_repo, 'reg_repo')
downloads = os.path.join(root_repo, 'downloads')
# Add file to 'repo' directory: {root_repo}
filepath = util_test_tools.add_file_to_repository(reg_repo, 'A'*10)
file_basename = os.path.basename(filepath)
url_to_file = url+'reg_repo/'+file_basename
downloaded_file = os.path.join(downloads, file_basename)
if TUF:
print 'TUF ...'
tuf_repo = os.path.join(root_repo, 'tuf_repo')
# Update TUF metadata before attacker modifies anything.
util_test_tools.tuf_refresh_repo(root_repo, keyids)
# Modify the url. Remember that the interposition will intercept
# urls that have 'localhost:9999' hostname, which was specified in
# the json interposition configuration file. Look for 'hostname'
# in 'util_test_tools.py'. Further, the 'file_basename' is the target
# path relative to 'targets_dir'.
url_to_file = 'http://localhost:9999/'+file_basename
# Client tries to download.
# NOTE: if TUF is enabled the metadata files will be downloaded first.
proc = Process(target=_download, args=(url_to_file, downloaded_file, TUF))
proc.start()
proc.join(WAIT_TIME)
if proc.exitcode is None:
proc.terminate()
raise SlowRetrievalAttackAlert(ERROR_MSG)
finally:
if server_process.returncode is None:
server_process.kill()
print 'Slow server terminated.\n'
util_test_tools.cleanup(root_repo, server_proc)
try:
test_slow_retrieval_attack(TUF=False)
except SlowRetrievalAttackAlert, error:
print error
try:
test_slow_retrieval_attack(TUF=True)
except SlowRetrievalAttackAlert, error:
print error
|
controls.py
|
#!/usr/bin/env python3
import threading
import subprocess
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('GtkLayerShell', '0.1')
from gi.repository import Gtk, Gdk, GLib, GdkPixbuf, GtkLayerShell
from nwg_panel.tools import check_key, get_brightness, set_brightness, get_volume, set_volume, get_battery, \
get_interface, update_image, bt_service_enabled, bt_on, bt_name, is_command, list_sinks
from nwg_panel.common import dependencies
try:
import netifaces
dependencies["netifaces"] = True
except ModuleNotFoundError:
pass
class Controls(Gtk.EventBox):
def __init__(self, settings, position, alignment, width, monitor=None, icons_path=""):
self.settings = settings
self.position = position
self.alignment = alignment
self.icons_path = icons_path
Gtk.EventBox.__init__(self)
check_key(settings, "show-values", True)
check_key(settings, "icon-size", 16)
check_key(settings, "interval", 1)
check_key(settings, "icon-size", 16)
check_key(settings, "hover-opens", True)
check_key(settings, "leave-closes", True)
check_key(settings, "css-name", "controls-label")
check_key(settings, "components", ["net", "brightness", "volume", "battery"])
check_key(settings, "net-interface", "")
self.icon_size = settings["icon-size"]
self.net_icon_name = "view-refresh-symbolic"
self.net_image = Gtk.Image.new_from_icon_name(self.net_icon_name, Gtk.IconSize.MENU)
self.net_label = Gtk.Label() if settings["show-values"] else None
self.bri_icon_name = "view-refresh-symbolic"
self.bri_image = Gtk.Image.new_from_icon_name(self.bri_icon_name, Gtk.IconSize.MENU)
self.bri_label = Gtk.Label() if settings["show-values"] else None
self.bri_slider = None
self.vol_icon_name = "view-refresh-symbolic"
self.vol_image = Gtk.Image.new_from_icon_name(self.vol_icon_name, Gtk.IconSize.MENU)
self.vol_label = Gtk.Label() if settings["show-values"] else None
self.bt_icon_name = "view-refresh-symbolic"
self.bt_image = Gtk.Image.new_from_icon_name(self.bt_icon_name, Gtk.IconSize.MENU)
self.bt_label = Gtk.Label() if settings["show-values"] else None
self.bat_icon_name = "view-refresh-symbolic"
self.bat_image = Gtk.Image.new_from_icon_name(self.bat_icon_name, Gtk.IconSize.MENU)
self.bat_label = Gtk.Label() if settings["show-values"] else None
self.pan_image = Gtk.Image()
update_image(self.pan_image, "pan-down-symbolic", self.icon_size, self.icons_path)
self.box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
self.add(self.box)
self.popup_window = PopupWindow(position, alignment, settings, width, monitor=monitor, icons_path=self.icons_path)
self.connect('button-press-event', self.on_button_press)
self.connect('enter-notify-event', self.on_enter_notify_event)
self.connect('leave-notify-event', self.on_leave_notify_event)
self.build_box()
self.refresh()
if "battery" in settings["components"]:
self.refresh_bat()
if settings["interval"] > 0:
Gdk.threads_add_timeout_seconds(GLib.PRIORITY_LOW, settings["interval"], self.refresh)
if "battery" in settings["components"]:
Gdk.threads_add_timeout_seconds(GLib.PRIORITY_LOW, 5, self.refresh_bat)
def build_box(self):
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
self.box.pack_start(box, False, False, 6)
if "brightness" in self.settings["components"]:
box.pack_start(self.bri_image, False, False, 4)
if self.bri_label:
box.pack_start(self.bri_label, False, False, 0)
if "volume" in self.settings["components"]:
box.pack_start(self.vol_image, False, False, 4)
if self.vol_label:
box.pack_start(self.vol_label, False, False, 0)
if "net" in self.settings["components"] and self.settings["net-interface"]:
if dependencies["netifaces"]:
box.pack_start(self.net_image, False, False, 4)
if self.net_label:
box.pack_start(self.net_label, False, False, 0)
else:
print("'netifaces' python module not found")
if "bluetooth" in self.settings["components"] and bt_service_enabled():
box.pack_start(self.bt_image, False, False, 4)
if self.bt_label:
box.pack_start(self.bt_label, False, False, 0)
if "battery" in self.settings["components"]:
box.pack_start(self.bat_image, False, False, 4)
if self.bat_label:
box.pack_start(self.bat_label, False, False, 0)
box.pack_start(self.pan_image, False, False, 4)
def get_output(self):
if "net" in self.settings["components"] and self.settings["net-interface"]:
ip = get_interface(self.settings["net-interface"])
GLib.idle_add(self.update_net, ip)
if bt_service_enabled() and "bluetooth" in self.settings["components"]:
is_on = bt_on()
name = bt_name()
GLib.idle_add(self.update_bt, is_on, name)
if "brightness" in self.settings["components"]:
try:
value = get_brightness()
if value:
GLib.idle_add(self.update_brightness, value)
else:
print("Couldn't get brightness, is 'light' installed?")
except Exception as e:
print(e)
if "volume" in self.settings["components"] and dependencies["pyalsa"] or dependencies["amixer"]:
try:
value, switch = get_volume()
GLib.idle_add(self.update_volume, value, switch)
except Exception as e:
print(e)
return False
def get_bat_output(self):
if "battery" in self.settings["components"]:
try:
value, time, charging = get_battery()
GLib.idle_add(self.update_battery, value, charging)
except Exception as e:
print(e)
def refresh(self):
thread = threading.Thread(target=self.get_output)
thread.daemon = True
thread.start()
return True
    # No point in checking battery data more often than every 5 seconds
def refresh_bat(self):
thread = threading.Thread(target=self.get_bat_output)
thread.daemon = True
thread.start()
return True
def update_net(self, ip):
icon_name = "network-wired-symbolic" if ip else "network-wired-disconnected-symbolic"
if icon_name != self.net_icon_name:
update_image(self.net_image, icon_name, self.icon_size, self.icons_path)
self.net_icon_name = icon_name
if self.net_label:
self.net_label.set_text("{}".format(self.settings["net-interface"]))
def update_bt(self, is_on, name):
icon_name = "bluetooth-active-symbolic" if is_on else "bluetooth-disabled-symbolic"
if icon_name != self.bt_icon_name:
update_image(self.bt_image, icon_name, self.icon_size, self.icons_path)
if self.bt_label:
self.bt_label.set_text(name)
def update_brightness(self, value):
icon_name = bri_icon_name(value)
if icon_name != self.bri_icon_name:
update_image(self.bri_image, icon_name, self.icon_size, self.icons_path)
self.bri_icon_name = icon_name
if self.bri_label:
self.bri_label.set_text("{}%".format(value))
def update_battery(self, value, charging):
icon_name = bat_icon_name(value, charging)
if icon_name != self.bat_icon_name:
update_image(self.bat_image, icon_name, self.icon_size, self.icons_path)
self.bat_icon_name = icon_name
if self.bat_label:
self.bat_label.set_text("{}%".format(value))
def update_volume(self, value, switch):
icon_name = vol_icon_name(value, switch)
if icon_name != self.vol_icon_name:
update_image(self.vol_image, icon_name, self.settings["icon-size"], self.icons_path)
self.vol_icon_name = icon_name
if self.vol_label:
self.vol_label.set_text("{}%".format(value))
def on_button_press(self, w, event):
if not self.popup_window.get_visible():
self.popup_window.show_all()
if self.popup_window.sink_box:
self.popup_window.sink_box.hide()
if self.popup_window.menu_box:
self.popup_window.menu_box.hide()
else:
self.popup_window.hide()
return False
def on_enter_notify_event(self, widget, event):
if self.settings["hover-opens"]:
if not self.popup_window.get_visible():
self.popup_window.show_all()
if self.popup_window.sink_box:
self.popup_window.sink_box.hide()
if self.popup_window.menu_box:
self.popup_window.menu_box.hide()
else:
self.get_style_context().set_state(Gtk.StateFlags.SELECTED)
return True
def on_leave_notify_event(self, widget, event):
self.get_style_context().set_state(Gtk.StateFlags.NORMAL)
return True
class PopupWindow(Gtk.Window):
def __init__(self, position, alignment, settings, width, monitor=None, icons_path=""):
Gtk.Window.__init__(self, type_hint=Gdk.WindowTypeHint.NORMAL)
GtkLayerShell.init_for_window(self)
if monitor:
GtkLayerShell.set_monitor(self, monitor)
check_key(settings, "css-name", "controls-window")
self.set_property("name", settings["css-name"])
self.icon_size = settings["icon-size"]
self.icons_path = icons_path
self.settings = settings
self.position = position
self.bt_icon_name = ""
self.bt_image = Gtk.Image()
self.net_icon_name = ""
self.menu_box = None
self.sink_box = None
check_key(settings, "output-switcher", False)
self.sinks = []
if is_command("pactl") and settings["output-switcher"]:
self.sinks = list_sinks()
self.connect("show", self.refresh_sinks)
eb = Gtk.EventBox()
eb.set_above_child(False)
if settings["leave-closes"]:
self.connect("leave_notify_event", self.on_window_exit)
outer_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
eb.add(outer_vbox)
self.add(eb)
outer_hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
Gtk.Widget.set_size_request(outer_hbox, width, 10)
outer_vbox.pack_start(outer_hbox, True, True, 20)
v_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
outer_hbox.pack_start(v_box, True, True, 20)
GtkLayerShell.set_layer(self, GtkLayerShell.Layer.TOP)
# GtkLayerShell.set_keyboard_interactivity(self, True)
GtkLayerShell.set_margin(self, GtkLayerShell.Edge.TOP, 6)
GtkLayerShell.set_margin(self, GtkLayerShell.Edge.BOTTOM, 6)
GtkLayerShell.set_margin(self, GtkLayerShell.Edge.RIGHT, 6)
GtkLayerShell.set_margin(self, GtkLayerShell.Edge.LEFT, 6)
if alignment == "left":
GtkLayerShell.set_anchor(self, GtkLayerShell.Edge.LEFT, True)
else:
GtkLayerShell.set_anchor(self, GtkLayerShell.Edge.RIGHT, True)
if position == "bottom":
GtkLayerShell.set_anchor(self, GtkLayerShell.Edge.BOTTOM, True)
else:
GtkLayerShell.set_anchor(self, GtkLayerShell.Edge.TOP, True)
check_key(settings, "commands", {"battery": "", "net": ""})
add_sep = False
if "brightness" in settings["components"]:
inner_hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
v_box.pack_start(inner_hbox, False, False, 0)
self.bri_icon_name = "view-refresh-symbolic"
self.bri_image = Gtk.Image.new_from_icon_name(self.bri_icon_name, Gtk.IconSize.MENU)
icon_name = bri_icon_name(int(get_brightness()))
if icon_name != self.bri_icon_name:
update_image(self.bri_image, icon_name, self.icon_size, self.icons_path)
self.bri_icon_name = icon_name
inner_hbox.pack_start(self.bri_image, False, False, 6)
scale = Gtk.Scale.new_with_range(orientation=Gtk.Orientation.HORIZONTAL, min=0, max=100, step=1)
value = get_brightness()
scale.set_value(value)
scale.connect("value-changed", self.set_bri)
inner_hbox.pack_start(scale, True, True, 5)
add_sep = True
if "volume" in settings["components"] and dependencies["pyalsa"] or dependencies["amixer"]:
inner_hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
v_box.pack_start(inner_hbox, False, False, 6)
self.vol_icon_name = "view-refresh-symbolic"
self.vol_image = Gtk.Image.new_from_icon_name(self.vol_icon_name, Gtk.IconSize.MENU)
vol, switch = get_volume()
icon_name = vol_icon_name(vol, switch)
if icon_name != self.vol_icon_name:
update_image(self.vol_image, icon_name, self.icon_size, self.icons_path)
self.vol_icon_name = icon_name
inner_hbox.pack_start(self.vol_image, False, False, 6)
scale = Gtk.Scale.new_with_range(orientation=Gtk.Orientation.HORIZONTAL, min=0, max=100, step=1)
value, switch = get_volume()
scale.set_value(value)
scale.connect("value-changed", self.set_vol)
inner_hbox.pack_start(scale, True, True, 5)
if is_command("pactl") and settings["output-switcher"]:
pactl_eb = Gtk.EventBox()
image = Gtk.Image()
pactl_eb.add(image)
pactl_eb.connect("enter_notify_event", self.on_enter_notify_event)
pactl_eb.connect("leave_notify_event", self.on_leave_notify_event)
update_image(image, "pan-down-symbolic", self.icon_size, self.icons_path)
inner_hbox.pack_end(pactl_eb, False, False, 5)
add_sep = True
if is_command("pactl") and settings["output-switcher"]:
self.sink_box = SinkBox()
pactl_eb.connect('button-press-event', self.sink_box.switch_visibility)
v_box.pack_start(self.sink_box, False, False, 0)
if add_sep:
sep = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
v_box.pack_start(sep, True, True, 10)
if "net" in settings["components"] and dependencies["netifaces"] and settings["net-interface"]:
event_box = Gtk.EventBox()
if "net" in settings["commands"] and settings["commands"]["net"]:
event_box.connect("enter_notify_event", self.on_enter_notify_event)
event_box.connect("leave_notify_event", self.on_leave_notify_event)
event_box.connect('button-press-event', self.launch, settings["commands"]["net"])
inner_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
inner_hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
inner_vbox.pack_start(inner_hbox, True, True, 6)
v_box.pack_start(event_box, True, True, 10)
self.net_icon_name = "view-refresh-symbolic"
self.net_image = Gtk.Image.new_from_icon_name(self.net_icon_name, Gtk.IconSize.MENU)
ip_addr = get_interface(settings["net-interface"])
icon_name = "network-wired-symbolic" if ip_addr else "network-wired-disconnected-symbolic"
if icon_name != self.net_icon_name:
update_image(self.net_image, icon_name, self.icon_size, self.icons_path)
self.net_icon_name = icon_name
inner_hbox.pack_start(self.net_image, False, False, 6)
self.net_label = Gtk.Label("{}: {}".format(settings["net-interface"], ip_addr))
inner_hbox.pack_start(self.net_label, False, True, 6)
if "net" in settings["commands"] and settings["commands"]["net"]:
img = Gtk.Image()
update_image(img, "pan-end-symbolic", self.icon_size, self.icons_path)
inner_hbox.pack_end(img, False, True, 4)
event_box.add(inner_vbox)
if bt_service_enabled() and "bluetooth" in settings["components"]:
event_box = Gtk.EventBox()
if "bluetooth" in settings["commands"] and settings["commands"]["bluetooth"]:
event_box.connect("enter_notify_event", self.on_enter_notify_event)
event_box.connect("leave_notify_event", self.on_leave_notify_event)
event_box.connect('button-press-event', self.launch, settings["commands"]["bluetooth"])
inner_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
inner_hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
inner_vbox.pack_start(inner_hbox, True, True, 6)
v_box.pack_start(event_box, True, True, 6)
self.bt_icon_name = "view-refresh-symbolic"
self.bt_image = Gtk.Image.new_from_icon_name(self.bt_icon_name, Gtk.IconSize.MENU)
icon_name = bt_icon_name(bt_on())
if icon_name != self.bt_icon_name:
update_image(self.bt_image, icon_name, self.icon_size, self.icons_path)
self.bt_icon_name = icon_name
inner_hbox.pack_start(self.bt_image, False, False, 6)
self.bt_label = Gtk.Label(bt_name())
inner_hbox.pack_start(self.bt_label, False, True, 6)
if "bluetooth" in settings["commands"] and settings["commands"]["bluetooth"]:
img = Gtk.Image()
update_image(img, "pan-end-symbolic", self.icon_size, self.icons_path)
inner_hbox.pack_end(img, False, True, 4)
event_box.add(inner_vbox)
if "battery" in settings["components"]:
event_box = Gtk.EventBox()
if "battery" in settings["commands"] and settings["commands"]["battery"]:
event_box.connect("enter_notify_event", self.on_enter_notify_event)
event_box.connect("leave_notify_event", self.on_leave_notify_event)
event_box.connect('button-press-event', self.launch, settings["commands"]["battery"])
inner_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
inner_hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
inner_vbox.pack_start(inner_hbox, True, True, 6)
v_box.pack_start(event_box, True, True, 6)
self.bat_icon_name = "view-refresh-symbolic"
self.bat_image = Gtk.Image.new_from_icon_name(self.bat_icon_name, Gtk.IconSize.MENU)
level, msg, charging = get_battery()
icon_name = bat_icon_name(level, charging)
if icon_name != self.bat_icon_name:
update_image(self.bat_image, icon_name, self.icon_size, self.icons_path)
self.bat_icon_name = icon_name
inner_hbox.pack_start(self.bat_image, False, False, 6)
self.bat_label = Gtk.Label("{}% {}".format(level, msg))
inner_hbox.pack_start(self.bat_label, False, True, 6)
if "battery" in settings["commands"] and settings["commands"]["battery"]:
img = Gtk.Image()
update_image(img, "pan-end-symbolic", self.icon_size, self.icons_path)
inner_hbox.pack_end(img, False, True, 4)
event_box.add(inner_vbox)
check_key(settings, "custom-items", [])
if settings["custom-items"]:
for item in settings["custom-items"]:
check_key(item, "name", "undefined")
check_key(item, "icon", "")
check_key(item, "cmd", "")
c_item = self.custom_item(item["name"], item["icon"], item["cmd"])
v_box.pack_start(c_item, True, True, 6)
check_key(settings, "menu", {})
if settings["menu"]:
template = settings["menu"]
sep = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)
v_box.pack_start(sep, True, True, 10)
e_box = Gtk.EventBox()
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
e_box.add(box)
inner_hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
box.pack_start(inner_hbox, True, True, 6)
v_box.pack_start(e_box, True, True, 6)
img = Gtk.Image()
update_image(img, template["icon"], self.icon_size, self.icons_path)
inner_hbox.pack_start(img, False, False, 6)
check_key(template, "name", "Menu name")
label = Gtk.Label(template["name"])
inner_hbox.pack_start(label, False, False, 6)
check_key(template, "items", [])
if template["items"]:
img = Gtk.Image()
update_image(img, "pan-down-symbolic", self.icon_size, self.icons_path)
inner_hbox.pack_end(img, False, True, 5)
e_box.connect("enter-notify-event", self.on_enter_notify_event)
e_box.connect("leave-notify-event", self.on_leave_notify_event)
self.menu_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
v_box.pack_start(self.menu_box, False, False, 0)
for item in template["items"]:
eb = Gtk.EventBox()
vb = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
hb = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
vb.pack_start(hb, False, False, 6)
i = Gtk.Label(item["name"])
hb.pack_start(i, False, False, self.icon_size + 18)
eb.add(vb)
eb.connect("enter_notify_event", self.on_enter_notify_event)
eb.connect("leave_notify_event", self.on_leave_notify_event)
eb.connect("button-press-event", self.launch, item["cmd"])
self.menu_box.pack_start(eb, False, False, 0)
e_box.connect('button-press-event', self.switch_menu_box)
Gdk.threads_add_timeout_seconds(GLib.PRIORITY_LOW, settings["interval"], self.refresh)
def on_window_exit(self, w, e):
self.hide()
def switch_menu_box(self, widget, event):
if self.menu_box.get_visible():
self.menu_box.hide()
else:
self.menu_box.show_all()
def refresh_sinks(self, *args):
self.sinks = list_sinks()
def custom_item(self, name, icon, cmd):
eb = Gtk.EventBox()
v_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
h_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
v_box.pack_start(h_box, False, False, 6)
eb.add(v_box)
image = Gtk.Image()
update_image(image, icon, self.icon_size, self.icons_path)
h_box.pack_start(image, False, True, 6)
label = Gtk.Label(name)
h_box.pack_start(label, False, True, 4)
if cmd:
eb.connect("enter_notify_event", self.on_enter_notify_event)
eb.connect("leave_notify_event", self.on_leave_notify_event)
eb.connect('button-press-event', self.launch, cmd)
img = Gtk.Image()
update_image(img, "pan-end-symbolic", self.icon_size, self.icons_path)
h_box.pack_end(img, False, True, 4)
return eb
def refresh(self):
if self.get_visible():
if "net" in self.settings["components"] and dependencies["netifaces"]:
ip_addr = get_interface(self.settings["net-interface"])
icon_name = "network-wired-symbolic" if ip_addr else "network-wired-disconnected-symbolic"
if icon_name != self.net_icon_name:
update_image(self.net_image, icon_name, self.icon_size, self.icons_path)
self.net_icon_name = icon_name
if not ip_addr:
ip_addr = "disconnected"
self.net_label.set_text("{}: {}".format(self.settings["net-interface"], ip_addr))
if bt_service_enabled() and "bluetooth" in self.settings["components"]:
icon_name = bt_icon_name(bt_on())
if icon_name != self.bt_icon_name:
update_image(self.bt_image, icon_name, self.icon_size, self.icons_path)
self.bt_icon_name = icon_name
self.bt_label.set_text(bt_name())
if "battery" in self.settings["components"]:
level, msg, charging = get_battery()
icon_name = bat_icon_name(level, charging)
if icon_name != self.bat_icon_name:
update_image(self.bat_image, icon_name, self.icon_size, self.icons_path)
self.bat_icon_name = icon_name
self.bat_label.set_text("{}% {}".format(level, msg))
return True
def on_enter_notify_event(self, widget, event):
widget.get_style_context().set_state(Gtk.StateFlags.SELECTED)
def on_leave_notify_event(self, widget, event):
widget.get_style_context().set_state(Gtk.StateFlags.NORMAL)
def set_bri(self, slider):
set_brightness(slider)
icon_name = bri_icon_name(int(slider.get_value()))
if icon_name != self.bri_icon_name:
update_image(self.bri_image, icon_name, self.icon_size, self.icons_path)
self.bri_icon_name = icon_name
def set_vol(self, slider):
set_volume(slider)
vol, switch = get_volume()
icon_name = vol_icon_name(vol, switch)
if icon_name != self.vol_icon_name:
update_image(self.vol_image, icon_name, self.icon_size, self.icons_path)
self.vol_icon_name = icon_name
def close_win(self, w, e):
self.hide()
def handle_keyboard(self, w, e):
if e.type == Gdk.EventType.KEY_RELEASE and e.keyval == Gdk.KEY_Escape:
self.close_win(w, e)
return e
def launch(self, w, e, cmd):
print("Executing '{}'".format(cmd))
subprocess.Popen('exec {}'.format(cmd), shell=True)
self.hide()
class SinkBox(Gtk.Box):
def __init__(self):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
self.sinks = None
self.refresh()
def refresh(self):
for item in self.get_children():
item.destroy()
self.sinks = list_sinks()
for sink in self.sinks:
eb = Gtk.EventBox()
eb.connect("enter_notify_event", self.on_enter_notify_event)
eb.connect("leave_notify_event", self.on_leave_notify_event)
eb.connect('button-press-event', self.switch_sink, sink["name"])
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
vbox.pack_start(hbox, True, True, 6)
desc = sink["desc"]
if len(desc) > 26:
desc = "{}\u2026".format(desc[:26])
label = Gtk.Label(desc)
hbox.pack_start(label, True, True, 0)
eb.add(vbox)
self.pack_start(eb, False, False, 0)
def switch_visibility(self, *args):
if self.get_visible():
self.hide()
else:
self.refresh()
self.show_all()
def on_enter_notify_event(self, widget, event):
widget.get_style_context().set_state(Gtk.StateFlags.SELECTED)
def on_leave_notify_event(self, widget, event):
widget.get_style_context().set_state(Gtk.StateFlags.NORMAL)
def switch_sink(self, w, e, sink):
print("Sink: '{}'".format(sink))
subprocess.Popen('exec pacmd set-default-sink "{}"'.format(sink), shell=True)
self.hide()
def bri_icon_name(value):
icon_name = "display-brightness-low-symbolic"
if value > 70:
icon_name = "display-brightness-high-symbolic"
elif value > 30:
icon_name = "display-brightness-medium-symbolic"
return icon_name
def vol_icon_name(value, switch):
icon_name = "audio-volume-muted-symbolic"
if switch:
if value is not None:
if value > 70:
icon_name = "audio-volume-high-symbolic"
elif value > 30:
icon_name = "audio-volume-medium-symbolic"
else:
icon_name = "audio-volume-low-symbolic"
else:
icon_name = "audio-volume-muted-symbolic"
return icon_name
def bat_icon_name(value, is_charging):
icon_name = "battery-empty-symbolic"
if is_charging:
if value > 95:
icon_name = "battery-full-charging-symbolic"
elif value > 50:
icon_name = "battery-good-charging-symbolic"
elif value > 20:
icon_name = "battery-low-charging-symbolic"
else:
if value > 95:
icon_name = "battery-full-symbolic"
elif value > 50:
icon_name = "battery-good-symbolic"
elif value > 20:
icon_name = "battery-low-symbolic"
return icon_name
def bt_icon_name(is_on):
icon_name = "bluetooth-active-symbolic" if is_on else "bluetooth-disabled-symbolic"
return icon_name
|
bot_atcoder.py
|
import os
import os.path as pth
import re
import time
from threading import Thread
import pyperclip as clip
from selenium.webdriver.common.keys import Keys
from bot_cp import *
class bot_atcoder(bot_cp):
def prept(self, prob_code: str, autoload=False):
prob_code = prob_code.lower()
driver = get_driver()
contestname = self.contestname
driver.get(rf'https://atcoder.jp/contests/{contestname}/tasks/{contestname}_{prob_code.lower()}')
caught = []
seen = set()
for cp in driver.find_elements_by_class_name('btn-copy'):
id = cp.get_attribute('data-target')
if id not in seen:
seen.add(id)
pre = driver.find_element_by_id(id)
if pre.text:
caught.append(pre.text)
with open(reference, 'r') as f:
samplecpp: list = f.readlines()
for idx in range(len(samplecpp)):
if samplecpp[idx].startswith(' re(T);'):
samplecpp[idx] = '//' + samplecpp[idx] # most likely no T cases in ATCODER
break
srcfr, srcto = [idx for idx, line in enumerate(samplecpp) if line.endswith('python-autofill-src>\n')]
del samplecpp[srcfr + 1:srcto]
infr, into = [idx for idx, line in enumerate(samplecpp) if line.endswith('python-autofill-in>\n')]
del samplecpp[infr + 1:into]
outfr, outto = [idx for idx, line in enumerate(samplecpp) if line.endswith('python-autofill-out>\n')]
del samplecpp[outfr + 1:outto]
#
ins, outs = caught[0::2], caught[1::2]
filename = solution_format.format(prob_code)
samplecpp[0] = f'//{filename} {contestname}\n'
inject(filename, samplecpp, ins, outs, infr, outfr)
# now preparing the solution
print(f'task {prob_code} prepared')
driver.back()
if autoload:
self.load(prob_code)
def sub(self):
driver = get_driver()
with open(solving, 'r') as f:
got = f.readlines()
_ = solution_format.format(r'(\w+)')
task_code, self.contestname = re.match(fr'//{_} (\w+)', got[0]).groups()
task_code = task_code.lower()
        self.prob_codes, *_ = re.match(r"//([\w+ ]+)", got[1]).groups()
self.prob_codes = self.prob_codes.split()
while driver.current_url != fr'https://atcoder.jp/contests/{self.contestname}/submit':
driver.get(fr'https://atcoder.jp/contests/{self.contestname}/submit')
for s in driver.find_elements_by_class_name('select2-selection--single'):
if s.get_attribute('aria-labelledby') != 'select2-select-task-container':
continue
else:
s.send_keys(' ')
selection_type = driver.find_element_by_class_name('select2-search__field')
selection_type.send_keys(task_code + ' -')
selection_type.send_keys(Keys.ENTER)
break
filtered = [s for s in driver.find_elements_by_class_name('select2-selection') if
s.get_attribute('role') == 'combobox' \
and s.get_attribute('tabindex') == '0' \
and s.find_elements_by_class_name('select2-selection__arrow')]
ffilt = []
for s in filtered:
try:
s.click()
s.click()
ffilt.append(s)
except Exception as e:
continue
ffilt[1].click()
selection_type = driver.find_element_by_class_name('select2-search__field')
selection_type.send_keys('c++')
selection_type.send_keys(Keys.ENTER)
simple_editor = driver.find_element_by_class_name('btn-toggle-editor')
while simple_editor.get_attribute('aria-pressed') != 'true':
simple_editor.click()
src = driver.find_element_by_name('sourceCode')
self.cp()
src.send_keys(Keys.CONTROL + 'v')
driver.find_element_by_id('submit').send_keys(Keys.ENTER)
lastsubmission = driver.find_elements_by_tag_name('tr')[1]
def report():
try:
while not any(stat in lastsubmission.text for stat in ['AC', 'RE', 'TLE', 'MLE', 'OLE', 'IE', 'WA']):
print(f'\r{"|".join(lastsubmission.text.split()[2:])}', end='\r')
time.sleep(0.3)
print(lastsubmission.text)
driver.get(fr'https://atcoder.jp/contests/{self.contestname}/submit')
                print('report thread died naturally')
except Exception:
                print('report thread died unnaturally')
self.check_score = Thread(target=report)
self.check_score.start()
if self.prob_codes[-1] != task_code:
try:
self.load(self.prob_codes[self.prob_codes.index(task_code) + 1])
except Exception as e:
print(f'unable to autoload next, either no more next or problem_code not defined:{e}')
self.check_score.join()
def prep(self, contestname):
driver = get_driver()
self.clr()
self.contestname = contestname = str(contestname)
driver.get(f'https://atcoder.jp/contests/{contestname}/tasks/')
problems = driver.find_elements_by_tag_name('tr')[1:]
problems = [tr.find_element_by_tag_name('td').find_element_by_tag_name('a') for tr in problems]
self.prob_codes = [x.text for x in problems]
for problink in problems:
problink.send_keys(Keys.CONTROL + Keys.ENTER)
for i, c in enumerate(self.prob_codes):
self.prept(c, i == 0)
while driver.current_url != fr'https://atcoder.jp/contests/{contestname}/submit':
driver.get(fr'https://atcoder.jp/contests/{contestname}/submit')
if __name__ == '__main__':
interface(bot_atcoder())
|
utils.py
|
#! coding:utf-8
# compatible for win32 / python 2 & 3
from __future__ import division, print_function
import argparse
import hashlib
import importlib
import json
import os
import pickle
import re
import shlex
import signal
import sys
import time
import timeit
from base64 import b64decode, b64encode
from codecs import open
from datetime import datetime
from fractions import Fraction
from functools import wraps
from logging import getLogger
from threading import Lock, Thread
from _codecs import escape_decode
from .configs import Config
from .exceptions import ImportErrorModule
from .logs import print_info
from .main import run_after_async, threads, tPool
from .versions import PY2, PY3
logger = getLogger("torequests")
if PY2:
import repr as reprlib
from Queue import Empty, PriorityQueue
from urllib import quote, quote_plus, unquote_plus
from urlparse import (
parse_qs,
parse_qsl,
urlparse,
unquote,
urljoin,
urlsplit,
urlunparse,
)
from cgi import escape
import HTMLParser
unescape = HTMLParser.HTMLParser().unescape
def retry(tries=1, exceptions=(Exception,), catch_exception=False):
def wrapper_sync(function):
@wraps(function)
def retry_sync(*args, **kwargs):
for _ in range(tries):
try:
return function(*args, **kwargs)
except exceptions as err:
error = err
if catch_exception:
return error
raise error
return retry_sync
return wrapper_sync
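    # A minimal usage sketch of the retry decorator defined above (hypothetical function, not part of this module):
    #   @retry(tries=3, exceptions=(IOError,), catch_exception=True)
    #   def flaky_read(path):
    #       with open(path) as f:
    #           return f.read()
    # With catch_exception=True the last exception object is returned instead of being raised after all tries fail.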
elif PY3:
import reprlib
from urllib.parse import (
parse_qs,
parse_qsl,
urlparse,
quote,
quote_plus,
unquote,
unquote_plus,
urljoin,
urlsplit,
urlunparse,
)
from html import escape, unescape
from queue import Empty, PriorityQueue
from ._py3_patch import retry
unicode = str
else:
logger.warning('Unhandled python version.')
__all__ = "parse_qs parse_qsl urlparse quote quote_plus unquote unquote_plus urljoin urlsplit urlunparse escape unescape simple_cmd print_mem get_mem curlparse Null null itertools_chain slice_into_pieces slice_by_size ttime ptime split_seconds timeago timepass md5 Counts unique unparse_qs unparse_qsl Regex kill_after UA try_import ensure_request Timer ClipboardWatcher Saver guess_interval split_n find_one register_re_findone Cooldown curlrequests sort_url_query retry get_readable_size encode_as_base64 decode_as_base64 check_in_time get_host find_jsons update_url".split(
" ")
NotSet = object()
def simple_cmd():
"""
``Deprecated``: Not better than ``fire`` -> pip install fire
"""
parser = argparse.ArgumentParser(
prog="Simple command-line function toolkit.",
description="""Input function name and args and kwargs.
python xxx.py main -a 1 2 3 -k a=1,b=2,c=3""",
)
parser.add_argument("-f", "--func_name", default="main")
parser.add_argument("-a", "--args", dest="args", nargs="*")
parser.add_argument("-k", "--kwargs", dest="kwargs")
parser.add_argument(
"-i",
"-s",
"--info",
"--show",
"--status",
dest="show",
action="store_true",
help="show the args, kwargs and function's source code.",
)
params = parser.parse_args()
func_name = params.func_name
func = globals().get(func_name)
if not (callable(func)):
logger.warning("invalid func_name: %s" % func_name)
return
args = params.args or []
kwargs = params.kwargs or {}
if kwargs:
items = [re.split("[:=]", i) for i in re.split("[,;]+", kwargs)]
kwargs = dict(items)
if params.show:
from inspect import getsource
logger.info("args: %s; kwargs: %s" % (args, kwargs))
logger.info(getsource(func))
return
func(*args, **kwargs)
def get_readable_size(input_num,
unit=None,
rounded=NotSet,
format="%s %s",
units=None,
carry=1024):
"""Show the num readable with unit.
:param input_num: raw number
:type input_num: float, int
:param unit: target unit, defaults to None for auto set.
:type unit: str, optional
    :param rounded: number of decimal places; defaults to NotSet, which returns the raw float without rounding.
:type rounded: None or int, optional
:param format: output string format, defaults to "%s %s"
:type format: str, optional
:param units: unit list, defaults to None for computer storage unit
:type units: list, optional
    :param carry: the carry factor between adjacent units, defaults to 1024
:type carry: int, optional
:return: string for input_num with unit.
:rtype: str
"""
units = units or ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'BB']
result_size = input_num
if unit in units:
result_size = input_num / (carry**units.index(unit))
else:
unit = units[0]
for idx, _unit in enumerate(units):
_result_size = input_num / (carry**units.index(_unit))
if _result_size < 1:
break
result_size = _result_size
unit = _unit
if rounded is not NotSet:
if rounded is None and PY2:
# PY2 rounded should not be None
result_size = int(result_size)
else:
result_size = round(result_size, rounded)
result = format % (result_size, unit)
return result
def print_mem(unit=None, callback=print_info, rounded=2):
"""Show the proc-mem-cost with psutil, use this only for lazinesssss.
:param unit: B, KB, MB, GB.
"""
result = get_mem(unit=unit, rounded=rounded)
if callback:
return callback(result)
return result
def get_mem(unit=None, callback=print_info, rounded=2, attribute='uss'):
try:
import psutil
memory_full_info = psutil.Process(os.getpid()).memory_full_info()
B = float(getattr(memory_full_info, attribute, memory_full_info.uss))
result = get_readable_size(B, unit=unit, rounded=rounded)
return result
except ImportError:
print("pip install psutil first.")
class _Curl:
"""Curl args parser.
**Use curlparse function directly.**
"""
parser = argparse.ArgumentParser()
parser.add_argument("curl")
parser.add_argument("--url", default='')
parser.add_argument("-X", "--request", default="")
parser.add_argument("-A", "--user-agent")
parser.add_argument("-e", "--referer")
parser.add_argument("-u", "--user") # <user[:password]>
parser.add_argument("-x", "--proxy") # proxy.com:port
parser.add_argument("-d", "--data", "--data-raw")
parser.add_argument("-F", "--form", "--form-string")
parser.add_argument("--data-binary")
parser.add_argument("--data-urlencode")
parser.add_argument("-I", "--head", action="store_true")
parser.add_argument("-L", "--location", action="store_true")
# for retry
parser.add_argument("--retry-max-time", type=int, default=0)
parser.add_argument("--connect-timeout", type=float)
parser.add_argument("-m", "--max-time", type=float)
# key: value
parser.add_argument("-H", "--header", action="append", default=[])
parser.add_argument("--compressed", action="store_true")
def curlparse(string, encoding="utf-8", remain_unknown_args=False):
"""Translate curl-string into dict of request. Do not support file upload which contains @file_path.
:param string: standard curl-string, like `r'''curl ...'''`.
:param encoding: encoding for post-data encoding.
Basic Usage::
>>> from torequests.utils import curlparse
>>> curl_string = '''curl 'https://p.3.cn?skuIds=1&nonsense=1&nonce=0' -H 'Pragma: no-cache' -H 'DNT: 1' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Cache-Control: no-cache' -H 'Referer: https://p.3.cn?skuIds=1&nonsense=1&nonce=0' -H 'Cookie: ASPSESSIONIDSQRRSADB=MLHDPOPCAMBDGPFGBEEJKLAF' -H 'Connection: keep-alive' --compressed'''
>>> request_args = curlparse(curl_string)
>>> request_args
{'url': 'https://p.3.cn?skuIds=1&nonsense=1&nonce=0', 'headers': {'Pragma': 'no-cache', 'Dnt': '1', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Cache-Control': 'no-cache', 'Referer': 'https://p.3.cn?skuIds=1&nonsense=1&nonce=0', 'Cookie': 'ASPSESSIONIDSQRRSADB=MLHDPOPCAMBDGPFGBEEJKLAF', 'Connection': 'keep-alive'}, 'method': 'get'}
>>> import requests
>>> requests.request(**request_args)
<Response [200]>
"""
def unescape_sig(s):
if s.startswith(escape_sig):
return decode_as_base64(s[len(escape_sig):], encoding=encoding)
else:
return s
escape_sig = u'fac4833e034b6771e5a1c74037e9153e'
string = string.replace('\\\n', ' ')
if string.startswith("http"):
return {"url": string, "method": "get"}
# escape $'' ANSI-C strings
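    # e.g. a curl command may contain bash ANSI-C quoting such as: --data-binary $'line1\nline2' (hypothetical example)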
for arg in re.findall(r"\$'[\s\S]*(?<!\\)'", string):
if PY2:
_escaped = escape_decode(bytes(arg[2:-1]))[0].decode(encoding)
else:
_escaped = escape_decode(bytes(arg[2:-1],
encoding))[0].decode(encoding)
string = string.replace(
arg, "'{}{}'".format(escape_sig,
encode_as_base64(_escaped, encoding=encoding)))
lex_list = shlex.split(string.strip())
args, unknown = _Curl.parser.parse_known_args(lex_list)
requests_args = {}
headers = {}
requests_args["url"] = unescape_sig(args.url)
if not requests_args["url"]:
for arg in unknown:
if re.match(r'https?://', arg):
requests_args["url"] = arg
break
# else:
# return None
for header in args.header:
key, value = unescape_sig(header).split(":", 1)
headers[key.title()] = value.strip()
if args.user_agent:
headers["User-Agent"] = unescape_sig(args.user_agent)
if args.referer:
headers["Referer"] = args.referer
if headers:
requests_args["headers"] = headers
if args.user:
requests_args["auth"] = [
u for u in unescape_sig(args.user).split(":", 1) + [""]
][:2]
# if args.proxy:
# pass
data = args.data or args.data_binary or args.form
if args.data_urlencode:
data = quote_plus(args.data_urlencode)
if data:
# if PY2:
# # not fix the UnicodeEncodeError, so use `replace`, damn python2.x.
# data = data.replace(r'\r', '\r').replace(r'\n', '\n')
# else:
# data = data.encode(
# 'latin-1',
# 'backslashreplace').decode('unicode-escape').encode(encoding)
requests_args["data"] = unescape_sig(data).encode(encoding)
if not args.request:
args.request = "post" if data else "get"
requests_args["method"] = args.request.lower()
if args.head:
requests_args['method'] = 'head'
if args.connect_timeout and args.max_time:
requests_args["timeout"] = (args.connect_timeout, args.max_time)
elif args.connect_timeout:
requests_args["timeout"] = args.connect_timeout
elif args.max_time:
requests_args["timeout"] = args.max_time
if remain_unknown_args:
requests_args['unknown_args'] = unknown
if args.location:
requests_args['allow_redirects'] = True
if args.retry_max_time:
requests_args['retry'] = args.retry_max_time
return requests_args
class Null(object):
"""Null instance will return self when be called, it will alway be False."""
def __init__(self, *args, **kwargs):
return
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, mname):
return self
def __setattr__(self, name, value):
return self
def __getitem__(self, key):
return self
def __delattr__(self, name):
return self
def __repr__(self):
return ""
def __str__(self):
return ""
def __bool__(self):
return False
def __nonzero__(self):
return False
null = Null()
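# Illustrative behaviour of the Null sentinel (follows directly from the class above):
#   null.anything.missing()[0] is null  -> True
#   bool(null)                          -> False
#   str(null)                           -> ''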
def itertools_chain(*iterables):
"""For the shortage of Python2's, Python3: `from itertools import chain`."""
for it in iterables:
for element in it:
yield element
def slice_into_pieces(seq, n):
"""Slice a sequence into `n` pieces, return a generation of n pieces.
::
>>> from torequests.utils import slice_into_pieces
>>> for chunk in slice_into_pieces(range(10), 3):
... print(chunk)
(0, 1, 2, 3)
(4, 5, 6, 7)
(8, 9)
"""
length = len(seq)
if length % n == 0:
size = length // n
else:
size = length // n + 1
for it in slice_by_size(seq, size):
yield it
def slice_by_size(seq, size):
"""Slice a sequence into chunks, return as a generation of chunks with `size`.
::
>>> from torequests.utils import slice_by_size
>>> for chunk in slice_by_size(range(10), 3):
... print(chunk)
(0, 1, 2)
(3, 4, 5)
(6, 7, 8)
(9,)
"""
filling = object()
for it in zip(*(itertools_chain(seq, [filling] * size),) * size):
if filling in it:
it = tuple(i for i in it if i is not filling)
if it:
yield it
def ttime(timestamp=None, tzone=None, fail="", fmt="%Y-%m-%d %H:%M:%S"):
"""Translate timestamp into human-readable: %Y-%m-%d %H:%M:%S.
:param timestamp: the timestamp float, or `time.time()` by default.
:param tzone: time compensation, int(-time.timezone / 3600) by default,
(can be set with Config.TIMEZONE).
    :param fail: value to return if an exception is raised.
    :param fmt: output format, %Y-%m-%d %H:%M:%S by default; %z is not supported.
:rtype: str
>>> ttime()
2018-03-15 01:24:35
>>> ttime(1486572818.421858323)
2017-02-09 00:53:38
"""
tzone = Config.TIMEZONE if tzone is None else tzone
fix_tz = tzone * 3600
if timestamp is None:
timestamp = time.time()
else:
timestamp = float(timestamp)
if 1e12 <= timestamp < 1e13:
            # handle 13-digit millisecond timestamps
timestamp = timestamp / 1000
try:
timestamp = time.time() if timestamp is None else timestamp
return time.strftime(fmt, time.gmtime(timestamp + fix_tz))
except Exception:
return fail
def ptime(timestr=None, tzone=None, fail=0, fmt="%Y-%m-%d %H:%M:%S"):
"""Translate %Y-%m-%d %H:%M:%S into timestamp.
:param timestr: string like 2018-03-15 01:27:56, or time.time() if not set.
:param tzone: time compensation, int(-time.timezone / 3600) by default,
(can be set with Config.TIMEZONE).
    :param fail: value to return if an exception is raised.
    :param fmt: input format, %Y-%m-%d %H:%M:%S by default; %z is not supported.
:rtype: int
>>> ptime('2018-03-15 01:27:56')
1521048476
"""
tzone = Config.TIMEZONE if tzone is None else tzone
fix_tz = -(tzone * 3600 + time.timezone)
#: str(timestr) for datetime.datetime object
timestr = str(timestr or ttime())
try:
return int(time.mktime(time.strptime(timestr, fmt)) + fix_tz)
except Exception:
return fail
def split_seconds(seconds):
"""Split seconds into [day, hour, minute, second, ms]
`divisor: 1, 24, 60, 60, 1000`
`units: day, hour, minute, second, ms`
>>> split_seconds(6666666)
[77, 3, 51, 6, 0]
"""
ms = seconds * 1000
divisors = (1, 24, 60, 60, 1000)
quotient, result = ms, []
for divisor in divisors[::-1]:
quotient, remainder = divmod(quotient, divisor)
result.append(quotient) if divisor == 1 else result.append(remainder)
return result[::-1]
def timeago(seconds=0, accuracy=4, format=0, lang="en", short_name=False):
"""Translate seconds into human-readable.
:param seconds: seconds (float/int).
    :param accuracy: 4 by default (units[:accuracy]); determines how many units are shown.
    :param format: 0 for LED-style output, 1 for literal words, 2 for a dict of units.
    :param lang: "en" or "cn".
    :param short_name: use abbreviated unit names if True. Units are day, hour, minute, second, ms.
>>> timeago(93245732.0032424, 5)
'1079 days, 05:35:32,003'
>>> timeago(93245732.0032424, 4, 1)
'1079 days 5 hours 35 minutes 32 seconds'
>>> timeago(-389, 4, 1)
'-6 minutes 29 seconds 0 ms'
"""
assert format in [0, 1,
2], ValueError("format arg should be one of 0, 1, 2")
negative = "-" if seconds < 0 else ""
is_en = lang == "en"
seconds = abs(seconds)
if is_en:
if short_name:
units = ("day", "hr", "min", "sec", "ms")
else:
units = ("day", "hour", "minute", "second", "ms")
elif lang == "cn":
if short_name:
units = (u"日", u"时", u"分", u"秒", u"毫秒")
else:
units = (u"天", u"小时", u"分钟", u"秒", u"毫秒")
times = split_seconds(seconds)
if format == 2:
return dict(zip(units, times))
day, hour, minute, second, ms = times
if format == 0:
day_str = ("%d %s%s, " %
(day, units[0], "s" if day > 1 and is_en else "")
if day else "")
mid_str = ":".join(("%02d" % i for i in (hour, minute, second)))
if accuracy > 4:
mid_str += ",%03d" % ms
return negative + day_str + mid_str
elif format == 1:
if seconds:
# find longest valid fields index (non-zero for head and tail)
for index, item in enumerate(times):
if item != 0:
head_index = index
break
for index, item in enumerate(reversed(times)):
if item != 0:
tail_index = len(times) - index
break
result_str = [
"%d %s%s" %
(num, unit, "s" if is_en and num > 1 and unit != "ms" else "")
for num, unit in zip(times, units)
][head_index:tail_index][:accuracy]
result_str = " ".join(result_str)
else:
result_str = "0 %s" % units[-1]
return negative + result_str
# alias name
timepass = timeago
def md5(string, n=32, encoding="utf-8", skip_encode=False):
"""str(obj) -> md5_string
:param string: string to operate.
:param n: md5_str length.
>>> from torequests.utils import md5
>>> md5(1, 10)
'923820dcc5'
>>> md5('test')
'098f6bcd4621d373cade4e832627b4f6'
"""
todo = string if skip_encode else unicode(string).encode(encoding)
if n == 32:
return hashlib.md5(todo).hexdigest()
elif isinstance(n, (int, float)):
return hashlib.md5(todo).hexdigest()[(32 - n) // 2:(n - 32) // 2]
elif isinstance(n, (tuple, list)):
return hashlib.md5(todo).hexdigest()[n[0]:n[1]]
class Counts(object):
"""Counter for counting the times been called
>>> from torequests.utils import Counts
>>> cc = Counts()
>>> cc.x
1
>>> cc.x
2
>>> cc.now
2
>>> cc.current
2
>>> cc.sub()
1
"""
__slots__ = ("start", "step", "current", "total")
def __init__(self, start=0, step=1):
self.start = start
self.step = step
self.current = start
self.total = -1
def clear(self):
self.current = self.start
@property
def x(self):
return self.add()
@property
def s(self):
return self.sub()
@property
def c(self):
return self.x
@property
def now(self):
return self.current
def add(self, num=None):
self.current += num or self.step
return self.current
def sub(self, num=None):
self.current -= num or self.step
return self.current
def unique(seq, key=None, return_as=None):
"""Unique the seq and keep the order.
Instead of the slow way:
`lambda seq: (x for index, x in enumerate(seq) if seq.index(x)==index)`
:param seq: raw sequence.
:param return_as: generator for default, or list / set / str...
>>> from torequests.utils import unique
>>> a = [1,2,3,4,2,3,4]
>>> unique(a)
<generator object unique.<locals>.<genexpr> at 0x05720EA0>
>>> unique(a, str)
'1234'
>>> unique(a, list)
[1, 2, 3, 4]
"""
seen = set()
add = seen.add
if key:
generator = (x for x in seq if key(x) not in seen and not add(key(x)))
else:
generator = (x for x in seq if x not in seen and not add(x))
if return_as:
if return_as == str:
return "".join(map(str, generator))
else:
return return_as(generator)
else:
# python2 not support yield from
return generator
def unparse_qs(qs, sort=False, reverse=False):
"""Reverse conversion for parse_qs"""
result = []
items = qs.items()
if sort:
items = sorted(items, key=lambda x: x[0], reverse=reverse)
for keys, values in items:
query_name = quote(keys)
for value in values:
result.append(query_name + "=" + quote(value))
return "&".join(result)
def unparse_qsl(qsl, sort=False, reverse=False):
"""Reverse conversion for parse_qsl"""
result = []
items = qsl
if sort:
items = sorted(items, key=lambda x: x[0], reverse=reverse)
for keys, values in items:
query_name = quote(keys)
result.append(query_name + "=" + quote(values))
return "&".join(result)
class Regex(object):
"""Register some objects(like functions) to the regular expression.
>>> from torequests.utils import Regex, re
>>> reg = Regex()
>>> @reg.register_function('http.*cctv.*')
... def mock():
... pass
...
>>> reg.register('http.*HELLOWORLD', 'helloworld', instances='http://helloworld', flags=re.I)
>>> reg.register('http.*HELLOWORLD2', 'helloworld2', flags=re.I)
>>> reg.find('http://cctv.com')
[<function mock at 0x031FC5D0>]
>>> reg.match('http://helloworld')
['helloworld']
>>> reg.match('non-http://helloworld')
[]
>>> reg.search('non-http://helloworld')
['helloworld']
>>> len(reg.search('non-http://helloworld2'))
2
>>> print(reg.show_all())
('http.*cctv.*') => => <class 'function'> mock ""
('http.*HELLOWORLD', re.IGNORECASE) => http://helloworld => <class 'str'> helloworld
('http.*HELLOWORLD2', re.IGNORECASE) => => <class 'str'> helloworld2
"""
def __init__(self, ensure_mapping=False):
"""
        :param ensure_mapping: ensure a one-to-one mapping; if False,
            return the list of all matched objects (possibly more than one)."""
self.container = []
self.ensure_mapping = ensure_mapping
def register(self, patterns, obj=None, instances=None, **reg_kwargs):
"""Register one object which can be matched/searched by regex.
:param patterns: a list/tuple/set of regex-pattern.
:param obj: return it while search/match success.
:param instances: instance list will search/match the patterns.
:param reg_kwargs: kwargs for re.compile.
"""
assert obj, "bool(obj) should be True."
patterns = patterns if isinstance(patterns,
(list, tuple, set)) else [patterns]
instances = instances or []
instances = (instances if isinstance(instances, (list, tuple,
set)) else [instances])
for pattern in patterns:
pattern_compiled = re.compile(pattern, **reg_kwargs)
self.container.append((pattern_compiled, obj, instances))
if self.ensure_mapping:
# check all instances to avoid one-to-many instances.
self._check_instances()
else:
# no need to check all instances.
for instance in instances:
assert self.search(instance) == [
obj
] or self.match(instance) == [obj], (
"instance %s should fit at least one pattern %s" %
(instance, pattern))
def register_function(self, patterns, instances=None, **reg_kwargs):
"""Decorator for register."""
def wrapper(function):
self.register(patterns, function, instances=instances, **reg_kwargs)
return function
return wrapper
def find(self, string, default=None):
"""Return match or search result.
:rtype: list"""
return self.match(string) or self.search(string) or default
def search(self, string, default=None):
"""Use re.search to find the result
:rtype: list"""
default = default if default else []
result = [item[1] for item in self.container if item[0].search(string)]
if self.ensure_mapping:
assert len(result) < 2, "%s matches more than one pattern: %s" % (
string,
result,
)
return result if result else default
def match(self, string, default=None):
"""Use re.search to find the result
:rtype: list"""
default = default if default else []
result = [item[1] for item in self.container if item[0].match(string)]
if self.ensure_mapping:
assert len(result) < 2, "%s matches more than one pattern: %s" % (
string,
result,
)
return result if result else default
def _check_instances(self):
for item in self.container:
for instance in item[2]:
assert self.search(instance) or self.match(
instance), "instance %s not fit pattern %s" % (
instance, item[0].pattern)
def show_all(self, as_string=True):
""", python2 will not show flags"""
result = []
for item in self.container:
pattern = str(item[0])[10:] if PY3 else item[0].pattern
instances = item[2] or []
value = ('%s "%s"' % (item[1].__name__, (item[1].__doc__ or ""))
if callable(item[1]) else str(item[1]))
value = "%s %s" % (type(item[1]), value)
result.append(" => ".join((pattern, ",".join(instances), value)))
return "\n".join(result) if as_string else result
def kill_after(seconds, timeout=2):
"""Kill self after seconds"""
pid = os.getpid()
kill = os.kill
run_after_async(seconds, kill, pid, signal.SIGTERM)
run_after_async(seconds + timeout, kill, pid, 9)
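# Illustrative use of kill_after: make sure a crawler process exits after an hour
# even if it hangs (SIGTERM first, SIGKILL `timeout` seconds later). The duration
# below is only an example.
# kill_after(3600)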
class UA:
"""Some common User-Agents for crawler.
Android, iPhone, iPad, Firefox, Chrome, IE6, IE9"""
__slots__ = ()
Android = "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Mobile Safari/537.36"
iPhone = "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1"
iPad = "Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1"
Firefox = (
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0"
)
Chrome = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"
IE6 = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
IE9 = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"
WECHAT_ANDROID = "Mozilla/5.0 (Linux; Android 5.0; SM-N9100 Build/LRX21V) > AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 > Chrome/37.0.0.0 Mobile Safari/537.36 > MicroMessenger/6.0.2.56_r958800.520 NetType/WIFI"
WECHAT_IOS = "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Mobile/9B176 MicroMessenger/4.3.2"
def try_import(module_name, names=None, default=ImportErrorModule, warn=True):
"""Try import module_name, except ImportError and return default,
sometimes to be used for catch ImportError and lazy-import.
"""
try:
module = importlib.import_module(module_name)
except ImportError:
if warn:
if warn is True:
logger.warning(
"Module `%s` not found. Install it to remove this warning" %
module_name)
else:
warn(module_name, names, default)
module = (ImportErrorModule(module_name)
if default is ImportErrorModule else default)
if not names:
return module
if not isinstance(names, (tuple, set, list)):
names = [names]
result = []
for name in names:
if hasattr(module, name):
result.append(module.__getattribute__(name))
else:
if default is ImportErrorModule:
result.append(ImportErrorModule("%s.%s" % (module_name, name)))
else:
result.append(default)
return result[0] if len(result) == 1 else result
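# Usage sketch (illustrative; `psutil` here is just a hypothetical optional dependency):
# >>> psutil = try_import('psutil', warn=False)   # module or ImportErrorModule placeholder
# >>> json_loads = try_import('json', 'loads')    # a single attribute is returned directly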
def ensure_request(request):
"""Used for requests.request / Requests.request with **ensure_request(request)**
:param request: dict or curl-string or url
:type request: [dict]
:return: dict of request
:rtype: [dict]
Basic Usage::
>>> from torequests.utils import ensure_request
>>> ensure_request('''curl http://test.com''')
{'url': 'http://test.com', 'method': 'get'}
>>> ensure_request('http://test.com')
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'method': 'get', 'url': 'http://test.com'})
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'url': 'http://test.com'})
{'url': 'http://test.com', 'method': 'get'}
"""
if isinstance(request, dict):
result = request
elif isinstance(request, (unicode, str)):
request = request.strip()
if request.startswith("http"):
result = {"method": "get", "url": request}
elif request.startswith("curl "):
result = curlparse(request)
else:
raise ValueError("request should be dict or str.")
result["method"] = result.setdefault("method", "get").lower()
return result
class Timer(object):
"""
Usage:
        init a Timer anywhere, such as at the head of a function or module;
        it will log the elapsed time once it is garbage-collected.
    :param name: name used in the log, or None to infer it from the calling frame.
    :param log_func: function used to emit the log, or None to print it.
    :param default_timer: `timeit.default_timer` by default.
    :param rounding: None, or seconds will be rounded with round(seconds, rounding).
    :param readable: None, or use `timepass`: readable(cost_seconds) -> 00:00:01,234
Basic Usage::
from torequests.utils import Timer
import time
Timer()
@Timer.watch()
def test(a=1):
Timer()
time.sleep(1)
def test_inner():
t = Timer('test_non_del')
time.sleep(1)
t.x
test_inner()
test(3)
time.sleep(1)
# [2018-03-10 02:16:48]: Timer [00:00:01]: test_non_del, start at 2018-03-10 02:16:47.
# [2018-03-10 02:16:48]: Timer [00:00:02]: test(a=3), start at 2018-03-10 02:16:46.
# [2018-03-10 02:16:48]: Timer [00:00:02]: test(3), start at 2018-03-10 02:16:46.
# [2018-03-10 02:16:49]: Timer [00:00:03]: <module>: __main__ (temp_code.py), start at 2018-03-10 02:16:46.
"""
def __init__(
self,
name=None,
log_func=None,
default_timer=None,
rounding=None,
readable=None,
log_after_del=True,
stack_level=1,
):
readable = readable or timepass
self._log_after_del = False
self.start_at = time.time()
uid = md5("%s%s" % (self.start_at, id(self)))
if not name:
f_name = sys._getframe(stack_level).f_code.co_name
f_local = sys._getframe(stack_level).f_locals
if f_name == "<module>":
f_vars = ": %s (%s)" % (
f_local.get("__name__"),
os.path.split(f_local.get("__file__"))[-1],
)
# f_vars = f_vars.replace(' __main__', '')
else:
f_vars = ("(%s)" % ", ".join([
"%s=%s" % (i, repr(f_local[i]))
for i in sorted(f_local.keys())
]) if f_local else "()")
if self not in f_local.values():
# add self to name space for __del__ way.
sys._getframe(stack_level).f_locals.update(**{uid: self})
name = "%s%s" % (f_name, f_vars)
self.name = name
self.log_func = log_func
self.timer = default_timer or timeit.default_timer
self.rounding = rounding
self.readable = readable
self.start_timer = self.timer()
self._log_after_del = log_after_del
@property
def string(self):
"""Only return the expect_string quietly."""
return self.tick()
@property
def x(self):
"""Call self.log_func(self) and return expect_string."""
self._log_after_del = False
passed_string = self.string
if self.log_func:
self.log_func(self)
else:
print_info("Timer [%(passed)s]: %(name)s, start at %(start)s." %
(dict(name=self.name,
start=ttime(self.start_at),
passed=passed_string)))
return passed_string
@property
def passed(self):
"""Return the cost_seconds after starting up."""
return self.timer() - self.start_timer
def tick(self):
"""Return the time cost string as expect."""
string = self.passed
        if self.rounding:
            string = round(string, self.rounding)
if self.readable:
string = self.readable(string)
return string
@staticmethod
def watch(*timer_args, **timer_kwargs):
"""Decorator for Timer."""
def wrapper(function):
@wraps(function)
def inner(*args, **kwargs):
args1 = ", ".join(map(repr, args)) if args else ""
kwargs1 = ", ".join([
"%s=%s" % (i, repr(kwargs[i]))
for i in sorted(kwargs.keys())
])
arg = ", ".join(filter(None, [args1, kwargs1]))
name = "%s(%s)" % (function.__name__, arg)
_ = Timer(name=name, *timer_args, **timer_kwargs)
result = function(*args, **kwargs)
return result
return inner
return wrapper
def __del__(self):
if self._log_after_del:
# not be called by self.x yet.
self.x
def __enter__(self):
return self
def __exit__(self, *args):
self.x
def ensure_dict_key_title(dict_obj):
"""Set the dict key as key.title(); keys should be str.
Always be used to headers.
>>> from torequests.utils import ensure_dict_key_title
>>> ensure_dict_key_title({'hello-world':1, 'HELLOWORLD':2})
{'Hello-World': 1, 'Helloworld': 2}
"""
if not all((isinstance(i, unicode) for i in dict_obj.keys())):
return dict_obj
return {key.title(): value for key, value in dict_obj.items()}
class TKClipboard(object):
"""Use tkinter to implement a simple pyperclip. Need python3-tk.
:: Example
from torequests.utils import TKClipboard
text = '123'
pyperclip = TKClipboard()
pyperclip.clear()
print(repr(pyperclip.paste()))
pyperclip.copy(text)
print(repr(pyperclip.paste()))
pyperclip.append(text)
print(repr(pyperclip.paste()))
# ''
# '123'
# '123123'
with TKClipboard() as pyperclip:
pyperclip.clear()
print(repr(pyperclip.paste()))
pyperclip.copy(text)
print(repr(pyperclip.paste()))
pyperclip.append(text)
print(repr(pyperclip.paste()))
# ''
# '123'
# '123123'
"""
def __init__(self):
from tkinter import Tk, TclError
self.root = Tk()
self.root.withdraw()
self.TclError = TclError
self.closed = False
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __del__(self, *args):
self.close()
def close(self):
if not self.closed:
self.root.destroy()
self.closed = True
def paste(self):
try:
return self.root.clipboard_get()
except self.TclError:
return ''
def copy(self, text):
self.clear()
self.append(text)
def append(self, text):
return self.root.clipboard_append(text)
def clear(self):
return self.root.clipboard_clear()
class ClipboardWatcher(object):
"""Watch clipboard with `pyperclip`, run callback while changed.
:: Example
from torequests.utils import ClipboardWatcher
ClipboardWatcher().x
"""
def __init__(self, interval=0.2, callback=None):
try:
import pyperclip
self.pyperclip = pyperclip
except ImportError:
try:
self.pyperclip = TKClipboard()
logger.warning('pyperclip is not installed, using tkinter.')
except ImportError:
logger.error(
'please install pyperclip or tkinter before using this tool.'
)
self.interval = interval
self.callback = callback or self.default_callback
self.temp = self.current
def read(self):
"""Return the current clipboard content."""
return self.pyperclip.paste()
def write(self, text):
"""Rewrite the current clipboard content."""
return self.pyperclip.copy(text)
@property
def current(self):
"""Return the current clipboard content."""
return self.read()
def default_callback(self, text):
"""Default clean the \\n in text."""
text = text.replace("\r\n", "\n")
text = "%s\n" % text
flush_print(text, sep="", end="")
return text
def watch(self, limit=None, timeout=None):
"""Block method to watch the clipboard changing."""
start_time = time.time()
count = 0
while not timeout or time.time() - start_time < timeout:
new = self.read()
if new != self.temp:
count += 1
self.callback(new)
if count == limit:
break
self.temp = new
time.sleep(self.interval)
@property
def x(self):
"""Return self.watch()"""
return self.watch()
@threads(1)
def watch_async(self, limit=None, timeout=None):
"""Non-block method to watch the clipboard changing."""
return self.watch(limit=limit, timeout=timeout)
class Saver(object):
"""
    Simple object-persistence toolkit using pickle/json,
    as long as you don't care about performance and security.
    **Do not set keys that start with "_".**
    :param path: if not set, defaults to ~/_saver.json (or .pkl / .db, depending on save_mode). print(self._path) to show it.
Set pickle's protocol < 3 for compatibility between python2/3,
but use -1 for performance and some other optimizations.
:param save_mode: pickle / json.
>>> ss = Saver()
>>> ss._path
'/home/work/_saver.json'
>>> ss.a = 1
>>> ss['b'] = 2
>>> str(ss)
{'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> del ss.b
>>> str(ss)
"{'a': 1, 'c': 3, 'd': 4}"
>>> ss._update({'c': 3, 'd': 4})
>>> ss
Saver(path="/home/work/_saver.json"){'a': 1, 'c': 3, 'd': 4}
"""
_instances = {}
_locks = {}
_protected_keys = {
"_auto_backup",
"_lock",
"_path",
"_saver_args",
"_save_mode",
"_cache",
"__getitem__",
"_keys",
"_values",
"__getattr__",
"__len__",
"_popitem",
"_shutdown",
"__setitem__",
"__delitem__",
"_save_obj",
"_get",
"__dict__",
"_clear",
"_locks",
"__weakref__",
"_items",
"__module__",
"_pop",
"__contains__",
"_reload",
"_load",
"_save",
"_update",
"_set",
"_protected_keys",
"_instances",
"_get_home_path",
"_save_back_up",
"_encoding",
}
_protected_keys = _protected_keys | set(object.__dict__.keys())
def __new__(cls,
path=None,
save_mode="json",
auto_backup=False,
encoding='utf-8',
**saver_args):
        # share one instance per path (Borg-like singleton keyed by path)
path = path or cls._get_home_path(save_mode=save_mode)
return cls._instances.setdefault(path, super(Saver, cls).__new__(cls))
def __init__(self,
path=None,
save_mode="json",
auto_backup=False,
encoding='utf-8',
**saver_args):
super(Saver, self).__init__()
self._auto_backup = auto_backup
self._encoding = encoding
self._lock = self.__class__._locks.setdefault(path, Lock())
self._path = path or self._get_home_path(save_mode=save_mode)
self._saver_args = saver_args
self._save_mode = save_mode
self._reload()
@classmethod
def _get_home_path(cls, save_mode=None):
home = os.path.expanduser("~")
if save_mode == "json":
ext = "json"
elif save_mode == "pickle":
ext = "pkl"
else:
ext = "db"
file_name = "_saver.%s" % ext
path = os.path.join(home, file_name)
return path
def _save_back_up(self):
with open(self._path, "rb") as f_raw:
with open(self._path + ".bk", "wb") as f_bk:
f_bk.write(f_raw.read())
def _save_obj(self, obj):
mode = "wb" if self._save_mode == "pickle" else "w"
with self._lock:
with open(self._path, mode, encoding=self._encoding) as f:
if self._save_mode == "json":
json.dump(obj, f, **self._saver_args)
if self._save_mode == "pickle":
pickle.dump(obj, f, **self._saver_args)
if self._auto_backup:
self._save_back_up()
return obj
def _reload(self):
self._cache = self._load()
def _load(self):
if not (os.path.isfile(self._path) and os.path.getsize(self._path)):
cache = {}
self._save_obj(cache)
return cache
mode = "rb" if self._save_mode == "pickle" else "r"
with self._lock:
with open(self._path, mode, encoding=self._encoding) as f:
if self._save_mode == "json":
return json.load(f)
if self._save_mode == "pickle":
return pickle.load(f)
def _save(self):
return self._save_obj(self._cache)
def _set(self, key, value):
if self._save_mode == "json":
try:
json.dumps(value)
except TypeError:
logger.warning(
"Saver._set(%s, %s) failed: bad type, using str(value) instead."
% (key, value))
value = str(value)
self._cache[key] = value
self._save()
def _get(self, key, default=None):
return self._cache.get(key, default)
def __setattr__(self, key, value):
if key in self._protected_keys:
object.__setattr__(self, key, value)
else:
self._set(key, value)
def __getattr__(self, key):
if key in self._protected_keys:
return object.__getattribute__(self, key)
return self._get(key)
def __contains__(self, key):
return key in self._cache
def __delattr__(self, key):
self._cache.pop(key, None)
self._save()
def __dir__(self):
return dir(object)
def __len__(self):
return len(self._cache)
def _clear(self):
self._cache = {}
self._save()
def _shutdown(self):
if self._auto_backup:
os.remove(self._path + ".bk")
return os.remove(self._path)
def _keys(self):
return self._cache.keys()
def _items(self):
return self._cache.items()
def _values(self):
return self._cache.values()
def _pop(self, key, default=None):
result = self._cache.pop(key, default)
self._save()
return result
def _popitem(self):
result = self._cache.popitem()
self._save()
return result
def _update(self, *args, **kwargs):
self._cache.update(*args, **kwargs)
self._save()
def __getitem__(self, key):
if key in self._cache:
return self._get(key)
raise KeyError
def __setitem__(self, key, value):
self._set(key, value)
def __delitem__(self, key):
self._cache.pop(key, None)
self._save()
def __str__(self):
return str(self._cache)
def __repr__(self):
return 'Saver(path="%s")%s' % (self._path, reprlib.repr(self._cache))
def guess_interval(nums, accuracy=0):
"""Given a seq of number, return the median, only calculate interval >= accuracy.
Basic Usage::
from torequests.utils import guess_interval
import random
seq = [random.randint(1, 100) for i in range(20)]
print(guess_interval(seq, 5))
# sorted_seq: [2, 10, 12, 19, 19, 29, 30, 32, 38, 40, 41, 54, 62, 69, 75, 79, 82, 88, 97, 99]
# diffs: [8, 7, 10, 6, 13, 8, 7, 6, 6, 9]
# median: 8
"""
if not nums:
return 0
nums = sorted([int(i) for i in nums])
if len(nums) == 1:
return nums[0]
diffs = [nums[i + 1] - nums[i] for i in range(len(nums) - 1)]
    diffs = [item for item in diffs if item >= accuracy]
    if not diffs:
        # no interval reached the accuracy threshold
        return 0
    sorted_diff = sorted(diffs)
    result = sorted_diff[len(diffs) // 2]
return result
def _re_split_mixin(string, sep, reg=False):
if reg:
return re.split(sep, string)
else:
return string.split(sep)
def split_n(string, seps, reg=False):
r"""Split strings into n-dimensional list.
Basic Usage::
from torequests.utils import split_n
ss = '''a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6'''
print(split_n(ss, ('\n', ' ', ' ')))
# [[['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']]]
print(split_n(ss, ['\s+'], reg=1))
# ['a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6']
"""
deep = len(seps)
if not deep:
return string
return [
        split_n(i, seps[1:], reg=reg) for i in _re_split_mixin(string, seps[0], reg=reg)
]
def bg(func):
"""Run a function in background, will not block main thread's exit.(thread.daemon=True)
Basic Usage::
from torequests.utils import bg, print_info
import time
def test1(n):
time.sleep(n)
print_info(n, 'done')
@bg
def test2(n):
time.sleep(n)
print_info(n, 'done')
test3 = bg(test1)
test2(1)
test3(1)
print_info('not be blocked')
time.sleep(2)
# [2018-06-12 23:46:19](L81): not be blocked
# [2018-06-12 23:46:20](L81): 1 done
# [2018-06-12 23:46:20](L81): 1 done
"""
@wraps(func)
def wrapper(*args, **kwargs):
t = Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
return wrapper
def countdown(
seconds=None,
block=True,
interval=1,
daemon=True,
tick_callback=None,
finish_callback=None,
):
"""Run a countdown function to wait something, similar to threading.Timer,
but will show the detail tick by tick_callback.
Basic Usage::
from torequests.utils import countdown
countdown(3)
# 3 2 1
# countdown finished [3 seconds]: 2018-06-13 00:12:55 => 2018-06-13 00:12:58.
countdown('2018-06-13 00:13:29')
# 10 9 8 7 6 5 4 3 2 1
# countdown finished [10 seconds]: 2018-06-13 00:13:18 => 2018-06-13 00:13:28.
"""
def default_tick_callback(s, seconds, *args):
flush_print(s, sep="", end=" ")
def default_finish_callback(seconds, start_time):
flush_print()
def cd(seconds, interval):
for s in range(seconds, 0, -interval):
tick_callback(s, seconds, interval)
time.sleep(interval)
if callable(finish_callback):
finish_callback(seconds, start_time)
start_time = time.time()
tick_callback = tick_callback or default_tick_callback
finish_callback = (default_finish_callback
if finish_callback is None else finish_callback)
if unicode(seconds).isdigit():
seconds = int(seconds)
elif isinstance(seconds, (unicode, str)):
seconds = int(ptime(seconds) - time.time())
t = Thread(target=cd, args=(seconds, interval))
t.daemon = daemon
t.start()
if block:
t.join()
def flush_print(*args, **kwargs):
"""
    Like Python 3's print function with flush support, but without file support.
:param sep: space by default
:param end: '\\n' by default
:param flush: True by default
Basic Usage::
import time
from torequests.utils import flush_print
flush_print("=" * 10)
for _ in range(10):
time.sleep(0.2)
flush_print("=", sep="", end="")
"""
# PY2 raise SyntaxError for : def flush_print(*args, sep='', end=''):
sep, end, flush = (
kwargs.pop("sep", " "),
kwargs.pop("end", "\n"),
kwargs.pop("flush", 1),
)
string = sep.join((unicode(i) for i in args))
sys.stdout.write("%s%s" % (string, end))
if flush:
sys.stdout.flush()
class ProgressBar(object):
"""Simple progress bar.
:param size: total counts of calling ProgressBar.x.
:param length: length of print log.
:param sig: string of each printing log.
Basic Usage::
pb = ProgressBar(50, 10)
for _ in range(50):
time.sleep(0.1)
pb.x
print("current completion rate:", pb.completion_rate)
# ==========
# ==========
# current completion rate: 1.0
"""
def __init__(self, size, length=100, sig="="):
self.size = size or 0
self.length = length
self.sig = sig
self.current = 0
self.last_print = 0
self.printed = 0
if size:
            # use Fraction to avoid floating-point division error
self.chunk = Fraction(self.size, self.length)
flush_print(self.sig * self.length)
else:
self.chunk = 1
def add(self, step):
# ensure step >= 0
self.current += step
count = int((self.current - self.last_print) / self.chunk)
if count < 1:
return self.printed
for _ in range(count):
self.printed += 1
flush_print(self.sig, end="")
self.last_print = count * self.chunk + self.last_print
if self.current == self.size:
flush_print()
return self.printed
@property
def x(self):
return self.add(1)
@property
def completion_rate(self):
return self.current / self.size
class RegMatch(object):
"""JS-like match object. Use index number to get groups, if not match or no group, will return ''."""
def __init__(self, item):
self.item = item
def __getattr__(self, key, default=null):
return getattr(self.item, key, default)
def __getitem__(self, index):
if self.item is None:
return ""
if not isinstance(index, int):
raise IndexError
try:
return self.item.group(index)
except IndexError:
return ""
def __bool__(self):
return bool(self.item)
def __nonzero__(self):
return bool(self.item)
@classmethod
def find_one(cls, pattern, string, flags=0):
"""JS-like match object. Use index number to get groups, if not match or no group, will return ''.
Basic Usage::
>>> from torequests.utils import find_one
>>> string = "abcd"
>>> find_one("a.*", string)
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> find_one("a.*", string)[0]
'abcd'
>>> find_one("a.*", string)[1]
''
>>> find_one("a(.)", string)[0]
'ab'
>>> find_one("a(.)", string)[1]
'b'
>>> find_one("a(.)", string)[2] or "default"
'default'
>>> import re
>>> item = find_one("a(B)(C)", string, flags=re.I | re.S)
>>> item
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> item[0]
'abc'
>>> item[1]
'b'
>>> item[2]
'c'
>>> item[3]
''
>>> # import re
>>> # re.findone = find_one
>>> register_re_findone()
>>> re.findone('a(b)', 'abcd')[1] or 'default'
'b'
"""
item = re.search(pattern, string, flags=flags)
return cls(item)
find_one = RegMatch.find_one
def register_re_findone():
"""import re; re.findone = find_one"""
re.findone = find_one
class TimeItem(object):
"""Used for Cooldown."""
__slots__ = ('data', 'use_at')
def __init__(self, data, use_at):
self.data = data
self.use_at = use_at
def __hash__(self):
return hash(self.data)
def __gt__(self, other):
return self.use_at > other.use_at
def __ge__(self, other):
return self.use_at >= other.use_at
def __lt__(self, other):
return self.use_at < other.use_at
def __le__(self, other):
return self.use_at <= other.use_at
def __eq__(self, other):
return self.use_at == other.use_at
def __ne__(self, other):
return self.use_at != other.use_at
class Cooldown(object):
"""Thread-safe Cooldown toolkit.
:param init_items: iterables to add into the default queue at first.
    :param interval: each item cools down for `interval` seconds before it can be returned again.
    :param born_at_now: if True, item.use_at is set to time.time()
        instead of 0 when the item is first added to the queue.
>>> from torequests.logs import print_info
>>> cd = Cooldown(range(1, 3), interval=2)
>>> cd.add_items([3, 4])
>>> cd.add_item(5)
>>> for _ in range(7):
... print_info(cd.get(1, 'timeout'))
[2019-01-17 01:50:59] pyld.py(152): 1
[2019-01-17 01:50:59] pyld.py(152): 3
[2019-01-17 01:50:59] pyld.py(152): 5
[2019-01-17 01:50:59] pyld.py(152): 2
[2019-01-17 01:50:59] pyld.py(152): 4
[2019-01-17 01:51:00] pyld.py(152): timeout
[2019-01-17 01:51:01] pyld.py(152): 1
>>> cd.size
5
"""
def __init__(self, init_items=None, interval=0, born_at_now=False):
self.interval = interval
self.queue = PriorityQueue()
self.use_at_function = self.get_now_timestamp if born_at_now else lambda: 0
self.add_items(init_items or [])
@property
def size(self):
return self.queue.qsize()
@property
def all_items(self):
return [item.data for item in self.queue.queue]
def get_now_timestamp(self):
return time.time()
def add_item(self, item):
if not isinstance(item, TimeItem):
item = TimeItem(item, self.use_at_function())
self.queue.put(item)
def add_items(self, items):
for item in items:
self.add_item(item)
def remove_item(self, item):
self.queue.queue = [i for i in self.queue.queue if i.data != item]
return self.queue.qsize()
def remove_items(self, items):
        self.queue.queue = [i for i in self.queue.queue if i.data not in items]
return self.queue.qsize()
def get(self, timeout=None, default=None):
try:
start_time = time.time()
if timeout is None:
timeout = float('inf')
while time.time() - start_time < timeout:
item = self.queue.get(timeout=timeout)
if time.time() - item.use_at < self.interval:
self.queue.put(item)
wait_time = self.interval - (time.time() - item.use_at)
wait_time = min((wait_time, timeout))
time.sleep(wait_time)
continue
item.use_at = self.get_now_timestamp()
self.queue.put(item)
return item.data
else:
return default
except Empty:
return default
def curlrequests(curl_string, **kwargs):
"""Use tPool to request for curl string.
If kwargs contains the req which hasattr request method, like req=requests.
:param curl_string: standard curl string.
:type curl_string: str
:param kwargs: valid kwargs for tPool.
    :type kwargs: dict
Basic Usage::
from torequests.utils import curlrequests
r = curlrequests('''curl 'http://p.3.cn/' -H 'Connection: keep-alive' -H 'Cache-Control: max-age=0' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36' -H 'DNT: 1' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8' -H 'If-None-Match: "55dd9090-264"' -H 'If-Modified-Since: Wed, 26 Aug 2015 10:10:24 GMT' --compressed''', retry=1)
print(r.text)
"""
req = kwargs.pop('req', tPool())
kwargs.update(curlparse(curl_string))
return req.request(**kwargs)
def sort_url_query(url, reverse=False, _replace_kwargs=None):
"""sort url query args.
_replace_kwargs is a dict to update attributes before sorting (such as scheme / netloc...).
http://www.google.com?b=2&z=26&a=1 => http://www.google.com?a=1&b=2&z=26
"""
parsed = urlparse(url)
if _replace_kwargs:
parsed = parsed._replace(**_replace_kwargs)
sorted_parsed = parsed._replace(
query=unparse_qsl(parse_qsl(parsed.query), sort=True, reverse=reverse))
return urlunparse(sorted_parsed)
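# Illustrative example (mirrors the docstring above): normalizing query order so that
# equivalent URLs compare equal, e.g. before de-duplicating crawl targets.
# >>> sort_url_query('http://www.google.com?b=2&z=26&a=1')
# 'http://www.google.com?a=1&b=2&z=26'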
def encode_as_base64(string, encoding='utf-8'):
return b64encode(string.encode(encoding)).decode(encoding)
def decode_as_base64(string, encoding='utf-8'):
return b64decode(string.encode(encoding)).decode(encoding)
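# Round-trip sketch for the base64 helpers above (illustrative):
# >>> encode_as_base64('torequests')
# 'dG9yZXF1ZXN0cw=='
# >>> decode_as_base64('dG9yZXF1ZXN0cw==')
# 'torequests'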
def _check_in_time(time_string, now=None):
now = now or datetime.now()
if '==' in time_string:
# check time_string with strftime: %Y==2020
fmt, target = time_string.split('==')
current = now.strftime(fmt)
        # check that the current formatted time equals the target
return current == target
elif '!=' in time_string:
# check time_string with strftime: %Y!=2020
fmt, target = time_string.split('!=')
current = now.strftime(fmt)
        # check that the current formatted time differs from the target
return current != target
else:
# other hours format: [1, 3, 11, 23]
current_hour = now.hour
if time_string[0] == '[' and time_string[-1] == ']':
time_string_list = sorted(json.loads(time_string))
else:
nums = [int(num) for num in re.findall(r'\d+', time_string)]
time_string_list = sorted(range(*nums))
# check if current_hour is work hour
return current_hour in time_string_list
def check_in_time(time_string, now=None):
"""Check the datetime whether it fit time_string. Support logic symbol:
equal => '=='
not equal => '!='
or => '|'
and => ';' or '&'
:: Test Code
from torequests.utils import check_in_time, datetime
now = datetime.strptime('2020-03-14 11:47:32', '%Y-%m-%d %H:%M:%S')
oks = [
'0, 24',
'[1, 2, 3, 11]',
'[1, 2, 3, 11];%Y==2020',
'%d==14',
'16, 24|[11]',
'16, 24|%M==47',
'%M==46|%M==47',
'%H!=11|%d!=12',
'16, 24|%M!=41',
]
for time_string in oks:
ok = check_in_time(time_string, now)
print(ok, time_string)
assert ok
no_oks = [
'0, 5',
'[1, 2, 3, 5]',
'[1, 2, 3, 11];%Y==2021',
'%d==11',
'16, 24|[12]',
'%M==17|16, 24',
'%M==46|[1, 2, 3]',
'%H!=11&%d!=12',
'%M!=46;%M!=47',
]
for time_string in no_oks:
ok = check_in_time(time_string, now)
print(ok, time_string)
assert not ok
"""
if '|' in time_string:
if '&' in time_string or ';' in time_string:
raise ValueError('| can not use with "&" or ";"')
return any((_check_in_time(partial_work_hour, now)
for partial_work_hour in time_string.split('|')))
else:
if ('&' in time_string or ';' in time_string) and '|' in time_string:
raise ValueError('| can not use with "&" or ";"')
return all((_check_in_time(partial_work_hour, now)
for partial_work_hour in re.split('&|;', time_string)))
def get_host(url):
if not url:
return url
return urlparse(url).netloc
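# Quick sketch of get_host (illustrative; the URL is made up):
# >>> get_host('https://example.com/path?q=1')
# 'example.com'
# >>> get_host('')
# ''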
def find_jsons(string, return_as='json', json_loader=None):
"""Generator for finding the valid JSON string, only support dict and list.
return_as could be 'json' / 'object' / 'index'.
::
>>> from torequests.utils import find_jsons
>>> list(find_jsons('string["123"]123{"a": 1}[{"a": 1, "b": [1,2,3]}]'))
['["123"]', '{"a": 1}', '[{"a": 1, "b": [1,2,3]}]']
>>> list(find_jsons('string[]{}{"a": 1}'))
['[]', '{}', '{"a": 1}']
>>> list(find_jsons('string[]|{}string{"a": 1}', return_as='index'))
[(6, 8), (9, 11), (17, 25)]
>>> list(find_jsons('xxxx[{"a": 1, "b": [1,2,3]}]xxxx', return_as='object'))
[[{'a': 1, 'b': [1, 2, 3]}]]
"""
def find_matched(string, left, right):
_stack = []
for index, char in enumerate(string):
if char == left:
_stack.append(index)
elif char == right:
try:
_stack.pop()
except IndexError:
break
else:
continue
if not _stack:
return index
json_loader = json_loader or json.loads
search = re.search
brackets_map = {'{': '}', '[': ']'}
current_start = 0
while string and isinstance(string, str):
_match = search(r'[\[\{]', string)
if not _match:
break
left = _match.group()
right = brackets_map[left]
_start = _match.span()[0]
sub_string = string[_start:]
_end = find_matched(sub_string, left, right)
if _end is None:
            # no matching close bracket; skip past this bracket and keep looking
            string = sub_string[1:]
            current_start += _start + 1
            continue
string = sub_string[_end + 1:]
try:
_partial = sub_string[:_end + 1]
_loaded_result = json_loader(_partial)
yield {
'json': _partial,
'object': _loaded_result,
'index':
(current_start + _start, current_start + _start + _end + 1),
}.get(return_as, string)
except (ValueError, TypeError):
pass
current_start += _start + _end + 1
def update_url(url, params=None, **_params):
"""Update your URL with given params.
:param url: raw URL
:type url: str
    :param params: new params; keys whose value is None are removed from the URL.
:type params: dict
Basic Usage::
from torequests.utils import update_url
print(update_url('http://httpbin.org/get?a=1&b=2', {'a': '2', 'b': None}, c='3'))
# http://httpbin.org/get?a=2&c=3
"""
if params:
_params.update(params)
parsed_url = urlparse(url)
qls_dict = dict(parse_qsl(parsed_url.query))
for key, value in _params.items():
if value is None:
qls_dict.pop(key, None)
continue
else:
qls_dict[key] = str(value)
return urlunparse(parsed_url._replace(query=unparse_qsl(qls_dict.items())))
|
interface_rpc.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
from threading import Thread
import subprocess
def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args):
try:
fcn(*args)
raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
except JSONRPCException as exc:
assert_equal(exc.error["code"], expected_rpc_code)
assert_equal(exc.http_status, expected_http_status)
def test_work_queue_getblock(node, got_exceeded_error):
while not got_exceeded_error:
try:
node.cli('getrpcinfo').send_cli()
except subprocess.CalledProcessError as e:
assert_equal(e.output, 'error: Server response: Work queue depth exceeded\n')
got_exceeded_error.append(True)
class RPCInterfaceTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def test_getrpcinfo(self):
self.log.info("Testing getrpcinfo...")
info = self.nodes[0].getrpcinfo()
assert_equal(len(info['active_commands']), 1)
command = info['active_commands'][0]
assert_equal(command['method'], 'getrpcinfo')
assert_greater_than_or_equal(command['duration'], 0)
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
# A basic request that will work fine.
{"method": "getblockcount", "id": 1},
# Request that will fail. The whole batch request should still
# work fine.
{"method": "invalidmethod", "id": 2},
# Another call that should succeed.
{"method": "getblockhash", "id": 3, "params": [0]},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def test_http_status_codes(self):
self.log.info("Testing HTTP status codes for JSON-RPC requests...")
expect_http_status(404, -32601, self.nodes[0].invalidmethod)
expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
def test_work_queue_exceeded(self):
self.log.info("Testing work queue exceeded...")
self.restart_node(0, ['-rpcworkqueue=1', '-rpcthreads=1'])
got_exceeded_error = []
threads = []
for _ in range(3):
t = Thread(target=test_work_queue_getblock, args=(self.nodes[0], got_exceeded_error))
t.start()
threads.append(t)
for t in threads:
t.join()
def run_test(self):
self.test_getrpcinfo()
self.test_batch_request()
self.test_http_status_codes()
self.test_work_queue_exceeded()
if __name__ == '__main__':
RPCInterfaceTest().main()
|
training.py
|
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import threading
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
check_batch_dim=True,
exception_prefix=''):
'''Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
'''
if type(data) is dict:
arrays = []
for name in names:
if name not in data:
raise Exception('No data provided for "' +
name + '". Need data for each key in: ' +
str(data.keys()))
arrays.append(data[name])
elif type(data) is list:
if len(data) != len(names):
if len(data) > 0 and hasattr(data[0], 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise Exception('Error when checking ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) != 1:
# case: model expects multiple inputs but only received
# a single Numpy array
raise Exception('The model expects ' + str(len(names)) +
' input arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# make arrays at least 2D
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# check shapes compatibility
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_dim:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
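# Illustrative sketch (not part of the original file): a single 1D input keyed by a
# hypothetical name is normalized into a list with one (10, 1) array.
# >>> x = np.arange(10)
# >>> out = standardize_input_data({'main_input': x}, ['main_input'],
# ...                              shapes=[(None, 1)], check_batch_dim=False)
# >>> out[0].shape
# (10, 1)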
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if type(x_weight) is list and len(x_weight) == 1:
return x_weight
if type(x_weight) is dict and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if type(x_weight) is list:
if len(x_weight) != len(output_names):
raise Exception('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
                            ' array per model output.')
return x_weight
if type(x_weight) is dict:
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise Exception('The model has multiple outputs, so `' +
weight_type + '` '
                        'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
def check_array_lengths(X, Y, W):
x_lengths = [x.shape[0] for x in X]
y_lengths = [y.shape[0] for y in Y]
w_lengths = [w.shape[0] for w in W]
set_x = set(x_lengths)
if len(set_x) != 1:
raise Exception('All input arrays (x) should have '
'the same number of samples.')
set_y = set(y_lengths)
if len(set_y) != 1:
raise Exception('All target arrays (y) should have '
'the same number of samples.')
set_w = set(w_lengths)
if len(set_w) != 1:
raise Exception('All sample_weight arrays should have '
'the same number of samples.')
if list(set_x)[0] != list(set_y)[0]:
raise Exception('Input arrays should have '
'the same number of samples as target arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_y)[0]) + ' target samples.')
if list(set_x)[0] != list(set_w)[0]:
raise Exception('Sample_weight arrays should have '
'the same number of samples as input arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
assert len(targets) == len(losses) == len(output_shapes)
    key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, losses, output_shapes):
if loss.__name__ == 'categorical_crossentropy':
if y.shape[1] == 1:
raise Exception('You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses and shape[1] is not None and y.shape[1] != shape[1]:
raise Exception('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def collect_metrics(metrics, output_names):
if not metrics:
return [[] for _ in output_names]
if type(metrics) is list:
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif type(metrics) is dict:
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if type(output_metrics) is not list:
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise Exception('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
def collect_trainable_weights(layer):
trainable = getattr(layer, 'trainable', True)
if not trainable:
return []
weights = []
if layer.__class__.__name__ == 'Sequential':
for sublayer in layer.flattened_layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Model':
for sublayer in layer.layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Graph':
for sublayer in layer._graph_nodes.values():
weights += collect_trainable_weights(sublayer)
else:
weights += layer.trainable_weights
return weights
def batch_shuffle(index_array, batch_size):
'''This shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
'''
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
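# Illustrative example: indices are shuffled batch-by-batch so each batch stays
# contiguous (useful for HDF5 reads); the leftover tail is appended unshuffled.
# >>> batch_shuffle(np.arange(10), batch_size=4).shape
# (10,)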
def make_batches(size, batch_size):
'''Returns a list of batch indices (tuples of indices).
'''
nb_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, nb_batch)]
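# Illustrative example of the (start, end) tuples produced above:
# >>> make_batches(10, 3)
# [(0, 3), (3, 6), (6, 9), (9, 10)]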
def slice_X(X, start=None, stop=None):
'''This takes an array-like, or a list of
array-likes, and outputs:
- X[start:stop] if X is an array-like
    - [x[start:stop] for x in X] if X is a list
Can also work on list/array of indices: `slice_X(x, indices)`
# Arguments:
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
'''
if type(X) == list:
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return X[start]
else:
return X[start:stop]
def weighted_objective(fn):
'''Transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
'''
def weighted(y_true, y_pred, weights, mask=None):
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
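# Illustrative sketch (not from the original file): wrap a base objective so it also
# takes per-sample weights and an optional mask; the tensors below are assumed to be
# symbolic backend tensors created elsewhere.
# weighted_mse = weighted_objective(objectives.get('mse'))
# loss_tensor = weighted_mse(y_true, y_pred, sample_weights, mask=None)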
def standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
'''Performs weight input validation and standardization
to a single sample-wise (or timestep-wise) weight array.
'''
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise Exception('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise Exception('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
assert len(sample_weight.shape) <= len(y.shape)
# TODO: proper error message
assert y.shape[:sample_weight.ndim] == sample_weight.shape
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise Exception('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],))
else:
return np.ones((y.shape[0], y.shape[1]))
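# Worked example (illustrative, not part of the original module): with
# `class_weight={0: 1., 1: 5.}` and one-hot targets
# y = [[1, 0], [0, 1], [1, 0]], the argmax gives classes [0, 1, 0] and the
# returned per-sample weights are [1., 5., 1.]. With no weighting arguments,
# an all-ones array of shape (nb_samples,) is returned instead.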
def generator_queue(generator, max_q_size=10,
wait_time=0.05, nb_worker=1):
'''Builds a threading queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
'''
q = queue.Queue()
_stop = threading.Event()
def data_generator_task():
while not _stop.is_set():
try:
if q.qsize() < max_q_size:
try:
generator_output = next(generator)
except ValueError:
continue
q.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
_stop.set()
raise
generator_threads = [threading.Thread(target=data_generator_task)
for _ in range(nb_worker)]
for thread in generator_threads:
thread.daemon = True
thread.start()
return q, _stop
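# Usage sketch (assumption, mirroring how fit_generator consumes the queue;
# `my_generator` and `training` are placeholders for illustration):
#
#   q, _stop = generator_queue(my_generator, max_q_size=10)
#   try:
#       while training:
#           while q.empty():
#               time.sleep(0.01)
#           x, y = q.get()
#           # ... train on (x, y) ...
#   finally:
#       _stop.set()   # signals the worker threads to exit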
class Model(Container):
def compile(self, optimizer, loss, metrics=[], loss_weights=None,
sample_weight_mode=None, **kwargs):
'''Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [objectives](/objectives).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of objectives.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: optional list or dictionary specifying scalar
coefficients to weight the loss contributions
of different model outputs.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
kwargs: when using the Theano backend, these arguments
are passed into K.function. Ignored for the TensorFlow backend.
'''
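# Usage sketch (illustrative, not executed here): a two-output model might
# be compiled as
#   model.compile(optimizer='rmsprop',
#                 loss={'output_a': 'mse', 'output_b': 'binary_crossentropy'},
#                 loss_weights={'output_a': 1., 'output_b': 0.5},
#                 metrics=['accuracy'])
# where 'output_a'/'output_b' stand in for the model's actual output names.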
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif type(loss_weights) is dict:
for name in loss_weights:
if name not in self.output_names:
raise Exception('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif type(loss_weights) is list:
if len(loss_weights) != len(self.outputs):
raise Exception('When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise Exception('Could not interpret loss_weights argument: ' +
str(loss_weights))
# prepare loss functions
if type(loss) is dict:
for name in loss:
if name not in self.output_names:
raise Exception('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
raise Exception('Output "' + name +
'" missing from loss dictionary')
loss_functions.append(objectives.get(loss[name]))
elif type(loss) is list:
if len(loss) != len(self.outputs):
raise Exception('When passing a list as loss, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [weighted_objective(fn) for fn in loss_functions]
# prepare output masks
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if type(masks) is not list:
masks = [masks]
# prepare sample weights
if type(sample_weight_mode) is dict:
for name in sample_weight_mode:
if name not in self.output_names:
raise Exception('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
sample_weights = []
sample_weight_modes = []
for name in self.output_names:
if name not in sample_weight_mode:
raise Exception('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif type(sample_weight_mode) is list:
if len(sample_weight_mode) != len(self.outputs):
raise Exception('When passing a list as sample_weight_mode, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed sample_weight_mode=' +
str(sample_weight_mode))
sample_weights = []
sample_weight_modes = []
for mode, name in zip(sample_weight_mode, self.output_names):
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
if sample_weight_mode == 'temporal':
sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = ['temporal' for name in self.output_names]
else:
sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = [None for name in self.output_names]
self.sample_weight_modes = sample_weight_modes
# prepare targets of model
self.targets = []
for i in range(len(self.outputs)):
shape = self.internal_output_shapes[i]
name = self.output_names[i]
self.targets.append(K.placeholder(ndim=len(shape), name=name + '_target'))
# prepare metrics
self.metrics_names = ['loss']
self.metrics = []
# compute total loss
total_loss = None
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
# add regularization penalties to the loss
for r in self.regularizers:
total_loss = r(total_loss)
# list of same size as output_names.
# contains tuples (metrics for output, names of metrics)
nested_metrics = collect_metrics(metrics, self.output_names)
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy (because of class mode duality)
output_shape = self.internal_output_shapes[i]
if output_shape[-1] == 1:
# case: binary accuracy
self.metrics.append(metrics_module.binary_accuracy(y_true, y_pred))
elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
self.metrics.append(
metrics_module.sparse_categorical_accuracy(y_true, y_pred))
else:
# case: categorical accuracy with dense targets
self.metrics.append(metrics_module.categorical_accuracy(y_true, y_pred))
if len(self.output_names) == 1:
self.metrics_names.append('acc')
else:
self.metrics_names.append(self.output_layers[i].name + '_acc')
else:
metric_fn = metrics_module.get(metric)
self.metrics.append(metric_fn(y_true, y_pred))
if len(self.output_names) == 1:
self.metrics_names.append(metric_fn.__name__)
else:
self.metrics_names.append(self.output_layers[i].name + '_' + metric_fn.__name__)
# prepare gradient updates and state updates
self.optimizer = optimizers.get(optimizer)
self.total_loss = total_loss
self.sample_weights = sample_weights
# functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise Exception('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# get trainable weights
trainable_weights = []
for layer in self.layers:
trainable_weights += collect_trainable_weights(layer)
training_updates = self.optimizer.get_updates(trainable_weights, self.constraints, self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise Exception('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase:
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True,
callback_metrics=[]):
'''Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
# Returns
`History` object.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(len(ins[0]), len(val_ins[0])))
nb_train_sample = len(ins[0])
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise Exception('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_dim=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise Exception('You must compile a model before training/testing.'
' Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(objectives, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False,
exception_prefix='model input')
y = standardize_input_data(y, self.output_names,
output_shapes,
check_batch_dim=False,
exception_prefix='model target')
sample_weights = standardize_sample_weights(sample_weight,
self.output_names)
class_weights = standardize_class_weights(class_weight,
self.output_names)
sample_weights = [standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
check_array_lengths(x, y, sample_weights)
check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None):
'''Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
nb_epoch: integer, the number of times to iterate over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate the loss and any model metrics
at the end of each epoch. The model will not be trained on this data.
This could be a tuple (x_val, y_val) or a tuple (val_x, val_y, val_sample_weights).
shuffle: boolean, whether to shuffle the training data before each epoch.
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
'''
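# Usage sketch (illustrative): a typical single-input, single-output call,
# with `x_train`/`y_train` standing in for the user's data:
#   history = model.fit(x_train, y_train,
#                       batch_size=32, nb_epoch=10,
#                       validation_split=0.1, shuffle=True)
#   # history.history['loss'] / ['val_loss'] hold the per-epoch values.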
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare validation data
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y) or (val_x, val_y, val_sample_weights). '
'Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y,
sample_weight=val_sample_weight,
check_batch_dim=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
sample_weights, val_sample_weights = (slice_X(sample_weights, 0, split_at), slice_X(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# prepare input arrays and training function
if self.uses_learning_phase:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# prepare display labels
out_labels = self.metrics_names
# rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows)
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
out_labels = deduped_out_labels
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# delegate logic to _fit_loop
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
'''Returns the loss value and metrics values for the model
in test mode. Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if self.uses_learning_phase:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
'''Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array
(or list of Numpy arrays if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
A Numpy array of predictions.
'''
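# Usage sketch (illustrative): predictions for a held-out set, with
# `x_test` standing in for the user's data:
#   preds = model.predict(x_test, batch_size=128, verbose=1)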
# validate user data
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# prepare inputs, delegate logic to _predict_loop
if self.uses_learning_phase:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
'''Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=True)
if self.uses_learning_phase:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
'''Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=True)
if self.uses_learning_phase:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
'''Returns predictions for a single batch of samples.
'''
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes)
if self.uses_learning_phase:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=[],
validation_data=None, nb_val_samples=None,
class_weight={}, max_q_size=10):
'''Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `samples_per_epoch`
samples have been seen by the model.
samples_per_epoch: integer, number of samples to process before
going to the next epoch.
nb_epoch: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
nb_val_samples: only relevant if `validation_data` is a generator.
number of samples to use from validation generator
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
samples_per_epoch=10000, nb_epoch=10)
```
'''
wait_time = 0.01 # in seconds
epoch = 0
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not nb_val_samples:
raise Exception('When using a generator for validation data, '
'you must specify a value for "nb_val_samples".')
out_labels = self.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'nb_epoch': nb_epoch,
'nb_sample': samples_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y, val_sample_weight) '
'or (val_x, val_y). Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
self.validation_data = val_x + [val_y, val_sample_weights]
else:
self.validation_data = None
# start generator thread storing batches into a queue
data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)
callback_model.stop_training = False
while epoch < nb_epoch:
callbacks.on_epoch_begin(epoch)
samples_seen = 0
batch_index = 0
while samples_seen < samples_per_epoch:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if type(x) is list:
batch_size = len(x[0])
elif type(x) is dict:
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
try:
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
except:
_stop.set()
raise
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# construct epoch logs
epoch_logs = {}
batch_index += 1
samples_seen += batch_size
# epoch finished
if samples_seen > samples_per_epoch:
warnings.warn('Epoch comprised more than '
'`samples_per_epoch` samples, '
'which might affect learning results. '
'Set `samples_per_epoch` correctly '
'to avoid this warning.')
if samples_seen >= samples_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(validation_data,
nb_val_samples,
max_q_size=max_q_size)
else:
# no need for try/except because
# data has already been validated
val_outs = self.evaluate(val_x, val_y,
sample_weight=val_sample_weights,
verbose=0)
if type(val_outs) is not list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
_stop.set()
callbacks.on_train_end()
return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10):
'''Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
# Arguments
generator:
generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
val_samples:
total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
self._make_test_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
weights = []
data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
try:
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
all_outs.append(outs)
processed_samples += nb_samples
weights.append(nb_samples)
_stop.set()
if type(outs) is not list:
return np.average(np.asarray(all_outs),
weights=weights)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=weights))
return averages
def predict_generator(self, generator, val_samples, max_q_size=10):
'''Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
# Returns
Numpy array(s) of predictions.
'''
self._make_predict_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
else:
x = generator_output
try:
outs = self.predict_on_batch(x)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
if type(outs) != list:
outs = [outs]
if len(all_outs) == 0:
for out in outs:
shape = (val_samples,) + out.shape[1:]
all_outs.append(np.zeros(shape))
for i, out in enumerate(outs):
all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
processed_samples += nb_samples
_stop.set()
if len(all_outs) == 1:
return all_outs[0]
return all_outs
|
server.py
|
import sys
import time
import socket
import struct
import signal
import threading
from queue import Queue
from config import Config
THREADS = 2
TASKS = [1, 2]
queue = Queue()
COMMANDS = {
"help": ["Shows this help"],
"ls clients": ["Lists connected clients"],
"connect": ["Selects a client by its index. Takes index as a parameter"],
"quit": [
"Stops current connection with a client. To be used when client is selected"
],
"exit": ["Shuts server down"],
}
class Server(object):
def __init__(self):
self.host = ""
self.port = Config.PORT
self.socket = None
self.connections = []
self.conn_addresses = []
def show_help(self):
for cmd, v in COMMANDS.items():
print(f"{cmd}:\t{v[0]}")
return
def register_signal_handler(self):
signal.signal(signal.SIGINT, self.quit_conn)
signal.signal(signal.SIGTERM, self.quit_conn)
return
def quit_conn(self, signal=None, frame=None):
print("\n......Quitting Connection.......")
for conn in self.connections:
try:
conn.shutdown(2)
conn.close()
except Exception as e:
print("Could not close connection %s" % str(e))
self.socket.close()
sys.exit(0)
def create_socket(self):
try:
self.socket = socket.socket()
except socket.error as msg:
print("Socket creation error: " + str(msg))
sys.exit(1)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return
def bind_socket(self):
""" Bind socket to port and wait for connection from client """
try:
self.socket.bind((self.host, self.port))
self.socket.listen(5)
except socket.error as e:
print("Socket binding error: " + str(e))
time.sleep(5)
self.bind_socket()
return
def accept_connections(self):
""" Accept connections from multiple clients and add to list """
for c in self.connections:
c.close()
self.connections = []
self.conn_addresses = []
while 1:
try:
conn, address = self.socket.accept()
conn.setblocking(1)
client_hostname = conn.recv(1024).decode("utf-8")
address = address + (client_hostname,)
except Exception as e:
print("Error accepting connections: %s" % str(e))
continue
self.connections.append(conn)
self.conn_addresses.append(address)
print(f"\nConnection has been established: {address[-1]} ({address[0]})")
return
def run_grumpy(self):
""" Command shell for sending commands to remote """
while True:
cmd = input("grumpy$ ")
if cmd == "ls clients":
self.show_connections()
continue
elif "connect" in cmd:
target, conn = self.get_target(cmd)
if conn is not None:
self.send_target_commands(target, conn)
elif cmd == "exit":
queue.task_done()
queue.task_done()
print("Server shutdown")
break
# self.quit_conn()
elif cmd == "help":
self.show_help()
elif cmd == "":
pass
else:
print("Command not recognized")
return
def show_connections(self):
""" Show all connections """
results = ""
for i, conn in enumerate(self.connections):
try:
conn.send(str.encode(" "))
conn.recv(20480)
except:
del self.connections[i]
del self.conn_addresses[i]
continue
results += (
str(i)
+ " "
+ str(self.conn_addresses[i][0])
+ " "
+ str(self.conn_addresses[i][1])
+ " "
+ str(self.conn_addresses[i][2])
+ "\n"
)
print("----- Clients -----" + "\n" + results)
return
def get_target(self, cmd):
"""Select target client
:param cmd:
"""
target = cmd.split(" ")[-1]
try:
target = int(target)
except:
print("Client index should be an integer")
return None, None
try:
conn = self.connections[target]
except IndexError:
print("Invalid connection")
return None, None
print("You are now connected to " + str(self.conn_addresses[target][2]))
return target, conn
def read_command_output(self, conn):
"""Read message length and unpack it into an integer
:param conn:
"""
raw_msglen = self.recvall(conn, 4)
if not raw_msglen:
return None
msglen = struct.unpack(">I", raw_msglen)[0]
return self.recvall(conn, msglen)
def recvall(self, conn, n):
"""Helper function to recv n bytes or return None if EOF is hit
:param n:
:param conn:
"""
data = b""
while len(data) < n:
packet = conn.recv(n - len(data))
if not packet:
return None
data += packet
return data
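# Framing sketch (assumption about the client side, not part of this file):
# the client is expected to prefix each response with its length packed as a
# big-endian unsigned int, mirroring read_command_output/recvall above:
#
#   payload = output.encode("utf-8")
#   sock.sendall(struct.pack(">I", len(payload)) + payload)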
def send_target_commands(self, target, conn):
"""Connect with remote target client
:param conn:
:param target:
"""
conn.send(str.encode(" "))
cwd_bytes = self.read_command_output(conn)
cwd = str(cwd_bytes, "utf-8")
print(cwd, end="")
while True:
try:
cmd = input()
if len(str.encode(cmd)) > 0:
conn.send(str.encode(cmd))
cmd_output = self.read_command_output(conn)
client_response = str(cmd_output, "utf-8")
print(client_response, end="")
if cmd == "quit":
break
except Exception as e:
print("Connection was lost %s" % str(e))
break
del self.connections[target]
del self.conn_addresses[target]
return
def create_thread():
""" Create worker threads (will die when main exits) """
server = Server()
server.register_signal_handler()
for _ in range(THREADS):
t = threading.Thread(target=task, args=(server,))
t.daemon = True
t.start()
return
def task(server):
"""peform next task in the queue. Thread 1 handels connections and 2 handles sending commands
:param server:
"""
while True:
x = queue.get()
if x == 1:
server.create_socket()
server.bind_socket()
server.accept_connections()
if x == 2:
server.run_grumpy()
queue.task_done()
return
def create_task():
""" Each list item is a new job """
for x in TASKS:
queue.put(x)
queue.join()
return
def main():
create_thread()
create_task()
if __name__ == "__main__":
main()
|
client_sim.py
|
import requests
import cv2
import os
import time
import base64
import json
from threading import Thread
# '127.0.0.1' #'172.20.16.10' # '137.110.115.9'
# '34.68.142.133' # '34.94.7.7' # 'https://gazelearning-apis.wl.r.appspot.com'
host = '34.69.132.236'
PORT = 8000
N_SERVER = 1
TOTAL = 400
img_folder = 'data_temp/12344/face/' # 'dataset_rw/'
labels = [0, 1] # ['not_confused', 'confused']
def getImage(count, label):
filename = '{}_{}.jpg'.format(label, count)
filename = os.path.join(img_folder, filename)
with open(filename, "rb") as img_file:
imgb64 = base64.b64encode(img_file.read())
return "test," + imgb64.decode('utf-8')
IMG = getImage(190, labels[1])
def sendRequest(pID):
port = PORT + pID % N_SERVER
# url = 'http://{}:{}'.format(host, port)
url = 'http://{}:{}/detection'.format(host, port)
# url = 'https://mlserver-302123.uc.r.appspot.com/detection'
pID = 'user_' + str(pID).zfill(2)
stage = 0 # 0: collect data; 1: inference,
idx = 0 # 0: collect c, 1: collect nc
label_idx = [1, 0] # 1: confused label, 0: neutral label
count = 0
total = TOTAL // 2
count_request = 0
latency = [0,0]
while True:
if idx < 2 and stage < 1:
# img = getImage(count, labels[idx])
data = {'img': IMG, 'stage': stage, 'label': label_idx[idx],
'username': pID, 'frameId': total-count}
# print(data)
start = time.time()
res = requests.post(url, data=json.dumps(data))
latency[stage] += time.time() - start
print(res.content)
count += 1
if count == total:
idx += 1
count = 0
# time.sleep(0)
else:
stage = 1
idx = 1
# img = getImage(count, labels[idx])
start = time.time()
data = {'img': IMG, 'stage': stage, 'label': label_idx[idx],
'username': pID, 'frameId': -1}
res = requests.post(url, data=json.dumps(data))
latency[stage] += time.time() - start
print(res.content)
time.sleep(1)
print('pID:{}, count: {}, stage: {}'.format(pID, count_request, stage))
count_request += 1
if count_request == total * 2 + 25:
break
res = 'pID: {}, Stage0 Latency:{}, Stage1 Latency:{}'\
.format(pID,
latency[0] / (2 * total),
latency[1] / (count_request - 2 * total))
with open('res.txt', 'a') as outfile:
outfile.write(res + '\n')
threaded = True
num_threads = 15
if threaded:
request_threads = []
for i in range(num_threads):
request_threads.append(Thread(target=sendRequest, args=(i, )))
for i in range(num_threads):
request_threads[i].start()
time.sleep(1.5)
else:
sendRequest(0)
# test = getImage(0, labels[0])
# print(test)
|
dropbox.py
|
#!/usr/bin/env python3
#
# Copyright (c) Dropbox, Inc.
#
# dropbox
# Dropbox frontend script
# This file is part of nautilus-dropbox 2020.03.04.
#
# nautilus-dropbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nautilus-dropbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nautilus-dropbox. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import with_statement
import _thread
import errno
import locale
import optparse
import os
import platform
import shutil
import socket
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import traceback
import urllib.request
try:
import gpg
gpgme = None
except ImportError:
gpg = None
# Still support gpgme for now. Remove this once we only support 17.04+.
try:
import gpgme
except ImportError:
gpgme = None
from contextlib import closing, contextmanager
from io import BytesIO
from operator import methodcaller
from os.path import relpath
from posixpath import abspath, commonprefix, curdir, join, pardir, sep
INFO = "Dropbox is the easiest way to share and store your files online. Want to learn more? Head to"
LINK = "https://www.dropbox.com/"
WARNING = "In order to use Dropbox, you must download the proprietary daemon."
GPG_WARNING = "Note: python3-gpg (python3-gpgme for Ubuntu 16.10 and lower) is not installed, we will not be able to verify binary signatures."
ERROR_CONNECTING = "Trouble connecting to Dropbox servers. Maybe your internet connection is down, or you need to set your http_proxy environment variable."
ERROR_SIGNATURE = "Downloaded binary does not match Dropbox signature, aborting install."
ERROR_INVALID_DROPBOX = "Could not start the Dropbox daemon. Make sure your computer meets the minimum requirements:\nhttps://www.dropbox.com/help/desktop-web/system-requirements#desktop"
DOWNLOAD_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s"
SIGNATURE_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s&signature=1"
DOWNLOADING = "Downloading Dropbox... %d%%"
UNPACKING = "Unpacking Dropbox... %d%%"
PARENT_DIR = os.path.expanduser("~")
DROPBOX_DIST_PATH = "%s/.dropbox-dist" % PARENT_DIR
DROPBOXD_PATH = os.path.join(DROPBOX_DIST_PATH, "dropboxd")
DESKTOP_FILE = "/usr/share/applications/dropbox.desktop"
enc = locale.getpreferredencoding()
# Available from https://linux.dropbox.com/fedora/rpm-public-key.asc
DROPBOX_PUBLIC_KEY = b"""
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.0
mQENBEt0ibEBCACv4hZRPqwtpU6z8+BB5YZU1a3yjEvg2W68+a6hEwxtCa2U++4dzQ+7EqaU
q5ybQnwtbDdpFpsOi9x31J+PCpufPUfIG694/0rlEpmzl2GWzY8NqfdBFGGm/SPSSwvKbeNc
FMRLu5neo7W9kwvfMbGjHmvUbzBUVpCVKD0OEEf1q/Ii0Qcekx9CMoLvWq7ZwNHEbNnij7ec
nvwNlE2MxNsOSJj+hwZGK+tM19kuYGSKw4b5mR8IyThlgiSLIfpSBh1n2KX+TDdk9GR+57TY
vlRu6nTPu98P05IlrrCP+KF0hYZYOaMvQs9Rmc09tc/eoQlN0kkaBWw9Rv/dvLVc0aUXABEB
AAG0MURyb3Bib3ggQXV0b21hdGljIFNpZ25pbmcgS2V5IDxsaW51eEBkcm9wYm94LmNvbT6J
ATYEEwECACAFAkt0ibECGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRD8kYszUESRLi/z
B/wMscEa15rS+0mIpsORknD7kawKwyda+LHdtZc0hD/73QGFINR2P23UTol/R4nyAFEuYNsF
0C4IAD6y4pL49eZ72IktPrr4H27Q9eXhNZfJhD7BvQMBx75L0F5gSQwuC7GdYNlwSlCD0AAh
Qbi70VBwzeIgITBkMQcJIhLvllYo/AKD7Gv9huy4RLaIoSeofp+2Q0zUHNPl/7zymOqu+5Ox
e1ltuJT/kd/8hU+N5WNxJTSaOK0sF1/wWFM6rWd6XQUP03VyNosAevX5tBo++iD1WY2/lFVU
JkvAvge2WFk3c6tAwZT/tKxspFy4M/tNbDKeyvr685XKJw9ei6GcOGHD
=5rWG
-----END PGP PUBLIC KEY BLOCK-----
"""
def console_print(st="", f=sys.stdout, linebreak=True):
f.write(st)
if linebreak: f.write(os.linesep)
def console_flush(f=sys.stdout):
f.flush()
def yes_no_question(question):
while True:
console_print(question, linebreak=False)
console_print(" [y/n] ", linebreak=False)
console_flush()
text = input()
if text.lower().startswith("y"):
return True
elif text.lower().startswith("n"):
return False
else:
console_print("Sorry, I didn't understand that. Please type yes or no.")
def plat():
if sys.platform.lower().startswith('linux'):
arch = platform.machine()
if (arch[0] == 'i' and
arch[1].isdigit() and
arch[2:4] == '86'):
plat = "x86"
elif arch == 'x86_64':
plat = arch
else:
FatalVisibleError("Platform not supported")
return "lnx.%s" % plat
else:
FatalVisibleError("Platform not supported")
def is_dropbox_running():
pidfile = os.path.expanduser("~/.dropbox/dropbox.pid")
try:
with open(pidfile, "r") as f:
pid = int(f.read())
with open("/proc/%d/cmdline" % pid, "r") as f:
cmdline = f.read().lower()
except:
cmdline = ""
return "dropbox" in cmdline
@contextmanager
def gpg_context(keys):
gpg_conf_contents = b''
_gpghome = tempfile.mkdtemp(prefix='tmp.gpghome')
try:
os.environ['GNUPGHOME'] = _gpghome
fp = open(os.path.join(_gpghome, 'gpg.conf'), 'wb')
fp.write(gpg_conf_contents)
fp.close()
if gpg:
ctx = gpg.Context()
else:
ctx = gpgme.Context()
loaded = []
for key_file in keys:
if gpg:
ctx.op_import(key_file.read())
result = ctx.op_import_result()
key = ctx.get_key(result.imports[0].fpr)
else:
result = ctx.import_(key_file)
key = ctx.get_key(result.imports[0][0])
loaded.append(key)
ctx.signers = loaded
yield ctx
finally:
del os.environ['GNUPGHOME']
shutil.rmtree(_gpghome, ignore_errors=True)
class SignatureVerifyError(Exception):
pass
def verify_signature(key_file, sig_file, plain_file):
with gpg_context([key_file]) as ctx:
if gpg:
ctx.op_verify(sig_file.read(), plain_file.read(), None)
result = ctx.op_verify_result()
return result.signatures[0].status == 0
# gpgme exists
sigs = ctx.verify(sig_file, plain_file, None)
return sigs[0].status == None
def download_file_chunk(url, buf):
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', "DropboxLinuxDownloader/2020.03.04")]
with closing(opener.open(url)) as f:
size = int(f.info()['content-length'])
bufsize = int(max(size / 200, 4096))
progress = 0
yield (0, True)
while True:
try:
chunk = f.read(bufsize)
progress += len(chunk)
buf.write(chunk)
yield (float(progress)/size, True)
if progress == size:
break
except OSError as e:
if hasattr(e, 'errno') and e.errno == errno.EAGAIN:
# nothing left to read
yield (float(progress)/size, False)
else:
raise
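# Usage sketch (illustrative, mirroring DownloadState.copy_data below): the
# generator writes into `buf` and yields (fraction_complete, flag) tuples,
# where the flag is False when the socket temporarily has nothing to read,
# so a caller can drive it for progress reporting:
#
#   buf = BytesIO()
#   for fraction, active in download_file_chunk(DOWNLOAD_LOCATION_FMT % plat(), buf):
#       print(DOWNLOADING % int(fraction * 100))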
class DownloadState(object):
def __init__(self):
self.local_file = BytesIO()
def copy_data(self):
return download_file_chunk(DOWNLOAD_LOCATION_FMT % plat(), self.local_file)
def unpack(self):
# download signature
signature = BytesIO()
for _ in download_file_chunk(SIGNATURE_LOCATION_FMT % plat(), signature):
pass
signature.seek(0)
self.local_file.seek(0)
if gpg or gpgme:
if not verify_signature(BytesIO(DROPBOX_PUBLIC_KEY), signature, self.local_file):
raise SignatureVerifyError()
self.local_file.seek(0)
archive = tarfile.open(fileobj=self.local_file, mode='r:gz')
total_members = len(archive.getmembers())
for i, member in enumerate(archive.getmembers()):
filename = os.path.join(PARENT_DIR, member.name)
if os.path.exists(filename) and not os.path.isdir(filename):
os.unlink(filename)
archive.extract(member, PARENT_DIR)
yield member.name, i, total_members
archive.close()
def cancel(self):
if not self.local_file.closed:
self.local_file.close()
def is_dropbox_valid(self):
"""
Validate that Dropbox runs, so we can show an error
message to the user if it doesn't work.
Returns True if Dropbox can run, False otherwise.
"""
f = open("/dev/null", "w")
try:
a = subprocess.Popen([DROPBOXD_PATH, "/testrun", "0"], preexec_fn=os.setsid, cwd=os.path.expanduser("~"),
stderr=sys.stderr, stdout=f, close_fds=True)
except Exception as e:
print(e)
return False
# in seconds
interval = 0.5
wait_for = 30
for _ in range(int(wait_for / interval)):
ret_val = a.poll()
if ret_val is None:
time.sleep(interval)
continue
return ret_val == 0
return False
def load_serialized_images():
global box_logo_pixbuf, window_icon
import gi
gi.require_version('GdkPixbuf', '2.0')
from gi.repository import GdkPixbuf
# (the original script constructs box_logo_pixbuf and window_icon here from
#  long serialized RGBA byte strings via GdkPixbuf.Pixbuf.new_from_data(...);
#  the inline pixel data is omitted)
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x02\x00d\xffn\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffp\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcb\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00c\xff\x8e\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00c\xff\x90\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xffN\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00c\xffP\x00c\xff\
xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x
00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00d\xffR\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00d\xff\x92\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00c\xff\xce\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00b\xffr\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb0\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb0\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffn\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffp\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b
\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\xffQ\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff
\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\xffQ\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\
x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb0\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffp\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb0\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\
x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\
x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00d\xffp\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb0\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00d\xffp\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00c\xff\xb0\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', GdkPixbuf.Colorspace.RGB, True, 8, 64, 64, 256)
window_icon = GdkPixbuf.Pixbuf.new_from_data(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00b\xffN\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00b\xffN\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00b\xffN\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00b\xff\r\x00d\xff\x92\x00c\xff\xfd\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00d\xff\x92\x00c\xff\xfd\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xffe\x00d\xfff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xfff\x00d\xfff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\xff\x0b\x00b\xff\x8c\x00b\xff\xfc\x00b\xff\xfc\x00c\xff\x8d\x00t\xff\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00t\xff\x0b\x00b\xff\x8c\x00b\xff\xfc\x00b\xff\xfc\x00b\xff\x8c\x00t\xff\x0b\x00\x00\x00\x00\x00c\xffK\x00c\xff\xe0\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe1\x00e\xffL\x00e\xffL\x00c\xff\xe1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe1\x00e\xffL\x00d\xffT\x00c\xff\xe5\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe6\x00c\xffU\x00d\xffT\x00c\xff\xe6\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe6\x00d\xffT\x00\x00\x00\x00\x00m\xff\x0e\x00b\xff\x94\x00c\xff\xfd\x00c\xff\xfd\x00c\xff\x95\x00i\xff\x11\x00c\xffj\x00d\xffk\x00i\xff\x11\x00c\xff\x95\x00c\xff\xfd\x00c\xff\xfd\x00c\xff\x95\x00m\xff\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff7\x00d\xff8\x00d\xff.\x00b\xff\xc8\x00b\xff\xff\x00b\xff\xff\x00b\xff\xc8\x00g\xff/\x00f\xff7\x00f\xff7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\x7f\x00c\xff\xfb\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfb\x00d\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xff 
\x00b\xff\xb6\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb7\x00d\xff!\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffW\x00b\xff\xe7\x00b\xff\xe7\x00d\xffW\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff\x0f\x00p\xff\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', GdkPixbuf.Colorspace.RGB, True, 8, 16, 16, 64)
GUI_AVAILABLE = os.environ.get("DISPLAY", '')
if GUI_AVAILABLE:
def download():
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
import webbrowser
from gi.repository import Gdk, GObject, Gtk, Pango
GObject.threads_init()
load_serialized_images()
global FatalVisibleError
def FatalVisibleError(s):
error = Gtk.MessageDialog(parent = None,
flags = Gtk.DialogFlags.MODAL,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = s)
error.set_title("Error")
error.run()
Gtk.main_quit()
sys.exit(-1)
class GeneratorTask(object):
def __init__(self, generator, loop_callback, on_done=None, on_exception=None):
self.generator = generator
self.loop_callback = loop_callback
self.on_done = on_done
self.on_exception = on_exception
def _run(self, *args, **kwargs):
self._stopped = False
try:
for ret in self.generator(*args, **kwargs):
if ret is None:
ret = ()
if not isinstance(ret, tuple):
ret = (ret,)
GObject.idle_add(self.loop_callback, *ret)
if self._stopped:
_thread.exit()
except Exception as e:
print(e)
if self.on_exception is not None:
GObject.idle_add(self.on_exception, e)
else:
if self.on_done is not None:
GObject.idle_add(self.on_done)
def start(self, *args, **kwargs):
t = threading.Thread(target=self._run, args=args, kwargs=kwargs)
t.daemon = True
t.start()
def stop(self):
self._stopped = True
class DownloadDialog(Gtk.Dialog):
def handle_delete_event(self, wid, ev, data=None):
self.handle_cancel(wid)
def handle_dont_show_toggle(self, button, data=None):
reroll_autostart(not button.get_active())
def handle_cancel(self, button):
if self.task:
self.task.stop()
if self.download:
self.download.cancel()
Gtk.main_quit()
self.user_cancelled = True
def handle_ok(self, button):
# begin download
self.ok.hide()
self.download = DownloadState()
self.label.hide()
if self.dont_show_again_align is not None:
self.dont_show_again_align.hide()
self.progress.show()
def download_progress(progress, status):
if not status:
self.task.stop()
self.update_progress(DOWNLOADING, progress)
def finished():
self.update_progress(DOWNLOADING, 1.0)
self.unpack_dropbox()
def error(ex):
FatalVisibleError(ERROR_CONNECTING)
self.update_progress(DOWNLOADING, 0)
# keep a reference to the task (start() returns None) so handle_cancel() can stop it
self.task = GeneratorTask(self.download.copy_data,
download_progress,
finished, error)
self.task.start()
def update_progress(self, text, fraction):
self.progress.set_text(text % int(fraction*100))
self.progress.set_fraction(fraction)
def unpack_dropbox(self):
def unpack_progress(name, i, total):
self.update_progress(UNPACKING, float(i)/total)
def finished():
self.update_progress(UNPACKING, 1.0)
if not self.download.is_dropbox_valid():
FatalVisibleError(ERROR_INVALID_DROPBOX)
Gtk.main_quit()
def error(ex):
if isinstance(ex, SignatureVerifyError):
FatalVisibleError(ERROR_SIGNATURE)
else:
FatalVisibleError(ERROR_CONNECTING)
self.task = GeneratorTask(self.download.unpack,
unpack_progress,
finished, error)
self.task.start()
def mouse_down(self, widget, event):
if self.hovering:
self.clicked_link = True
def mouse_up(self, widget, event):
if self.clicked_link:
webbrowser.open(LINK)
self.clicked_link = False
def label_motion(self, widget, event):
offx, offy = self.label.get_layout_offsets()
layout = self.label.get_layout()
index = layout.xy_to_index(int((offx+event.x)*Pango.SCALE),
int((offy+event.y)*Pango.SCALE))[1]
link_index = layout.get_text().find(LINK)
if index >= link_index and index < link_index+len(LINK):
self.hovering = True
self.label_box.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.HAND2))
else:
self.hovering = False
self.label_box.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
def __init__(self):
super(DownloadDialog, self).__init__(parent = None,
title = "Dropbox Installation")
self.download = None
self.hovering = False
self.clicked_link = False
self.user_cancelled = False
self.task = None
self.ok = ok = Gtk.Button(stock=Gtk.STOCK_OK)
ok.connect('clicked', self.handle_ok)
self.action_area.add(ok)
ok.show()
cancel = Gtk.Button(stock=Gtk.STOCK_CANCEL)
cancel.connect('clicked', self.handle_cancel)
self.action_area.add(cancel)
cancel.show()
self.connect('delete_event', self.handle_delete_event)
self.box_logo = Gtk.Image.new_from_pixbuf(box_logo_pixbuf)
self.box_logo.show()
self.set_icon(window_icon)
self.progress = Gtk.ProgressBar()
self.progress.set_property('width-request', 300)
self.progress.set_property('show-text', True)
self.label = Gtk.Label()
GPG_WARNING_MSG = ("\n\n" + GPG_WARNING) if not gpg and not gpgme else ""
self.label.set_markup('%s <span foreground="#000099" underline="single" weight="bold">%s</span>\n\n%s%s' % (INFO, LINK, WARNING, GPG_WARNING_MSG))
self.label.set_line_wrap(True)
self.label.set_property('width-request', 300)
self.label.show()
self.label_box = Gtk.EventBox()
self.label_box.add(self.label)
self.label_box.connect("button-release-event", self.mouse_up)
self.label_box.connect("button-press-event", self.mouse_down)
self.label_box.connect("motion-notify-event", self.label_motion)
self.label_box.show()
def on_realize(widget):
self.label_box.add_events(Gdk.EventMask.POINTER_MOTION_MASK)
self.label_box.connect("realize", on_realize)
self.hbox = Gtk.HBox(spacing=10)
self.hbox.set_property('border-width',10)
self.hbox.pack_start(self.box_logo, False, False, 0)
self.hbox.pack_start(self.label_box, False, False, 0)
self.hbox.pack_start(self.progress, False, False, 0)
self.hbox.show()
self.vbox.add(self.hbox)
self.dont_show_again_align = None
try:
if can_reroll_autostart():
dont_show_again = Gtk.CheckButton.new_with_mnemonic("_Don't show this again")
dont_show_again.connect('toggled', self.handle_dont_show_toggle)
dont_show_again.show()
self.dont_show_again_align = Gtk.Alignment(xalign=1.0, yalign=0.0, xscale=0.0, yscale=0.0)
self.dont_show_again_align.add(dont_show_again)
self.dont_show_again_align.show()
hbox = Gtk.HBox()
hbox.set_property('border-width', 10)
hbox.pack_start(self.dont_show_again_align, True, True, 0)
hbox.show()
self.vbox.add(hbox)
self.set_resizable(False)
except:
traceback.print_exc()
self.ok.grab_focus()
dialog = DownloadDialog()
dialog.show()
Gtk.main()
if dialog.user_cancelled:
raise Exception("user cancelled download!!!")
else:
def download():
global FatalVisibleError
def FatalVisibleError(s):
console_print("\nError: %s" % s, f=sys.stderr)
sys.exit(-1)
ESC = "\x1b"
save = ESC+"7"
unsave = ESC+"8"
erase_to_start = ESC+"[1K"
write = sys.stdout.write
flush = sys.stdout.flush
last_progress = [None, None]
def setprogress(text, frac):
if last_progress == [text, frac]:
return
if sys.stdout.isatty():
write(erase_to_start)
write(unsave)
console_print(text % int(100*frac), linebreak=not sys.stdout.isatty())
if sys.stdout.isatty():
flush()
last_progress[0], last_progress[1] = text, frac
console_print()
if sys.stdout.isatty():
write(save)
flush()
console_print("%s %s\n" % (INFO, LINK))
GPG_WARNING_MSG = ("\n%s" % GPG_WARNING) if not gpg and not gpgme else ""
if not yes_no_question("%s%s" % (WARNING, GPG_WARNING_MSG)):
return
download = DownloadState()
try:
for progress, status in download.copy_data():
if not status:
break
setprogress(DOWNLOADING, progress)
except Exception:
traceback.print_exc()
FatalVisibleError(ERROR_CONNECTING)
else:
setprogress(DOWNLOADING, 1.0)
console_print()
write(save)
try:
for _, i, total in download.unpack():
setprogress(UNPACKING, float(i)/total)
except SignatureVerifyError:
traceback.print_exc()
FatalVisibleError(ERROR_SIGNATURE)
except Exception:
traceback.print_exc()
FatalVisibleError(ERROR_CONNECTING)
else:
setprogress(UNPACKING, 1.0)
if not download.is_dropbox_valid():
FatalVisibleError(ERROR_INVALID_DROPBOX)
console_print()
class CommandTicker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.stop_event = threading.Event()
def stop(self):
self.stop_event.set()
def run(self):
ticks = ['[. ]', '[.. ]', '[...]', '[ ..]', '[ .]', '[ ]']
i = 0
first = True
while True:
self.stop_event.wait(0.25)
if self.stop_event.is_set(): break
if i == len(ticks):
first = False
i = 0
if not first:
sys.stderr.write("\r%s\r" % ticks[i])
sys.stderr.flush()
i += 1
sys.stderr.flush()
class DropboxCommand(object):
class CouldntConnectError(Exception): pass
class BadConnectionError(Exception): pass
class EOFError(Exception): pass
class CommandError(Exception): pass
def __init__(self, timeout=5):
self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.s.settimeout(timeout)
try:
self.s.connect(os.path.expanduser('~/.dropbox/command_socket'))
except socket.error:
raise DropboxCommand.CouldntConnectError()
self.f = self.s.makefile("rw", 4096)
def close(self):
self.f.close()
self.s.close()
def __readline(self):
try:
toret = self.f.readline().rstrip("\n")
except socket.error:
raise DropboxCommand.BadConnectionError()
if toret == '':
raise DropboxCommand.EOFError()
else:
return toret
# attribute doesn't exist, I know what you want
def send_command(self, name, args):
self.f.write(name)
self.f.write("\n")
self.f.writelines(("\t".join([k] + ([v]
if isinstance(v, str) else
list(v))) + "\n")
for k,v in args.items())
self.f.write("done\n")
self.f.flush()
# Start a ticker
ticker_thread = CommandTicker()
ticker_thread.start()
# This is the potentially long-running call.
try:
ok = self.__readline() == "ok"
except KeyboardInterrupt:
raise DropboxCommand.BadConnectionError("Keyboard interruption detected")
finally:
# Tell the ticker to stop.
ticker_thread.stop()
ticker_thread.join()
if ok:
toret = {}
for i in range(21):
if i == 20:
raise Exception("close this connection!")
line = self.__readline()
if line == "done":
break
argval = line.split("\t")
toret[argval[0]] = argval[1:]
return toret
else:
problems = []
for i in range(21):
if i == 20:
raise Exception("close this connection!")
line = self.__readline()
if line == "done":
break
problems.append(line)
raise DropboxCommand.CommandError("\n".join(problems))
# this is the hotness, auto marshalling
def __getattr__(self, name):
try:
return super(DropboxCommand, self).__getattr__(name)
except AttributeError:
def __spec_command(**kw):
return self.send_command(str(name), kw)
self.__setattr__(name, __spec_command)
return __spec_command
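# Usage sketch (added for clarity, not part of the original script): thanks to
# the auto-marshalling __getattr__ above, any attribute access on a
# DropboxCommand instance becomes a command sent over the unix socket, with
# keyword arguments serialized as tab-separated key/value lines followed by
# "done". Assuming the daemon is running and linked:
#
#   with closing(DropboxCommand()) as dc:
#       status = dc.get_dropbox_status()   # sends "get_dropbox_status\ndone\n"
#       info = dc.icon_overlay_file_status(path="/some/file/in/Dropbox")
#
# Each call returns the reply as a dict of {key: [values]}, or raises
# DropboxCommand.CommandError if the daemon reports a problem.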
commands = {}
aliases = {}
def command(meth):
global commands, aliases
assert meth.__doc__, "All commands need properly formatted docstrings (even %r!!)" % meth
if hasattr(meth, 'im_func'): # bound method, if we ever have one
meth = meth.im_func
commands[meth.__name__] = meth
meth_aliases = [str(alias) for alias in aliases.keys() if aliases[alias].__name__ == meth.__name__]
if meth_aliases:
meth.__doc__ += "\nAliases: %s" % ",".join(meth_aliases)
return meth
def alias(name):
def decorator(meth):
global commands, aliases
assert name not in commands, "This alias is the name of a command."
aliases[name] = meth
return meth
return decorator
def requires_dropbox_running(meth):
def newmeth(*n, **kw):
if is_dropbox_running():
return meth(*n, **kw)
else:
console_print("Dropbox isn't running!")
newmeth.__name__ = meth.__name__
newmeth.__doc__ = meth.__doc__
return newmeth
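# Illustrative sketch (not in the original): command(), alias() and
# requires_dropbox_running() form a small plugin system. A hypothetical new
# subcommand would be registered like the existing ones below:
#
#   @command
#   @requires_dropbox_running
#   @alias('st')
#   def mystatus(args):
#       """one-line summary shown by 'dropbox help'
#       dropbox mystatus
#       Longer description shown by 'dropbox help mystatus'.
#       """
#       ...
#
# The docstring is mandatory (command() asserts on it); its first line is what
# usage() prints in the command list, and the alias becomes a second key in
# the dispatch table.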
def start_dropbox():
if os.access(DROPBOXD_PATH, os.X_OK):
f = open("/dev/null", "w")
# Fix indicator icon and menu on Unity environments. (LP: #1559249)
# Fix indicator icon and menu in Budgie environment. (LP: #1683051)
new_env = os.environ.copy()
current_env = os.environ.get("XDG_CURRENT_DESKTOP", '').split(":")
to_check = ['Unity', 'Budgie']
if any(word in to_check for word in current_env):
new_env['XDG_CURRENT_DESKTOP'] = 'Unity'
# we don't reap the child because we're gonna die anyway, let init do it
subprocess.Popen([DROPBOXD_PATH], preexec_fn=os.setsid, cwd=os.path.expanduser("~"),
stderr=sys.stderr, stdout=f, close_fds=True, env=new_env)
# in seconds
interval = 0.5
wait_for = 60
for _ in range(int(wait_for / interval)):
if is_dropbox_running():
return True
# back off from connect for a while
time.sleep(interval)
return False
else:
return False
# Extracted and modified from os.cmd.Cmd
def columnize(list, display_list=None, display_width=None):
if not list:
console_print("<empty>")
return
non_str = [i for i in range(len(list)) if not (isinstance(list[i], str))]
if non_str:
raise TypeError("list[i] not a string for i in %s" %
", ".join(map(str, non_str)))
if not display_width:
d = os.popen('stty size', 'r').read().split()
if d:
display_width = int(d[1])
else:
for item in list:
console_print(item)
return
if not display_list:
display_list = list
size = len(list)
if size == 1:
console_print(display_list[0])
return
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > display_width:
break
if totwidth <= display_width:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
lines = []
for row in range(nrows):
texts = []
display_texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
y = ""
else:
x = list[i]
y = display_list[i]
texts.append(x)
display_texts.append(y)
while texts and not texts[-1]:
del texts[-1]
original_texts = texts[:]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
texts[col] = texts[col].replace(original_texts[col], display_texts[col])
line = " ".join(texts)
lines.append(line)
for line in lines:
console_print(line)
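# Worked example (added commentary, not in the original): columnize() packs the
# strings column by column (like ls) into as few rows as fit in display_width,
# left-justifying each column; display_list lets the caller substitute a
# colorized variant of each entry while the plain strings drive the widths.
#
#   columnize(["alpha", "beta", "gamma"], display_width=20)
#   # prints a single row: "alpha  beta  gamma"
#
#   columnize(["alpha", "beta", "gamma"], display_width=10)
#   # too narrow for one row, so it falls back to one entry per row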
@command
def update(args):
"""download latest version of Dropbox
dropbox update
Downloads the latest version of Dropbox. This should not be required
normally, as Dropbox automatically updates itself.
"""
download()
@command
@requires_dropbox_running
@alias('stat')
def filestatus(args):
"""get current sync status of one or more files
dropbox filestatus [-l] [-a] [FILE]...
Prints the current status of each FILE.
options:
-l --list Prints out information in a format similar to ls. Works best when your console supports color :)
-a --all Do not ignore entries starting with "."
"""
global enc
oparser = optparse.OptionParser()
oparser.add_option("-l", "--list", action="store_true", dest="list")
oparser.add_option("-a", "--all", action="store_true", dest="all")
(options, args) = oparser.parse_args(args)
try:
with closing(DropboxCommand()) as dc:
if options.list:
# Listing.
# Separate directories from files.
if len(args) == 0:
dirs, nondirs = ["."], []
else:
dirs, nondirs = [], []
for a in args:
try:
(dirs if os.path.isdir(a) else nondirs).append(a)
except UnicodeDecodeError:
continue
if len(dirs) == 0 and len(nondirs) == 0:
#TODO: why?
exit(1)
dirs.sort(key=methodcaller('lower'))
nondirs.sort(key=methodcaller('lower'))
# Gets a string representation for a path.
def path_to_string(file_path):
if not os.path.exists(file_path):
path = "%s (File doesn't exist!)" % os.path.basename(file_path)
return (path, path)
try:
status = dc.icon_overlay_file_status(path=file_path).get('status', [None])[0]
except DropboxCommand.CommandError as e:
path = "%s (%s)" % (os.path.basename(file_path), e)
return (path, path)
env_term = os.environ.get('TERM','')
supports_color = (sys.stderr.isatty() and (
env_term.startswith('vt') or
env_term.startswith('linux') or
'xterm' in env_term or
'color' in env_term
)
)
# TODO: Test when you don't support color.
if not supports_color:
path = os.path.basename(file_path)
return (path, path)
if status == "up to date":
init, cleanup = "\x1b[32;1m", "\x1b[0m"
elif status == "syncing":
init, cleanup = "\x1b[36;1m", "\x1b[0m"
elif status == "unsyncable":
init, cleanup = "\x1b[41;1m", "\x1b[0m"
elif status == "selsync":
init, cleanup = "\x1b[37;1m", "\x1b[0m"
else:
init, cleanup = '', ''
path = os.path.basename(file_path)
return (path, "%s%s%s" % (init, path, cleanup))
# Prints a directory.
def print_directory(name):
clean_paths = []
formatted_paths = []
for subname in sorted(os.listdir(name), key=methodcaller('lower')):
if type(subname) != str:
continue
if not options.all and subname[0] == '.':
continue
try:
clean, formatted = path_to_string(os.path.abspath(os.path.join(name, subname)))
clean_paths.append(clean)
formatted_paths.append(formatted)
except (UnicodeEncodeError, UnicodeDecodeError):
continue
columnize(clean_paths, formatted_paths)
try:
if len(dirs) == 1 and len(nondirs) == 0:
print_directory(dirs[0])
else:
nondir_formatted_paths = []
nondir_clean_paths = []
for name in nondirs:
try:
clean, formatted = path_to_string(os.path.abspath(name))
nondir_clean_paths.append(clean)
nondir_formatted_paths.append(formatted)
except (UnicodeEncodeError, UnicodeDecodeError):
continue
if nondir_clean_paths:
columnize(nondir_clean_paths, nondir_formatted_paths)
if len(nondirs) == 0:
console_print(dirs[0] + ":")
print_directory(dirs[0])
dirs = dirs[1:]
for name in dirs:
console_print()
console_print(name + ":")
print_directory(name)
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
else:
if len(args) == 0:
args = [name for name in sorted(os.listdir("."), key=methodcaller('lower')) if type(name) == str]
if len(args) == 0:
# Bail early if there's nothing to list to avoid crashing on indent below
console_print("<empty>")
return
indent = max(len(st)+1 for st in args)
for file in args:
try:
if type(file) is not str:
file = file.decode(enc)
fp = os.path.abspath(file)
except (UnicodeEncodeError, UnicodeDecodeError):
continue
if not os.path.exists(fp):
console_print("%-*s %s" % \
(indent, file+':', "File doesn't exist"))
continue
try:
status = dc.icon_overlay_file_status(path=fp).get('status', ['unknown'])[0]
console_print("%-*s %s" % (indent, file+':', status))
except DropboxCommand.CommandError as e:
console_print("%-*s %s" % (indent, file+':', e))
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def ls(args):
"""list directory contents with current sync status
dropbox ls [FILE]...
This is an alias for filestatus -l
"""
return filestatus(["-l"] + args)
@command
@requires_dropbox_running
def puburl(args):
"""get public url of a file in your Dropbox's public folder
dropbox puburl FILE
Prints out a public url for FILE (which must be in your public folder).
"""
if len(args) != 1:
console_print(puburl.__doc__,linebreak=False)
return
try:
with closing(DropboxCommand()) as dc:
try:
console_print(dc.get_public_link(path=os.path.abspath(args[0])).get('link', ['No Link'])[0])
except DropboxCommand.CommandError as e:
console_print("Couldn't get public url: " + str(e))
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def sharelink(args):
"""get a shared link for a file in your Dropbox
dropbox sharelink FILE
Prints out a shared link for FILE.
"""
if len(args) != 1:
console_print(sharelink.__doc__, linebreak=False)
return
try:
with closing(DropboxCommand()) as dc:
try:
path = os.path.abspath(args[0])
link = dc.get_shared_link(path=path).get('link', ['No link'])[0]
console_print(link)
except DropboxCommand.CommandError as e:
console_print("Couldn't get shared link: " + str(e))
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def proxy(args):
"""set proxy settings for Dropbox
dropbox proxy MODE [TYPE] [HOST] [PORT] [USERNAME] [PASSWORD]
Set proxy settings for Dropbox.
MODE - one of "none", "auto", "manual"
TYPE - one of "http", "socks4", "socks5" (only valid with "manual" mode)
HOST - proxy hostname (only valid with "manual" mode)
PORT - proxy port (only valid with "manual" mode)
USERNAME - (optional) proxy username (only valid with "manual" mode)
PASSWORD - (optional) proxy password (only valid with "manual" mode)
"""
mode = None
type_ = None
if len(args) >= 1:
mode = args[0].lower()
if len(args) >= 2:
type_ = args[1].lower()
if (len(args) == 0 or
mode not in ['none', 'auto', 'manual'] or
(mode == 'manual' and len(args) not in (4, 6)) or
(mode != 'manual' and len(args) != 1) or
(mode == 'manual' and type_ not in ['http', 'socks4', 'socks5'])):
# Print help
console_print(proxy.__doc__, linebreak=False)
return
ARGS = ['mode', 'type', 'host', 'port', 'username', 'password']
# Load the args into a dictionary
kwargs = dict(zip(ARGS, args))
# Re-set these two because they were coerced to lower case
kwargs['mode'] = mode
if type_:
kwargs['type'] = type_
try:
with closing(DropboxCommand()) as dc:
try:
dc.set_proxy_settings(**kwargs)
console_print('set')
except DropboxCommand.CommandError as e:
console_print("Couldn't set proxy: " + str(e))
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def throttle(args):
"""set bandwidth limits for Dropbox
dropbox throttle DOWNLOAD UPLOAD
Set bandwidth limits for file sync.
DOWNLOAD - either "unlimited" or a manual limit in KB/s
UPLOAD - one of "unlimited", "auto", or a manual limit in KB/s
"""
if len(args) != 2:
console_print(throttle.__doc__, linebreak=False)
return
downlimit = args[0].lower()
uplimit = args[1].lower()
download_limit = None
download_mode = None
if downlimit == 'unlimited':
download_mode = downlimit
else:
try:
download_limit = int(downlimit)
download_mode = 'manual'
except ValueError:
console_print(throttle.__doc__, linebreak=False)
return
upload_limit = None
upload_mode = None
if uplimit in ['unlimited', 'auto']:
upload_mode = uplimit
else:
try:
upload_limit = int(uplimit)
upload_mode = 'manual'
except ValueError:
console_print(throttle.__doc__, linebreak=False)
return
kwargs = {
'download_mode': download_mode,
'upload_mode': upload_mode,
}
if download_limit:
kwargs['download_limit'] = str(download_limit)
if upload_limit:
kwargs['upload_limit'] = str(upload_limit)
try:
with closing(DropboxCommand()) as dc:
try:
dc.set_bandwidth_limits(**kwargs)
console_print('set')
except DropboxCommand.CommandError as e:
console_print("Couldn't set bandwidth limits: " + str(e))
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def status(args):
"""get current status of the dropboxd
dropbox status
Prints out the current status of the Dropbox daemon.
"""
if len(args) != 0:
console_print(status.__doc__,linebreak=False)
return
try:
with closing(DropboxCommand()) as dc:
try:
lines = dc.get_dropbox_status()['status']
if len(lines) == 0:
console_print('Idle')
else:
for line in lines:
console_print(line)
grab_link_url_if_necessary()
except KeyError:
console_print("Couldn't get status: daemon isn't responding")
except DropboxCommand.CommandError as e:
console_print("Couldn't get status: " + str(e))
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
@command
def running(argv):
"""return whether Dropbox is running
dropbox running
Returns 1 if running, and 0 if not running.
"""
return int(is_dropbox_running())
@command
@requires_dropbox_running
def stop(args):
"""stop dropboxd
dropbox stop
Stops the Dropbox daemon.
"""
try:
with closing(DropboxCommand()) as dc:
try:
dc.tray_action_hard_exit()
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
#returns true if link is necessary
def grab_link_url_if_necessary():
try:
with closing(DropboxCommand()) as dc:
try:
link_url = dc.needs_link().get("link_url", None)
if link_url is not None:
console_print("To link this computer to a Dropbox account, visit the following url:\n%s" % link_url[0])
return True
else:
return False
except DropboxCommand.CommandError:
pass
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def lansync(argv):
"""enables or disables LAN sync
dropbox lansync [y/n]
options:
y Dropbox will use LAN sync (default)
n Dropbox will not use LAN sync
"""
if len(argv) != 1:
console_print(lansync.__doc__, linebreak=False)
return
s = argv[0].lower()
if s.startswith('y') or s.startswith('-y'):
should_lansync = True
elif s.startswith('n') or s.startswith('-n'):
should_lansync = False
else:
should_lansync = None
if should_lansync is None:
console_print(lansync.__doc__,linebreak=False)
else:
with closing(DropboxCommand()) as dc:
dc.set_lan_sync(lansync='enabled' if should_lansync else 'disabled')
@command
@requires_dropbox_running
def exclude(args):
"""ignores/excludes a directory from syncing
dropbox exclude [list]
dropbox exclude add [DIRECTORY] [DIRECTORY] ...
dropbox exclude remove [DIRECTORY] [DIRECTORY] ...
"list" prints a list of directories currently excluded from syncing.
"add" adds one or more directories to the exclusion list, then
resynchronizes Dropbox.
"remove" removes one or more directories from the exclusion list, then
resynchronizes Dropbox.
With no arguments, executes "list".
Any specified path must be within Dropbox.
"""
if len(args) == 0:
try:
with closing(DropboxCommand()) as dc:
try:
lines = [relpath(path) for path in dc.get_ignore_set()['ignore_set']]
lines.sort()
if len(lines) == 0:
console_print('No directories are being ignored.')
else:
console_print('Excluded: ')
for line in lines:
console_print(str(line))
except KeyError:
console_print("Couldn't get ignore set: daemon isn't responding")
except DropboxCommand.CommandError as e:
if e.args[0].startswith("No command exists by that name"):
console_print("This version of the client does not support this command.")
else:
console_print("Couldn't get ignore set: " + str(e))
except DropboxCommand.BadConnectionError:
console_print("Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
elif len(args) == 1 and args[0] == "list":
exclude([])
elif len(args) >= 2:
sub_command = args[0]
paths = args[1:]
absolute_paths = [os.path.abspath(path) for path in paths]
if sub_command == "add":
try:
with closing(DropboxCommand(timeout=None)) as dc:
try:
result = dc.ignore_set_add(paths=absolute_paths)
if result["ignored"]:
console_print("Excluded: ")
lines = [relpath(path) for path in result["ignored"]]
for line in lines:
console_print(str(line))
except KeyError:
console_print("Couldn't add ignore path: daemon isn't responding")
except DropboxCommand.CommandError as e:
if e.args[0].startswith("No command exists by that name"):
console_print("This version of the client does not support this command.")
else:
console_print("Couldn't get ignore set: " + str(e))
except DropboxCommand.BadConnectionError as e:
console_print("Dropbox isn't responding! [%s]" % e)
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
elif sub_command == "remove":
try:
with closing(DropboxCommand(timeout=None)) as dc:
try:
result = dc.ignore_set_remove(paths=absolute_paths)
if result["removed"]:
console_print("No longer excluded: ")
lines = [relpath(path) for path in result["removed"]]
for line in lines:
console_print(str(line))
except KeyError:
console_print("Couldn't remove ignore path: daemon isn't responding")
except DropboxCommand.CommandError as e:
if e.args[0].startswith("No command exists by that name"):
console_print("This version of the client does not support this command.")
else:
console_print("Couldn't get ignore set: " + str(e))
except DropboxCommand.BadConnectionError as e:
console_print("Dropbox isn't responding! [%s]" % e)
except DropboxCommand.EOFError:
console_print("Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError:
console_print("Dropbox isn't running!")
else:
console_print(exclude.__doc__, linebreak=False)
return
else:
console_print(exclude.__doc__, linebreak=False)
return
@command
def start(argv):
"""start dropboxd
dropbox start [-i]
Starts the Dropbox daemon, dropboxd. If dropboxd is already running,
this will do nothing.
options:
-i --install auto install dropboxd if not available on the system
"""
should_install = "-i" in argv or "--install" in argv
# first check if dropbox is already running
if is_dropbox_running():
if not grab_link_url_if_necessary():
console_print("Dropbox is already running!")
return
console_print("Starting Dropbox...", linebreak=False)
console_flush()
if not start_dropbox():
if not should_install:
console_print()
console_print("The Dropbox daemon is not installed!")
console_print("Run \"dropbox start -i\" to install the daemon")
return
# install dropbox!!!
try:
download()
except:
traceback.print_exc()
else:
if GUI_AVAILABLE:
start_dropbox()
console_print("Done!")
else:
if start_dropbox():
if not grab_link_url_if_necessary():
console_print("Done!")
else:
if not grab_link_url_if_necessary():
console_print("Done!")
def can_reroll_autostart():
return ".config" in os.listdir(os.path.expanduser('~'))
def reroll_autostart(should_autostart):
home_dir = os.path.expanduser('~')
contents = os.listdir(home_dir)
# UBUNTU
if ".config" in contents:
autostart_dir = os.path.join(home_dir, ".config", "autostart")
autostart_link = os.path.join(autostart_dir, "dropbox.desktop")
if should_autostart:
if os.path.exists(DESKTOP_FILE):
if not os.path.exists(autostart_dir):
os.makedirs(autostart_dir)
shutil.copyfile(DESKTOP_FILE, autostart_link)
elif os.path.exists(autostart_link):
os.remove(autostart_link)
@command
def autostart(argv):
"""automatically start Dropbox at login
dropbox autostart [y/n]
options:
n Dropbox will not start automatically at login
y Dropbox will start automatically at login (default)
Note: May only work on current Ubuntu distributions.
"""
if len(argv) != 1:
console_print(''.join(autostart.__doc__.split('\n', 1)[1:]))
return
s = argv[0].lower()
if s.startswith('y') or s.startswith('-y'):
should_autostart = True
elif s.startswith('n') or s.startswith('-n'):
should_autostart = False
else:
should_autostart = None
if should_autostart is None:
console_print(autostart.__doc__,linebreak=False)
else:
reroll_autostart(should_autostart)
@command
def version(argv):
"""print version information for Dropbox
dropbox version
Prints the version information for the Dropbox proprietary daemon, if
it's installed, and the Dropbox command-line interface.
"""
dropbox_daemon_version = "Not installed"
try:
with open(os.path.join(DROPBOX_DIST_PATH, 'VERSION')) as f:
dropbox_daemon_version = f.read().strip()
except OSError:
pass
console_print("Dropbox daemon version: %s" % dropbox_daemon_version)
console_print("Dropbox command-line interface version: 2020.03.04")
@command
def help(argv):
"""provide help
dropbox help [COMMAND]
With no arguments, print a list of commands and a short description of
each. With a command, print descriptive help on how to use the
command.
"""
if not argv:
return usage()
for command in commands:
if command == argv[0]:
console_print(commands[command].__doc__.split('\n', 1)[1].strip())
return
for alias in aliases:
if alias == argv[0]:
console_print(aliases[alias].__doc__.split('\n', 1)[1].strip())
return
console_print("unknown command '%s'" % argv[0], f=sys.stderr)
def usage():
console_print("Dropbox command-line interface\n")
console_print("commands:\n")
console_print("Note: use dropbox help <command> to view usage for a specific command.\n")
out = []
for command in commands:
out.append((command, commands[command].__doc__.splitlines()[0]))
out.sort(key=lambda x: x[0])
spacing = max(len(o[0])+3 for o in out)
for o in out:
console_print(" %-*s%s" % (spacing, o[0], o[1]))
def main(argv):
global commands
# now we need to find out if one of the commands are in the
# argv list, and if so split the list at the point to
# separate the argv list at that point
cut = None
for i in range(len(argv)):
if argv[i] in commands or argv[i] in aliases:
cut = i
break
if cut is None:
usage()
os._exit(0)
return
# lol no options for now
globaloptionparser = optparse.OptionParser()
globaloptionparser.parse_args(argv[0:cut])
# now dispatch and run
result = None
if argv[cut] in commands:
result = commands[argv[cut]](argv[cut+1:])
elif argv[cut] in aliases:
result = aliases[argv[cut]](argv[cut+1:])
# flush, in case output is rerouted to a file.
console_flush()
# done
return result
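# Dispatch note (added, not in the original): main() scans argv for the first
# token that names a command or alias and splits the argument list there, so a
# hypothetical invocation
#
#   dropbox filestatus -l mydir
#
# finds 'filestatus' and calls commands['filestatus'](['-l', 'mydir']). Anything
# before the command name goes to the (currently option-less) global option
# parser, and a non-None return value (e.g. from 'running') becomes the
# process exit code via sys.exit() below.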
if __name__ == "__main__":
ret = main(sys.argv)
if ret is not None:
sys.exit(ret)
|
download_cazy_sequence.py
|
import requests
import re
from datetime import datetime
from threading import Thread
import queue, sys
import argparse
from lxml import etree
import os
in_queue = queue.Queue()
records_per_page = 1000
prefix = "http://www.cazy.org/"
fivefamilies = {"GH": "Glycoside-Hydrolases.html", "GT": "GlycosylTransferases.html", "PL": "Polysaccharide-Lyases.html", "CE": "Carbohydrate-Esterases.html", "CBM": "Carbohydrate-Binding-Modules.html", "AA": "Auxiliary-Activities.html"}
#fivefamilies = ["Auxiliary-Activities.html"]
rx_member = re.compile(r'<option value="(\S+?)">\w+</option>')
rx_kingdom = re.compile(r'<a href="(http://www\.cazy\.org/\w+_(archaea|bacteria|eukaryota|viruses|characterized|structure)\.html)">\w+</a> (?:(?:|\()(\d+).*(?:)|\))</span>')  # "(" made non-capturing so group 3 is the numeric count parsed below
rx_subfamilies_exist = re.compile(r'http://www\.cazy\.org/\w+_subfamilies\.html">Subfamilies')
rx_ncbi = re.compile(r'http://www\.ncbi\.nlm\.nih\.gov/entrez/viewer\.fcgi\?db=protein\S+val=(\S+)"')
acc_cazy = {}
cazy_acc = {}
characterized = set()
structure = set()
exclusion_list = set()
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--excl", help='Textfile with families that are no longer needed', required=False)
parser.add_argument("-f", "--fam", help='family', required=False, default="all")
parser.add_argument("-o", "--out", help='Fasta output', required=True)
parser.add_argument("-l", "--lst", help='Text output with families processed', required=True)
args = parser.parse_args()
argsdict = vars(args)
if argsdict["excl"] and os.path.isfile(argsdict["excl"]):
for line in open(argsdict["excl"], 'r'):
exclusion_list.add(line.strip())
thisTime = []
if argsdict["fam"] in list(fivefamilies.keys()):
thisTime.append(fivefamilies[argsdict["fam"]])
if len(thisTime) == 0:
thisTime = [fivefamilies[x] for x in fivefamilies]
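# Worker run by each scraper thread: pulls one family URL off in_queue, walks
# the per-kingdom listing pages (following pagination in steps of
# records_per_page), extracts every NCBI protein accession with rx_ncbi, and
# records it in acc_cazy / cazy_acc, flagging accessions that came from the
# "characterized" or "structure" views.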
def work():
while True:
url = in_queue.get()
#try:
#f = urlopen(address).read().decode('utf-8')
page = requests.get(url).content.decode('iso8859-1')
#page = urllib2.urlopen(url).read()
cazy_name = re.findall(r"http://www\.cazy\.org/(\w+)\.html", url)[0]
#print (cazy_name)
#print (url)
sub_family_exist = False
if rx_subfamilies_exist.search(page):
sub_family_exist = True
taxonurl = []
for taxon in rx_kingdom.findall(page):
taxonurl.append(taxon[0])
amount = int(taxon[2])
#print taxon.group(1), amount
subpages_minus_one = int((amount-1)/records_per_page)
for i in range(subpages_minus_one):
taxonurl_address = prefix + "/" + cazy_name + "_" + taxon[1] + ".html?debut_PRINC=" + str((i+1)*records_per_page) + "#pagination_PRINC"
#print taxonurl_address
taxonurl.append(taxonurl_address)
for taxonurl_address in taxonurl:
charac = False
structu = False
if "characterized" in taxonurl_address:
charac = True
if "structure" in taxonurl_address:
structu = True
taxonpage = requests.get(taxonurl_address).content.decode('iso8859-1')
tree = etree.HTML(taxonpage)
trs = tree.xpath("//tr")
for tr in trs:
#contents = etree.HTML(etree.tostring(tr)).xpath("//td")
#print etree.tostring(tr)
tds = etree.HTML(etree.tostring(tr)).xpath("//td")
accession = ""
family_subfamily = cazy_name
for td in tds:
#print etree.tostring(td)
search_ncbi = rx_ncbi.search(etree.tostring(td).decode())
if search_ncbi:
accession = search_ncbi.group(1).strip()
#print accession
if sub_family_exist:
sub_family = re.search(r'<td id="separateur2" align="center">(\d+)</td>',etree.tostring(td).decode())
if sub_family:
family_subfamily += "-subfamily_" + sub_family.group(1)
if accession != "" and family_subfamily not in exclusion_list:
#print (exclusion_list)
if accession not in acc_cazy:
acc_cazy[accession] = set()
acc_cazy[accession].add(family_subfamily)
if family_subfamily not in cazy_acc:
cazy_acc[family_subfamily] = set()
cazy_acc[family_subfamily].add(accession)
if charac == True:
characterized.add(accession)
if structu == True:
structure.add(accession)
# except:
# pass
# finally:
in_queue.task_done()
for i in range(70):
t = Thread(target=work)
t.daemon = True
t.start()
for family in thisTime:
address = prefix + family
#print (family)
f = requests.get(address).content.decode('iso8859-1')
for member in rx_member.findall(f):
cazy_name = re.findall(r"http://www\.cazy\.org/(\w+)\.html", member)[0]
in_queue.put(member)
in_queue.join()
#print "now writing, wish me luck"
prefix = "https://www.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein&rettype=fasta&id="
id_per_request = 200
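# Fetch FASTA records from NCBI efetch for a comma-separated accession list
# (passed with a trailing comma); if the batched request fails, retry the
# accessions one at a time so a single bad ID does not lose the whole batch.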
def getSeq(id_list):
url = prefix + id_list[:len(id_list)-1]
temp_content = ""
try:
temp_content += requests.get(url).content.decode('iso8859-1')
#temp_content += urllib2.urlopen(url).read()
except:
for id in id_list[:len(id_list)-1].split(","):
url = prefix + id
#print url
try:
temp_content += requests.get(url).content.decode('iso8859-1')
#temp_content += urllib2.urlopen(url).read()
except:
#print id
pass
return temp_content
done = set()
for cazy_name in sorted(cazy_acc.keys()):
content = ""
#print cazy_name
temp_content = ""
id_list = ""
counter = 0
for acc in cazy_acc[cazy_name]:
if acc not in done:
done.add(acc)
#if str.isdigit(acc[0]):
# continue
id_list += acc + ","
counter += 1
if counter == id_per_request:
try:
counter = 0
temp_content += getSeq(id_list)
id_list = ""
except:
pass
if id_list != "":
try:
temp_content += getSeq(id_list)
id_list = ""
except:
pass
for line in temp_content.splitlines():
#print (line, cazy_acc)
if ">" in line:
print (str(datetime.now()), line)
content += line
for acc in cazy_acc[cazy_name]:
try:
if acc in line[1:].split(" ")[0]:
content += "|cazy"
for cazy in acc_cazy[acc]:
content += "|" + cazy
if acc in characterized:
content += "_characterized"
if acc in structure:
content += "_structure"
found = True
break
except:
print (line)
sys.exit()
#if found == False:
# print line + " no acc found in cazy_acc"
content += "\n"
else:
content += line.strip() + "\n"
#print (content)
with open(argsdict["out"], "a") as myfile:
myfile.write(content)
with open(argsdict["lst"], "a") as myfile:
myfile.write(cazy_name + "\n")
|
stream_capture.py
|
# ########################################################################
#
# An example script for capturing a stream and processing it with OpenCV
#
##########################################################################
import cv2
import msvcrt as m
import numpy as np
from time import sleep
from threading import Thread
from stream_client import StreamClient, StreamBuffer
DATA_LEN = 100
def streaming_callback(timestamp, type, data):
global shown
if type=="image":
#print timestamp
sb.update_image(timestamp, data)
# Create stream client
sc = StreamClient("192.168.1.2", 8080)#connect to your camera!
# Stream Buffer
sb = StreamBuffer(DATA_LEN)# DATA_LEN is not really used here because it is an images only stream
# your thread!
def my_thread():
while True:
buffer = sb.swap()
nparr = np.frombuffer(buffer.image, np.uint8)  # np.fromstring is deprecated for binary data
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
#
# do your coding here!
#
sleep(0.1)  # roughly 10 frames per second
# start the client:
# - you may need to put a relative url in the get
# - you may need to provide user and password if basic authentication is needed
sc.get(streaming_callback, get="camera_url, generally just empty", user="user", pw="password")
# allow to buffer some frames
sleep(1)
# start a generic computer vision thread (implement yours)
Thread(name="cv", target=my_thread).start()
# wait
m.getch()
|
vpp_papi.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import logging
import collections
import struct
import json
import threading
import fnmatch
import weakref
import atexit
from . vpp_serializer import VPPType, VPPEnumType, VPPUnionType, BaseTypes
from . vpp_serializer import VPPMessage, vpp_get_type
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
class VppEnumType(type):
def __getattr__(cls, name):
t = vpp_get_type(name)
return t.enum
# Python3
# class VppEnum(metaclass=VppEnumType):
# pass
class VppEnum:
__metaclass__ = VppEnumType
def vpp_atexit(vpp_weakref):
"""Clean up VPP connection on shutdown."""
vpp_instance = vpp_weakref()
if vpp_instance and vpp_instance.transport.connected:
vpp_instance.logger.debug('Cleaning up VPP on exit')
vpp_instance.disconnect()
def vpp_iterator(d):
if sys.version[0] == '2':
return d.iteritems()
else:
return d.items()
class VppApiDynamicMethodHolder(object):
pass
class FuncWrapper(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
def __call__(self, **kwargs):
return self._func(**kwargs)
class VPP():
"""VPP interface.
This class provides the APIs to VPP. The APIs are loaded
from provided .api.json files and makes functions accordingly.
These functions are documented in the VPP .api files, as they
are dynamically created.
Additionally, VPP can send callback messages; this class
provides a means to register a callback function to receive
these messages in a background thread.
"""
def process_json_file(self, apidef_file):
api = json.load(apidef_file)
types = {}
for t in api['enums']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'enum', 'data': t}
for t in api['unions']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'union', 'data': t}
for t in api['types']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'type', 'data': t}
i = 0
while True:
unresolved = {}
for k, v in types.items():
t = v['data']
if not vpp_get_type(t[0]):
if v['type'] == 'enum':
try:
VPPEnumType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'union':
try:
VPPUnionType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'type':
try:
VPPType(t[0], t[1:])
except ValueError:
unresolved[k] = v
if len(unresolved) == 0:
break
if i > 3:
raise ValueError('Unresolved type definitions {}'
.format(unresolved))
types = unresolved
i += 1
for m in api['messages']:
try:
self.messages[m[0]] = VPPMessage(m[0], m[1:])
except NotImplementedError:
self.logger.error('Not implemented error for {}'.format(m[0]))
def __init__(self, apifiles=None, testmode=False, async_thread=True,
logger=logging.getLogger('vpp_papi'), loglevel='debug',
read_timeout=5, use_socket=False,
server_address='/run/vpp-api.sock'):
"""Create a VPP API object.
apifiles is a list of files containing API
descriptions that will be loaded - methods will be
dynamically created reflecting these APIs. If not
provided this will load the API files from VPP's
default install location.
logger, if supplied, is the logging logger object to log to.
loglevel, if supplied, is the log level this logger is set
to report at (from the loglevels in the logging module).
"""
if logger is None:
logger = logging.getLogger(__name__)
if loglevel is not None:
logger.setLevel(loglevel)
self.logger = logger
self.messages = {}
self.id_names = []
self.id_msgdef = []
self.header = VPPType('header', [['u16', 'msgid'],
['u32', 'client_index']])
self.apifiles = []
self.event_callback = None
self.message_queue = queue.Queue()
self.read_timeout = read_timeout
self.async_thread = async_thread
if use_socket:
from . vpp_transport_socket import VppTransport
else:
from . vpp_transport_shmem import VppTransport
if not apifiles:
# Pick up API definitions from default directory
try:
apifiles = self.find_api_files()
except RuntimeError:
# In test mode we don't care that we can't find the API files
if testmode:
apifiles = []
else:
raise
for file in apifiles:
with open(file) as apidef_file:
self.process_json_file(apidef_file)
self.apifiles = apifiles
# Basic sanity check
if len(self.messages) == 0 and not testmode:
raise ValueError(1, 'Missing JSON message definitions')
self.transport = VppTransport(self, read_timeout=read_timeout,
server_address=server_address)
# Make sure we allow VPP to clean up the message rings.
atexit.register(vpp_atexit, weakref.ref(self))
class ContextId(object):
"""Thread-safe provider of unique context IDs."""
def __init__(self):
self.context = 0
self.lock = threading.Lock()
def __call__(self):
"""Get a new unique (or, at least, not recently used) context."""
with self.lock:
self.context += 1
return self.context
get_context = ContextId()
def get_type(self, name):
return vpp_get_type(name)
@classmethod
def find_api_dir(cls):
"""Attempt to find the best directory in which API definition
files may reside. If the value VPP_API_DIR exists in the environment
then it is first on the search list. If we're inside a recognized
location in a VPP source tree (src/scripts and src/vpp-api/python)
then entries from there to the likely locations in build-root are
added. Finally the location used by system packages is added.
:returns: A single directory name, or None if no such directory
could be found.
"""
dirs = []
if 'VPP_API_DIR' in os.environ:
dirs.append(os.environ['VPP_API_DIR'])
# perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir;
# in which case, plot a course to likely places in the src tree
import __main__ as main
if hasattr(main, '__file__'):
# get the path of the calling script
localdir = os.path.dirname(os.path.realpath(main.__file__))
else:
# use cwd if there is no calling script
localdir = os.getcwd()
localdir_s = localdir.split(os.path.sep)
def dmatch(dir):
"""Match dir against right-hand components of the script dir"""
d = dir.split('/') # param 'dir' assumes a / separator
length = len(d)
return len(localdir_s) > length and localdir_s[-length:] == d
def sdir(srcdir, variant):
"""Build a path from srcdir to the staged API files of
'variant' (typically '' or '_debug')"""
# Since 'core' and 'plugin' files are staged
# in separate directories, we target the parent dir.
return os.path.sep.join((
srcdir,
'build-root',
'install-vpp%s-native' % variant,
'vpp',
'share',
'vpp',
'api',
))
srcdir = None
if dmatch('src/scripts'):
srcdir = os.path.sep.join(localdir_s[:-2])
elif dmatch('src/vpp-api/python'):
srcdir = os.path.sep.join(localdir_s[:-3])
elif dmatch('test'):
# we're apparently running tests
srcdir = os.path.sep.join(localdir_s[:-1])
if srcdir:
# we're in the source tree, try both the debug and release
# variants.
dirs.append(sdir(srcdir, '_debug'))
dirs.append(sdir(srcdir, ''))
# Test for staged copies of the scripts
# For these, since we explicitly know if we're running a debug versus
# release variant, target only the relevant directory
if dmatch('build-root/install-vpp_debug-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, '_debug'))
if dmatch('build-root/install-vpp-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, ''))
# finally, try the location system packages typically install into
dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api')))
# check the directories for existence; first one wins
for dir in dirs:
if os.path.isdir(dir):
return dir
return None
@classmethod
def find_api_files(cls, api_dir=None, patterns='*'):
"""Find API definition files from the given directory tree with the
given pattern. If no directory is given then find_api_dir() is used
to locate one. If no pattern is given then all definition files found
in the directory tree are used.
:param api_dir: A directory tree in which to locate API definition
files; subdirectories are descended into.
If this is None then find_api_dir() is called to discover it.
:param patterns: A list of patterns to use in each visited directory
when looking for files.
This can be a list/tuple object or a comma-separated string of
patterns. Each value in the list will have leading/trailing
whitespace stripped.
The pattern specifies the first part of the filename, '.api.json'
is appended.
The results are de-duplicated, thus overlapping patterns are fine.
If this is None it defaults to '*' meaning "all API files".
:returns: A list of file paths for the API files found.
"""
if api_dir is None:
api_dir = cls.find_api_dir()
if api_dir is None:
raise RuntimeError("api_dir cannot be located")
if isinstance(patterns, list) or isinstance(patterns, tuple):
patterns = [p.strip() + '.api.json' for p in patterns]
else:
patterns = [p.strip() + '.api.json' for p in patterns.split(",")]
api_files = []
for root, dirnames, files in os.walk(api_dir):
# iterate all given patterns and de-dup the result
files = set(sum([fnmatch.filter(files, p) for p in patterns], []))
for filename in files:
api_files.append(os.path.join(root, filename))
return api_files
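# Hedged example (added, not part of the upstream file): with the defaults this
# walks the discovered API directory and returns every *.api.json below it; a
# pattern list narrows the search, e.g.
#
#   VPP.find_api_files(patterns='vpe, interface')
#   # -> [..., '.../vpe.api.json', '.../interface.api.json']  (paths depend on
#   #     the install; see find_api_dir() above)
#
# Comma-separated strings and list/tuple arguments are both accepted, and
# duplicate matches are collapsed before the file list is returned.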
@property
def api(self):
if not hasattr(self, "_api"):
raise Exception("Not connected, api definitions not available")
return self._api
def make_function(self, msg, i, multipart, do_async):
if (do_async):
def f(**kwargs):
return self._call_vpp_async(i, msg, **kwargs)
else:
def f(**kwargs):
return self._call_vpp(i, msg, multipart, **kwargs)
f.__name__ = str(msg.name)
f.__doc__ = ", ".join(["%s %s" %
(msg.fieldtypes[j], k)
for j, k in enumerate(msg.fields)])
return f
def _register_functions(self, do_async=False):
self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
self._api = VppApiDynamicMethodHolder()
for name, msg in vpp_iterator(self.messages):
n = name + '_' + msg.crc[2:]
i = self.transport.get_msg_index(n.encode())
if i > 0:
self.id_msgdef[i] = msg
self.id_names[i] = name
# TODO: Fix multipart (use services)
multipart = True if name.find('_dump') > 0 else False
f = self.make_function(msg, i, multipart, do_async)
setattr(self._api, name, FuncWrapper(f))
else:
self.logger.debug(
'No such message type or failed CRC checksum: %s', n)
def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen,
do_async):
pfx = chroot_prefix.encode() if chroot_prefix else None
rv = self.transport.connect(name.encode(), pfx, msg_handler, rx_qlen)
if rv != 0:
raise IOError(2, 'Connect failed')
self.vpp_dictionary_maxid = self.transport.msg_table_max_index()
self._register_functions(do_async=do_async)
# Initialise control ping
crc = self.messages['control_ping'].crc
self.control_ping_index = self.transport.get_msg_index(
('control_ping' + '_' + crc[2:]).encode())
self.control_ping_msgdef = self.messages['control_ping']
if self.async_thread:
self.event_thread = threading.Thread(
target=self.thread_msg_handler)
self.event_thread.daemon = True
self.event_thread.start()
return rv
def connect(self, name, chroot_prefix=None, do_async=False, rx_qlen=32):
"""Attach to VPP.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
do_async - if true, messages are sent without waiting for a reply
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
msg_handler = self.transport.get_callback(do_async)
return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen,
do_async)
def connect_sync(self, name, chroot_prefix=None, rx_qlen=32):
"""Attach to VPP in synchronous mode. Application must poll for events.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
return self.connect_internal(name, None, chroot_prefix, rx_qlen,
do_async=False)
def disconnect(self):
"""Detach from VPP."""
rv = self.transport.disconnect()
self.message_queue.put("terminate event thread")
return rv
def msg_handler_sync(self, msg):
"""Process an incoming message from VPP in sync mode.
The message may be a reply or it may be an async notification.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
# If we have a context, then use the context to find any
# request waiting for a reply
context = 0
if hasattr(r, 'context') and r.context > 0:
context = r.context
if context == 0:
# No context -> async notification that we feed to the callback
self.message_queue.put_nowait(r)
else:
raise IOError(2, 'RPC reply message received in event handler')
def decode_incoming_msg(self, msg):
if not msg:
self.logger.warning('vpp_api.read failed')
return
(i, ci), size = self.header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
# Decode message and returns a tuple.
#
msgobj = self.id_msgdef[i]
if not msgobj:
raise IOError(2, 'Reply message undefined')
r, size = msgobj.unpack(msg)
return r
def msg_handler_async(self, msg):
"""Process a message from VPP in async mode.
In async mode, all messages are returned to the callback.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def _control_ping(self, context):
"""Send a ping command."""
self._call_vpp_async(self.control_ping_index,
self.control_ping_msgdef,
context=context)
def validate_args(self, msg, kwargs):
d = set(kwargs.keys()) - set(msg.field_by_name.keys())
if d:
raise ValueError('Invalid argument {} to {}'
.format(list(d), msg.name))
def _call_vpp(self, i, msg, multipart, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
multipart - True if the message returns multiple
messages in return.
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The return value is the message or message array containing
the response. It will raise an IOError exception if there was
no response within the timeout window.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
pass
self.validate_args(msg, kwargs)
b = msg.pack(kwargs)
self.transport.suspend()
self.transport.write(b)
if multipart:
# Send a ping after the request - we use its response
# to detect that we have seen all results.
self._control_ping(context)
# Block until we get a reply.
rl = []
while (True):
msg = self.transport.read()
if not msg:
raise IOError(2, 'VPP API client: read failed')
r = self.decode_incoming_msg(msg)
msgname = type(r).__name__
if context not in r or r.context == 0 or context != r.context:
# Message being queued
self.message_queue.put_nowait(r)
continue
if not multipart:
rl = r
break
if msgname == 'control_ping_reply':
break
rl.append(r)
self.transport.resume()
return rl
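# Flow note (added, not upstream): for a *_dump request the multipart flag makes
# _call_vpp() send a control_ping right after the request and then collect
# replies into a list until the matching control_ping_reply arrives, so a
# sketch of a dump call looks like:
#
#   details = vpp.api.sw_interface_dump()   # hypothetical message name
#   for d in details:                       # one entry per *_details reply
#       print(d)
#
# Non-dump calls block for and return the single decoded reply object instead.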
def _call_vpp_async(self, i, msg, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
kwargs['client_index'] = 0
kwargs['_vl_msg_id'] = i
b = msg.pack(kwargs)
self.transport.write(b)
def register_event_callback(self, callback):
"""Register a callback for async messages.
This will be called for async notifications in sync mode,
and all messages in async mode. In sync mode, replies to
requests will not come here.
callback is a fn(msg_type_name, msg_type) that will be
called when a message comes in. While this function is
executing, note that (a) you are in a background thread and
may wish to use threading.Lock to protect your datastructures,
and (b) message processing from VPP will stop (so if you take
a long while about it you may provoke reply timeouts or cause
VPP to fill the RX buffer). Passing None will disable the
callback.
"""
self.event_callback = callback
def thread_msg_handler(self):
"""Python thread calling the user registered message handler.
This is to emulate the old style event callback scheme. Modern
clients should provide their own thread to poll the event
queue.
"""
while True:
r = self.message_queue.get()
if r == "terminate event thread":
break
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
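# --- Illustrative sketch (not part of the original client) ---
# Minimal example of the event-callback scheme described above, assuming `vpp`
# is an already-connected instance of this class. The helper name is made up.
def _example_register_events(vpp):
    import threading
    lock = threading.Lock()
    received = []
    def on_event(msg_type_name, msg):
        # Runs on the client's background thread; keep it short and guard
        # shared state, since message processing is paused while it runs.
        with lock:
            received.append((msg_type_name, msg))
    vpp.register_event_callback(on_event)
    return received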
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
tests.py
|
import json
import os
import sys
import threading
import time
import unittest
if sys.version_info >= (3, 3):
import unittest.mock as mock
else:
import mock
from yeelight import Bulb
from yeelight import enums
from yeelight import Flow
from yeelight import flows
from yeelight import TemperatureTransition
from yeelight.enums import LightType
from yeelight.enums import SceneClass
from yeelight.flow import Action
sys.path.insert(0, os.path.abspath(__file__ + "/../.."))
class SocketMock(object):
def __init__(self, received=b'{"id": 0, "result": ["ok"]}'):
self.received = received
def send(self, data):
self.sent = json.loads(data.decode("utf8"))
def recv(self, length):
return self.received
class Tests(unittest.TestCase):
def setUp(self):
self.socket = SocketMock()
self.bulb = Bulb(ip="", auto_on=True)
self.bulb._Bulb__socket = self.socket
def test_rgb1(self):
self.bulb.set_rgb(255, 255, 0)
self.assertEqual(self.socket.sent["method"], "set_rgb")
self.assertEqual(self.socket.sent["params"], [16776960, "smooth", 300])
def test_rgb2(self):
self.bulb.effect = "sudden"
self.bulb.set_rgb(255, 255, 0)
self.assertEqual(self.socket.sent["method"], "set_rgb")
self.assertEqual(self.socket.sent["params"], [16776960, "sudden", 300])
def test_rgb3(self):
self.bulb.set_rgb(255, 255, 0, effect="sudden")
self.assertEqual(self.socket.sent["method"], "set_rgb")
self.assertEqual(self.socket.sent["params"], [16776960, "sudden", 300])
def test_hsv1(self):
self.bulb.set_hsv(200, 100, effect="sudden")
self.assertEqual(self.socket.sent["method"], "set_hsv")
self.assertEqual(self.socket.sent["params"], [200, 100, "sudden", 300])
def test_hsv2(self):
self.bulb.set_hsv(200, 100, 10, effect="sudden", duration=500)
self.assertEqual(self.socket.sent["method"], "start_cf")
self.assertEqual(self.socket.sent["params"], [1, 1, "50, 1, 43263, 10"])
def test_hsv3(self):
self.bulb.set_hsv(200, 100, 10, effect="smooth", duration=1000)
self.assertEqual(self.socket.sent["method"], "start_cf")
self.assertEqual(self.socket.sent["params"], [1, 1, "1000, 1, 43263, 10"])
def test_hsv4(self):
self.bulb.effect = "sudden"
self.bulb.set_hsv(200, 100, 10, effect="smooth", duration=1000)
self.assertEqual(self.socket.sent["method"], "start_cf")
self.assertEqual(self.socket.sent["params"], [1, 1, "1000, 1, 43263, 10"])
def test_toggle1(self):
self.bulb.toggle()
self.assertEqual(self.socket.sent["method"], "toggle")
self.assertEqual(self.socket.sent["params"], ["smooth", 300])
self.bulb.toggle(duration=3000)
self.assertEqual(self.socket.sent["params"], ["smooth", 3000])
def test_turn_off1(self):
self.bulb.turn_off()
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["off", "smooth", 300])
self.bulb.turn_off(duration=3000)
self.assertEqual(self.socket.sent["params"], ["off", "smooth", 3000])
def test_turn_on1(self):
self.bulb.turn_on()
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "smooth", 300])
self.bulb.turn_on(duration=3000)
self.assertEqual(self.socket.sent["params"], ["on", "smooth", 3000])
def test_turn_on2(self):
self.bulb.effect = "sudden"
self.bulb.turn_on()
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "sudden", 300])
def test_turn_on3(self):
self.bulb.turn_on(effect="sudden", duration=50)
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "sudden", 50])
def test_turn_on4(self):
self.bulb.power_mode = enums.PowerMode.MOONLIGHT
self.bulb.turn_on()
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "smooth", 300, enums.PowerMode.MOONLIGHT.value])
def test_turn_on5(self):
self.bulb.turn_on(power_mode=enums.PowerMode.MOONLIGHT)
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "smooth", 300, enums.PowerMode.MOONLIGHT.value])
def test_set_power_mode1(self):
self.bulb.set_power_mode(enums.PowerMode.MOONLIGHT)
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "smooth", 300, enums.PowerMode.MOONLIGHT.value])
def test_set_power_mode2(self):
self.bulb.set_power_mode(enums.PowerMode.NORMAL)
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "smooth", 300, enums.PowerMode.NORMAL.value])
def test_set_power_mode3(self):
self.bulb.set_power_mode(enums.PowerMode.LAST)
self.assertEqual(self.socket.sent["method"], "set_power")
self.assertEqual(self.socket.sent["params"], ["on", "smooth", 300])
def test_color_temp1(self):
self.bulb.set_color_temp(1400)
self.assertEqual(self.socket.sent["method"], "set_ct_abx")
self.assertEqual(self.socket.sent["params"], [1700, "smooth", 300])
self.bulb.set_color_temp(1400, duration=3000)
self.assertEqual(self.socket.sent["params"], [1700, "smooth", 3000])
def test_color_temp2(self):
self.bulb.set_color_temp(8400, effect="sudden")
self.assertEqual(self.socket.sent["method"], "set_ct_abx")
self.assertEqual(self.socket.sent["params"], [6500, "sudden", 300])
def test_color_temp_with_model_declared(self):
self.bulb._model = "ceiling2"
self.bulb.set_color_temp(1800)
self.assertEqual(self.socket.sent["method"], "set_ct_abx")
self.assertEqual(self.socket.sent["params"], [2700, "smooth", 300])
def test_start_flow(self):
transitions = [TemperatureTransition(1700, duration=40000), TemperatureTransition(6500, duration=40000)]
flow = Flow(count=1, action=Action.stay, transitions=transitions)
self.bulb.start_flow(flow)
self.assertEqual(self.socket.sent["method"], "start_cf")
self.assertEqual(self.socket.sent["params"], [2, 1, "40000, 2, 1700, 100, 40000, 2, 6500, 100"])
def test_set_scene_color(self):
self.bulb.set_scene(SceneClass.COLOR, 255, 255, 0, 10)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["color", 16776960, 10])
def test_set_scene_color_ambilight(self):
self.bulb.set_scene(SceneClass.COLOR, 255, 255, 0, 10, light_type=LightType.Ambient)
self.assertEqual(self.socket.sent["method"], "bg_set_scene")
self.assertEqual(self.socket.sent["params"], ["color", 16776960, 10])
def test_set_scene_color_temperature(self):
self.bulb.set_scene(SceneClass.CT, 2000, 15)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["ct", 2000, 15])
def test_set_scene_hsv(self):
self.bulb.set_scene(SceneClass.HSV, 200, 100, 10)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["hsv", 200, 100, 10])
def test_set_scene_color_flow(self):
transitions = [TemperatureTransition(1700, duration=40000), TemperatureTransition(6500, duration=40000)]
flow = Flow(count=1, action=Action.stay, transitions=transitions)
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["cf", 2, 1, "40000, 2, 1700, 100, 40000, 2, 6500, 100"])
def test_set_scene_auto_delay_off(self):
self.bulb.set_scene(SceneClass.AUTO_DELAY_OFF, 20, 1)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["auto_delay_off", 20, 1])
def test_sunrise(self):
flow = flows.sunrise()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(
self.socket.sent["params"], ["cf", 3, 1, "50, 1, 16731392, 1, 360000, 2, 1700, 10, 540000, 2, 2700, 100"]
)
def test_sunset(self):
flow = flows.sunset()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(
self.socket.sent["params"], ["cf", 3, 2, "50, 2, 2700, 10, 180000, 2, 1700, 5, 420000, 1, 16731136, 1"]
)
def test_romance(self):
flow = flows.romance()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["cf", 0, 1, "4000, 1, 5838189, 1, 4000, 1, 6689834, 1"])
def test_happy_birthday(self):
flow = flows.happy_birthday()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(
self.socket.sent["params"],
["cf", 0, 1, "1996, 1, 14438425, 80, 1996, 1, 14448670, 80, 1996, 1, 11153940, 80"],
)
def test_candle_flicker(self):
flow = flows.candle_flicker()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(
self.socket.sent["params"],
[
"cf",
0,
0,
"800, 2, 2700, 50, 800, 2, 2700, 30, 1200, 2, 2700, 80, 800, 2, 2700, 60, 1200, 2, 2700, 90, 2400, 2, 2700, 50, 1200, 2, 2700, 80, 800, 2, 2700, 60, 400, 2, 2700, 70",
],
)
def test_home(self):
flow = flows.home()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["cf", 0, 0, "500, 2, 3200, 80"])
def test_night_mode(self):
flow = flows.night_mode()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["cf", 0, 0, "500, 1, 16750848, 1"])
def test_date_night(self):
flow = flows.date_night()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["cf", 0, 0, "500, 1, 16737792, 50"])
def test_movie(self):
flow = flows.movie()
self.bulb.set_scene(SceneClass.CF, flow)
self.assertEqual(self.socket.sent["method"], "set_scene")
self.assertEqual(self.socket.sent["params"], ["cf", 0, 0, "500, 1, 1315890, 50"])
def test_notification(self):
notification_event = threading.Event()
listening_stopped_event = threading.Event()
shutdown = False
def _callback(new_properties):
notification_event.set()
def _listen():
self.bulb.listen(_callback)
listening_stopped_event.set()
def _blocking_recv(size):
time.sleep(0.1)
if shutdown:
raise IOError
return b'{"method": "props", "params": {"power": "on"}}'
def _shutdown(type):
shutdown = True # noqa: F841
socket = mock.MagicMock()
type(socket).recv = mock.MagicMock(side_effect=_blocking_recv)
type(socket).shutdown = mock.MagicMock(side_effect=_shutdown)
with mock.patch("yeelight.main.socket.socket", return_value=socket):
assert self.bulb.last_properties == {}
thread = threading.Thread(target=_listen)
thread.start()
assert notification_event.wait(0.5) is True
assert self.bulb.last_properties == {"power": "on"}
self.bulb.stop_listening()
assert listening_stopped_event.wait(0.5) is True
if __name__ == "__main__":
unittest.main()
|
exposition.py
|
from __future__ import unicode_literals
import base64
from contextlib import closing
import os
import socket
import sys
import threading
from wsgiref.simple_server import make_server, WSGIRequestHandler
from .openmetrics import exposition as openmetrics
from .registry import REGISTRY
from .utils import floatToGoString
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from urllib2 import build_opener, Request, HTTPHandler
from urllib import quote_plus
from urlparse import parse_qs, urlparse
except ImportError:
# Python 3
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.request import build_opener, Request, HTTPHandler
from urllib.parse import quote_plus, parse_qs, urlparse
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
"""Content type of the latest text format"""
PYTHON26_OR_OLDER = sys.version_info < (2, 7)
PYTHON376_OR_NEWER = sys.version_info > (3, 7, 5)
def make_wsgi_app(registry=REGISTRY):
"""Create a WSGI app which serves the metrics from a registry."""
def prometheus_app(environ, start_response):
params = parse_qs(environ.get('QUERY_STRING', ''))
r = registry
encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT'))
if 'name[]' in params:
r = r.restricted_registry(params['name[]'])
output = encoder(r)
status = str('200 OK')
headers = [(str('Content-type'), content_type)]
start_response(status, headers)
return [output]
return prometheus_app
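# Illustrative sketch (not part of the upstream module): the WSGI app above can
# be handed to any WSGI server; here the stdlib reference server is used with
# an arbitrary example port.
def _example_serve_wsgi_app(port=8000):
    httpd = make_server('', port, make_wsgi_app())
    httpd.serve_forever()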
class _SilentHandler(WSGIRequestHandler):
"""WSGI handler that does not log requests."""
def log_message(self, format, *args):
"""Log nothing."""
def start_wsgi_server(port, addr='', registry=REGISTRY):
"""Starts a WSGI server for prometheus metrics as a daemon thread."""
app = make_wsgi_app(registry)
httpd = make_server(addr, port, app, handler_class=_SilentHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
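# Illustrative usage sketch: the server runs on a daemon thread, so the caller
# has to keep the process alive itself. Port and sleep interval are example values.
def _example_run_wsgi_exporter(port=8000):
    import time
    start_wsgi_server(port)
    while True:
        time.sleep(60)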
def generate_latest(registry=REGISTRY):
"""Returns the metrics from the registry in latest text format as a string."""
def sample_line(line):
if line.labels:
labelstr = '{{{0}}}'.format(','.join(
['{0}="{1}"'.format(
k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
for k, v in sorted(line.labels.items())]))
else:
labelstr = ''
timestamp = ''
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))
return '{0}{1} {2}{3}\n'.format(
line.name, labelstr, floatToGoString(line.value), timestamp)
output = []
for metric in registry.collect():
try:
mname = metric.name
mtype = metric.type
# Munging from OpenMetrics into Prometheus format.
if mtype == 'counter':
mname = mname + '_total'
elif mtype == 'info':
mname = mname + '_info'
mtype = 'gauge'
elif mtype == 'stateset':
mtype = 'gauge'
elif mtype == 'gaugehistogram':
# A gauge histogram is really a gauge,
# but this captures the structure better.
mtype = 'histogram'
elif mtype == 'unknown':
mtype = 'untyped'
output.append('# HELP {0} {1}\n'.format(
mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append('# TYPE {0} {1}\n'.format(mname, mtype))
om_samples = {}
for s in metric.samples:
for suffix in ['_created', '_gsum', '_gcount']:
if s.name == metric.name + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
om_samples.setdefault(suffix, []).append(sample_line(s))
break
else:
output.append(sample_line(s))
except Exception as exception:
exception.args = (exception.args or ('',)) + (metric,)
raise
for suffix, lines in sorted(om_samples.items()):
output.append('# HELP {0}{1} {2}\n'.format(metric.name, suffix,
metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append('# TYPE {0}{1} gauge\n'.format(metric.name, suffix))
output.extend(lines)
return ''.join(output).encode('utf-8')
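# Illustrative sketch: rendering the default registry in the text format built
# above. generate_latest() returns bytes, so decode before printing.
def _example_dump_metrics():
    print(generate_latest(REGISTRY).decode('utf-8'))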
def choose_encoder(accept_header):
accept_header = accept_header or ''
for accepted in accept_header.split(','):
if accepted.split(';')[0].strip() == 'application/openmetrics-text':
return (openmetrics.generate_latest,
openmetrics.CONTENT_TYPE_LATEST)
return generate_latest, CONTENT_TYPE_LATEST
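# Illustrative sketch of the content negotiation above: an OpenMetrics Accept
# header selects the OpenMetrics encoder, anything else falls back to the
# classic text format. The header value is an example only.
def _example_negotiate(accept_header='application/openmetrics-text'):
    encoder, content_type = choose_encoder(accept_header)
    return encoder(REGISTRY), content_type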
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""
registry = REGISTRY
def do_GET(self):
registry = self.registry
params = parse_qs(urlparse(self.path).query)
encoder, content_type = choose_encoder(self.headers.get('Accept'))
if 'name[]' in params:
registry = registry.restricted_registry(params['name[]'])
try:
output = encoder(registry)
except:
self.send_error(500, 'error generating metric output')
raise
self.send_response(200)
self.send_header('Content-Type', content_type)
self.end_headers()
self.wfile.write(output)
def log_message(self, format, *args):
"""Log nothing."""
@classmethod
def factory(cls, registry):
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).
# As we have unicode_literals, we need to create a str()
# object for type().
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object),
{"registry": registry})
return MyMetricsHandler
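# Illustrative sketch: the factory above ties a handler class to a specific
# registry, so a plain HTTPServer can expose something other than the global
# REGISTRY. The port is an example value.
def _example_serve_custom_registry(registry, port=8001):
    handler_cls = MetricsHandler.factory(registry)
    httpd = HTTPServer(('', port), handler_cls)
    httpd.serve_forever()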
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True
def start_http_server(port, addr='', registry=REGISTRY):
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
def write_to_textfile(path, registry):
"""Write metrics to the given path.
This is intended for use with the Node exporter textfile collector.
The path must end in .prom for the textfile collector to process it."""
tmppath = '%s.%s.%s' % (path, os.getpid(), threading.current_thread().ident)
with open(tmppath, 'wb') as f:
f.write(generate_latest(registry))
# rename(2) is atomic.
os.rename(tmppath, path)
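# Illustrative sketch: collect into a private registry and write it out for the
# node exporter textfile collector. Path and metric name are example values.
def _example_write_textfile(path='/var/lib/node_exporter/textfile/batch.prom'):
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    g = Gauge('job_last_success_unixtime', 'Last time the batch job finished',
              registry=registry)
    g.set_to_current_time()
    write_to_textfile(path, registry)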
def default_handler(url, method, timeout, headers, data):
"""Default handler that implements HTTP/HTTPS connections.
Used by the push_to_gateway functions. Can be re-used by other handlers."""
def handle():
request = Request(url, data=data)
request.get_method = lambda: method
for k, v in headers:
request.add_header(k, v)
resp = build_opener(HTTPHandler).open(request, timeout=timeout)
if resp.code >= 400:
raise IOError("error talking to pushgateway: {0} {1}".format(
resp.code, resp.msg))
return handle
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
"""Handler that implements HTTP/HTTPS connections with Basic Auth.
Sets auth headers using supplied 'username' and 'password', if set.
Used by the push_to_gateway functions. Can be re-used by other handlers."""
def handle():
"""Handler that implements HTTP Basic Auth.
"""
if username is not None and password is not None:
auth_value = '{0}:{1}'.format(username, password).encode('utf-8')
auth_token = base64.b64encode(auth_value)
auth_header = b'Basic ' + auth_token
headers.append(['Authorization', auth_header])
default_handler(url, method, timeout, headers, data)()
return handle
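# Illustrative sketch: the push functions below take a `handler` argument, so
# Basic Auth can be added by wrapping this handler with credentials baked in.
# Gateway address, job name and credentials are placeholders.
def _example_push_with_auth(registry):
    def my_auth_handler(url, method, timeout, headers, data):
        return basic_auth_handler(url, method, timeout, headers, data,
                                  'username', 'password')
    push_to_gateway('localhost:9091', job='batch_job', registry=registry,
                    handler=my_auth_handler)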
def push_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
"""Push metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
If not None, the argument must be a function which accepts
the following arguments:
url, method, timeout, headers, and content
May be used to implement additional functionality not
supported by the built-in default handler (such as SSL
              client certificates, and HTTP authentication mechanisms).
'url' is the URL for the request, the 'gateway' argument
described earlier will form the basis of this URL.
'method' is the HTTP method which should be used when
carrying out the request.
'timeout' requests not successfully completed after this
many seconds should be aborted. If timeout is None, then
the handler should not set a timeout.
'headers' is a list of ("header-name","header-value") tuples
which must be passed to the pushgateway in the form of HTTP
request headers.
The function should raise an exception (e.g. IOError) on
failure.
'content' is the data which should be used to form the HTTP
Message Body.
This overwrites all metrics with the same job and grouping_key.
This uses the PUT HTTP method."""
_use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
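# Illustrative sketch of a plain push: build a throwaway registry, record one
# gauge and replace the 'batch_job' group on the gateway. Gateway address and
# names are example values.
def _example_push():
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    g = Gauge('job_last_success_unixtime', 'Last time the batch job finished',
              registry=registry)
    g.set_to_current_time()
    push_to_gateway('localhost:9091', job='batch_job', registry=registry)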
def pushadd_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
"""PushAdd metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This replaces metrics with the same name, job and grouping_key.
This uses the POST HTTP method."""
_use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
def delete_from_gateway(
gateway, job, grouping_key=None, timeout=30, handler=default_handler):
"""Delete metrics from the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long delete will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This deletes metrics with the given job and grouping_key.
This uses the DELETE HTTP method."""
_use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler):
gateway_url = urlparse(gateway)
# See https://bugs.python.org/issue27657 for details on urlparse in py>=3.7.6.
if not gateway_url.scheme or (
(PYTHON376_OR_NEWER or PYTHON26_OR_OLDER)
and gateway_url.scheme not in ['http', 'https']
):
gateway = 'http://{0}'.format(gateway)
url = '{0}/metrics/{1}/{2}'.format(gateway, *_escape_grouping_key("job", job))
data = b''
if method != 'DELETE':
data = generate_latest(registry)
if grouping_key is None:
grouping_key = {}
url += ''.join(
'/{0}/{1}'.format(*_escape_grouping_key(str(k), str(v)))
for k, v in sorted(grouping_key.items()))
handler(
url=url, method=method, timeout=timeout,
headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data,
)()
def _escape_grouping_key(k, v):
if '/' in v:
# Added in Pushgateway 0.9.0.
return k + "@base64", base64.urlsafe_b64encode(v.encode("utf-8")).decode("utf-8")
else:
return k, quote_plus(v)
def instance_ip_grouping_key():
"""Grouping key with instance set to the IP Address of this host."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
s.connect(('localhost', 0))
return {'instance': s.getsockname()[0]}
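# Illustrative sketch: pushing with a grouping key so each host keeps its own
# metric group on the gateway. Gateway address and job name are example values.
def _example_push_per_instance(registry):
    push_to_gateway('localhost:9091', job='batch_job', registry=registry,
                    grouping_key=instance_ip_grouping_key())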
|
vnoanda.py
|
# encoding: utf-8
import logging
# from vtFunction import AppLoger
# apploger = AppLoger()
# apploger.set_log_level(logging.INFO)
# # apiLog = apploger.get_logger()
import json
import requests
from Queue import Queue, Empty
from threading import Thread
API_SETTING = {}
API_SETTING['practice'] = {'rest': 'https://api-fxpractice.oanda.com',
'stream': 'https://stream-fxpractice.oanda.com'}
API_SETTING['trade'] = {'rest': 'https://api-fxtrade.oanda.com',
'stream': 'https://stream-fxtrade.oanda.com/'}
FUNCTIONCODE_GETINSTRUMENTS = 0
FUNCTIONCODE_GETPRICES = 1
FUNCTIONCODE_GETPRICEHISTORY = 2
FUNCTIONCODE_GETACCOUNTS = 3
FUNCTIONCODE_GETACCOUNTINFO = 4
FUNCTIONCODE_GETORDERS = 5
FUNCTIONCODE_SENDORDER = 6
FUNCTIONCODE_GETORDERINFO = 7
FUNCTIONCODE_MODIFYORDER = 8
FUNCTIONCODE_CANCELORDER = 9
FUNCTIONCODE_GETTRADES = 10
FUNCTIONCODE_GETTRADEINFO = 11
FUNCTIONCODE_MODIFYTRADE = 12
FUNCTIONCODE_CLOSETRADE = 13
FUNCTIONCODE_GETPOSITIONS = 14
FUNCTIONCODE_GETPOSITIONINFO = 15
FUNCTIONCODE_CLOSEPOSITION = 16
FUNCTIONCODE_GETTRANSACTIONS = 17
FUNCTIONCODE_GETTRANSACTIONINFO = 18
FUNCTIONCODE_GETACCOUNTHISTORY = 19
FUNCTIONCODE_GETCALENDAR = 20
FUNCTIONCODE_GETPOSITIONRATIOS = 21
FUNCTIONCODE_GETSPREADS = 22
FUNCTIONCODE_GETCOMMIMENTS = 23
FUNCTIONCODE_GETORDERBOOK = 24
FUNCTIONCODE_GETAUTOCHARTIST = 25
FUNCTIONCODE_STREAMPRICES = 26
FUNCTIONCODE_STREAMEVENTS = 27
########################################################################
class OandaApi(object):
""""""
DEBUG = False
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.token = ''
self.accountId = ''
self.headers = {}
self.restDomain = ''
self.streamDomain = ''
self.session = None
self.functionSetting = {}
        self.active = False  # whether the API worker threads are running
        self.reqID = 0  # request sequence number
        self.reqQueue = Queue()  # request queue
        self.reqThread = Thread(target=self.processQueue)  # request-processing thread
        self.streamPricesThread = Thread(target=self.processStreamPrices)  # streaming-prices thread
        self.streamEventsThread = Thread(target=self.processStreamEvents)  # streaming-events thread (fills etc.)
# ----------------------------------------------------------------------
    def init(self, settingName, token, accountId):
        """Initialize the API interface."""
self.restDomain = API_SETTING[settingName]['rest']
self.streamDomain = API_SETTING[settingName]['stream']
self.session = requests.Session()
self.token = token
self.accountId = accountId
self.headers['Authorization'] = 'Bearer ' + self.token
self.initFunctionSetting(FUNCTIONCODE_GETINSTRUMENTS, {'path': '/v1/instruments',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPRICES, {'path': '/v1/prices',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPRICEHISTORY, {'path': 'v1/candles',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTS, {'path': '/v1/accounts',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTINFO, {'path': '/v1/accounts/%s' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERS, {'path': '/v1/accounts/%s/orders' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_SENDORDER, {'path': '/v1/accounts/%s/orders' % self.accountId,
'method': 'POST'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERINFO, {'path': '/v1/accounts/%s/orders' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_MODIFYORDER, {'path': '/v1/accounts/%s/orders' % self.accountId,
'method': 'PATCH'})
self.initFunctionSetting(FUNCTIONCODE_CANCELORDER, {'path': '/v1/accounts/%s/orders' % self.accountId,
'method': 'DELETE'})
self.initFunctionSetting(FUNCTIONCODE_GETTRADES, {'path': '/v1/accounts/%s/trades' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETTRADEINFO, {'path': '/v1/accounts/%s/trades' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_MODIFYTRADE, {'path': '/v1/accounts/%s/trades' % self.accountId,
'method': 'PATCH'})
self.initFunctionSetting(FUNCTIONCODE_CLOSETRADE, {'path': '/v1/accounts/%s/trades' % self.accountId,
'method': 'PUT'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONS, {'path': '/v1/accounts/%s/positions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONINFO, {'path': '/v1/accounts/%s/positions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_CLOSEPOSITION, {'path': '/v1/accounts/%s/positions' % self.accountId,
'method': 'DELETE'})
self.initFunctionSetting(FUNCTIONCODE_GETTRANSACTIONS, {'path': '/v1/accounts/%s/transactions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETTRANSACTIONINFO,
{'path': '/v1/accounts/%s/transactions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTHISTORY,
{'path': '/v1/accounts/%s/alltransactions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETCALENDAR, {'path': '/labs/v1/calendar',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONRATIOS, {'path': '/labs/v1/historical_position_ratios',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETSPREADS, {'path': '/labs/v1/spreads',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETCOMMIMENTS, {'path': '/labs/v1/commitments',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERBOOK, {'path': '/labs/v1/orderbook_data',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETAUTOCHARTIST, {'path': '/labs/v1/autochartist',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETAUTOCHARTIST, {'path': '/labs/v1/autochartist',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_STREAMPRICES, {'path': '/v1/prices',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_STREAMEVENTS, {'path': '/v1/events',
'method': 'GET'})
self.active = True
self.reqThread.start()
self.streamEventsThread.start()
self.streamPricesThread.start()
# ----------------------------------------------------------------------
    def exit(self):
        """Shut down the API interface."""
if self.active:
self.active = False
self.reqThread.join()
# ----------------------------------------------------------------------
    def initFunctionSetting(self, code, setting):
        """Initialize the API function-setting dictionary."""
self.functionSetting[code] = setting
# ----------------------------------------------------------------------
    def processRequest(self, req):
        """Send a request and return the response and any error."""
url = req['url']
method = req['method']
params = req['params']
stream = False
if 'stream' in req:
stream = req['stream']
if method in ['GET', 'DELETE']:
myreq = requests.Request(method, url, headers=self.headers, params=params)
elif method in ['POST', 'PATCH']:
myreq = requests.Request(method, url, headers=self.headers, data=params)
pre = myreq.prepare()
r = None
error = None
try:
r = self.session.send(pre, stream=stream)
except Exception, e:
error = e
return r, error
# ----------------------------------------------------------------------
    def processQueue(self):
        """Process requests from the request queue."""
while self.active:
try:
                req = self.reqQueue.get(block=True, timeout=1)  # wait at most one second for a request
callback = req['callback']
reqID = req['reqID']
r, error = self.processRequest(req)
if r:
try:
data = r.json()
# if self.DEBUG:
# print callback.__name__
callback(data, reqID)
except Exception, e:
self.onError(str(e), reqID)
else:
self.onError(error, reqID)
except Empty:
pass
# ----------------------------------------------------------------------
    def sendRequest(self, code, params, callback, optional=''):
        """Queue a request for sending and return its request ID."""
setting = self.functionSetting[code]
url = self.restDomain + setting['path']
if optional:
url = url + '/' + optional
self.reqID += 1
req = {'url': url,
'method': setting['method'],
'params': params,
'callback': callback,
'reqID': self.reqID}
self.reqQueue.put(req)
# apiLog.info("url:%s" % url)
return self.reqID
# ----------------------------------------------------------------------
    def onError(self, error, reqID):
        """Error callback."""
print error, reqID
# ----------------------------------------------------------------------
    def getInstruments(self, params):
        """Query the list of tradable instruments."""
return self.sendRequest(FUNCTIONCODE_GETINSTRUMENTS, params, self.onGetInstruments)
# ----------------------------------------------------------------------
    def onGetInstruments(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getPrices(self, params):
        """Query prices."""
return self.sendRequest(FUNCTIONCODE_GETPRICES, params, self.onGetPrices)
# ----------------------------------------------------------------------
    def onGetPrices(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getPriceHisory(self, params):
        """Query historical price data."""
return self.sendRequest(FUNCTIONCODE_GETPRICEHISTORY, params, self.onGetPriceHistory)
# ----------------------------------------------------------------------
    def onGetPriceHistory(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getAccounts(self):
        """Query all accounts of the user."""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTS, {}, self.onGetAccounts)
# ----------------------------------------------------------------------
    def onGetAccounts(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getAccountInfo(self):
        """Query account information."""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTINFO, {}, self.onGetAccountInfo)
# ----------------------------------------------------------------------
    def onGetAccountInfo(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getOrders(self, params):
        """Query all orders."""
return self.sendRequest(FUNCTIONCODE_GETORDERS, params, self.onGetOrders)
# ----------------------------------------------------------------------
    def onGetOrders(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def sendOrder(self, params):
        """Send an order."""
return self.sendRequest(FUNCTIONCODE_SENDORDER, params, self.onSendOrder)
# ----------------------------------------------------------------------
    def onSendOrder(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getOrderInfo(self, optional):
        """Query order details."""
return self.sendRequest(FUNCTIONCODE_GETORDERINFO, {}, self.onGetOrderInfo, optional)
# ----------------------------------------------------------------------
    def onGetOrderInfo(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def modifyOrder(self, params, optional):
        """Modify an order."""
return self.sendRequest(FUNCTIONCODE_MODIFYORDER, params, self.onModifyOrder, optional)
# ----------------------------------------------------------------------
    def onModifyOrder(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def cancelOrder(self, optional):
        """Cancel an order."""
return self.sendRequest(FUNCTIONCODE_CANCELORDER, {}, self.onCancelOrder, optional)
# ----------------------------------------------------------------------
    def onCancelOrder(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getTrades(self, params):
        """Query all open trades."""
return self.sendRequest(FUNCTIONCODE_GETTRADES, params, self.onGetTrades)
# ----------------------------------------------------------------------
    def onGetTrades(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getTradeInfo(self, optional):
        """Query trade details."""
return self.sendRequest(FUNCTIONCODE_GETTRADEINFO, {}, self.onGetTradeInfo, optional)
# ----------------------------------------------------------------------
    def onGetTradeInfo(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def modifyTrade(self, params, optional):
        """Modify a trade."""
return self.sendRequest(FUNCTIONCODE_MODIFYTRADE, params, self.onModifyTrade, optional)
# ----------------------------------------------------------------------
    def onModifyTrade(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def closeTrade(self, optional):
        """Close a trade."""
return self.sendRequest(FUNCTIONCODE_CLOSETRADE, {}, self.onCloseTrade, optional)
# ----------------------------------------------------------------------
    def onCloseTrade(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getPositions(self):
        """Query all aggregated positions."""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONS, {}, self.onGetPositions)
# ----------------------------------------------------------------------
    def onGetPositions(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getPositionInfo(self, optional):
        """Query aggregated position details."""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONINFO, {}, self.onGetPositionInfo, optional)
# ----------------------------------------------------------------------
    def onGetPositionInfo(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def closePosition(self, optional):
        """Close an aggregated position."""
return self.sendRequest(FUNCTIONCODE_CLOSEPOSITION, {}, self.onClosePosition, optional)
# ----------------------------------------------------------------------
    def onClosePosition(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getTransactions(self, params):
        """Query all transactions."""
return self.sendRequest(FUNCTIONCODE_GETTRANSACTIONS, params, self.onGetTransactions)
# ----------------------------------------------------------------------
    def onGetTransactions(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getTransactionInfo(self, optional):
        """Query transaction details."""
return self.sendRequest(FUNCTIONCODE_GETTRANSACTIONINFO, {}, self.onGetTransactionInfo, optional)
# ----------------------------------------------------------------------
    def onGetTransactionInfo(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getAccountHistory(self):
        """Query the account transaction history."""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTHISTORY, {}, self.onGetAccountHistory)
# ----------------------------------------------------------------------
    def onGetAccountHistory(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getCalendar(self, params):
        """Query the economic calendar."""
return self.sendRequest(FUNCTIONCODE_GETCALENDAR, params, self.onGetCalendar)
# ----------------------------------------------------------------------
    def onGetCalendar(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getPositionRatios(self, params):
        """Query historical position ratios."""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONRATIOS, params, self.onGetPositionRatios)
# ----------------------------------------------------------------------
    def onGetPositionRatios(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getSpreads(self, params):
        """Query spread history."""
return self.sendRequest(FUNCTIONCODE_GETSPREADS, params, self.onGetSpreads)
# ----------------------------------------------------------------------
    def onGetSpreads(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getCommitments(self, params):
        """Query commitments-of-traders data."""
return self.sendRequest(FUNCTIONCODE_GETCOMMIMENTS, params, self.onGetCommitments)
# ----------------------------------------------------------------------
    def onGetCommitments(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getOrderbook(self, params):
        """Query the order book."""
return self.sendRequest(FUNCTIONCODE_GETORDERBOOK, params, self.onGetOrderbook)
# ----------------------------------------------------------------------
    def onGetOrderbook(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def getAutochartist(self, params):
        """Query patterns identified by Autochartist."""
return self.sendRequest(FUNCTIONCODE_GETAUTOCHARTIST, params, self.onGetAutochartist)
# ----------------------------------------------------------------------
    def onGetAutochartist(self, data, reqID):
        """Callback function."""
pass
# ----------------------------------------------------------------------
    def onPrice(self, data):
        """Price push callback."""
        print '------------base class---------------\n', data
# ----------------------------------------------------------------------
    def onEvent(self, data):
        """Event push callback (fills etc.)."""
print data
# ----------------------------------------------------------------------
    def processStreamPrices(self):
        """Consume the streaming price feed."""
        # First fetch the codes of all instruments
setting = self.functionSetting[FUNCTIONCODE_GETINSTRUMENTS]
req = {'url': self.restDomain + setting['path'],
'method': setting['method'],
'params': {'accountId': self.accountId}}
r, error = self.processRequest(req)
if r:
try:
data = r.json()
symbols = [d['instrument'] for d in data['instruments']]
except Exception, e:
self.onError(e, -1)
return
else:
self.onError(error, -1)
return
        # Then subscribe to streaming prices for all instruments
setting = self.functionSetting[FUNCTIONCODE_STREAMPRICES]
params = {'accountId': self.accountId,
'instruments': ','.join(symbols)}
req = {'url': self.streamDomain + setting['path'],
'method': setting['method'],
'params': params,
'stream': True}
r, error = self.processRequest(req)
if r:
for line in r.iter_lines():
if line:
try:
msg = json.loads(line)
# if self.DEBUG:
# print self.onPrice.__name__
self.onPrice(msg)
except Exception, e:
self.onError(e, -1)
if not self.active:
break
else:
self.onError(error, -1)
# ----------------------------------------------------------------------
    def processStreamEvents(self):
        """Consume the streaming event feed."""
setting = self.functionSetting[FUNCTIONCODE_STREAMEVENTS]
req = {'url': self.streamDomain + setting['path'],
'method': setting['method'],
'params': {},
'stream': True}
r, error = self.processRequest(req)
if r:
for line in r.iter_lines():
if line:
try:
msg = json.loads(line)
# if self.DEBUG:
# print self.onEvent.__name__
# apiLog.info("get Steam event %s" % msg)
self.onEvent(msg)
except Exception, e:
# apiLog.info("get Steam error %s" % e)
self.onError(e, -1)
if not self.active:
break
else:
self.onError(error, -1)
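# --- Illustrative usage sketch (not part of the original gateway code) ---
# One way to use the class above, assuming a valid practice-account token and
# account id: subclass it to receive pushed prices, then start the worker
# threads via init(). All credential values below are placeholders.
class _ExampleOandaClient(OandaApi):
    def onPrice(self, data):
        """Called from the streaming-prices thread for every pushed tick."""
        print data
def _example_run_client():
    api = _ExampleOandaClient()
    api.init('practice', 'your-token-here', 'your-account-id')
    api.getAccountInfo()
    return api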
########################################################################
class OandaApiV3(object):
""""""
DEBUG = False
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.token = ''
self.accountId = ''
self.headers = {}
self.restDomain = ''
self.streamDomain = ''
self.session = None
# self.orderInstrument = ['USD_JPY']
self.orderInstrument = []
self.functionSetting = {}
        self.active = False  # whether the API worker threads are running
        self.reqID = 0  # request sequence number
        self.reqQueue = Queue()  # request queue
        self.reqThread = Thread(target=self.processQueue)  # request-processing thread
        self.streamPricesThread = Thread(target=self.processStreamPrices)  # streaming-prices thread
        self.streamEventsThread = Thread(target=self.processStreamEvents)  # streaming-events thread (fills etc.)
# ----------------------------------------------------------------------
    def init(self, settingName, token, accountId):
        """Initialize the API interface."""
self.restDomain = API_SETTING[settingName]['rest']
self.streamDomain = API_SETTING[settingName]['stream']
self.session = requests.Session()
self.token = token
self.accountId = accountId
self.headers['Authorization'] = 'Bearer ' + self.token
self.headers['Content-Type'] = 'application/json'
self.initFunctionSetting(FUNCTIONCODE_GETINSTRUMENTS, {'path': '/v3/accounts/%s/instruments' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPRICES, {'path': '/v3/accounts/%s/pricing' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPRICEHISTORY, {'path': '/v3/candles',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTS, {'path': '/v3/accounts',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTINFO, {'path': '/v3/accounts/%s/summary' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERS, {'path': '/v3/accounts/%s/orders' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_SENDORDER, {'path': '/v3/accounts/%s/orders' % self.accountId,
'method': 'POST'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERINFO, {'path': '/v3/accounts/%s/orders' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_MODIFYORDER, {'path': '/v3/accounts/%s/orders' % self.accountId,
'method': 'PATCH'})
self.initFunctionSetting(FUNCTIONCODE_CANCELORDER, {'path': '/v3/accounts/%s/orders' % self.accountId,
'method': 'DELETE'})
self.initFunctionSetting(FUNCTIONCODE_GETTRADES, {'path': '/v3/accounts/%s/trades' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETTRADEINFO, {'path': '/v3/accounts/%s/trades' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_MODIFYTRADE, {'path': '/v3/accounts/%s/trades' % self.accountId,
'method': 'PATCH'})
self.initFunctionSetting(FUNCTIONCODE_CLOSETRADE, {'path': '/v3/accounts/%s/trades' % self.accountId,
'method': 'PUT'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONS, {'path': '/v3/accounts/%s/openPositions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONINFO, {'path': '/v3/accounts/%s/positions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_CLOSEPOSITION, {'path': '/v3/accounts/%s/positions' % self.accountId,
'method': 'DELETE'})
self.initFunctionSetting(FUNCTIONCODE_GETTRANSACTIONS, {'path': '/v3/accounts/%s/transactions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETTRANSACTIONINFO,
{'path': '/v3/accounts/%s/transactions' % self.accountId,
'method': 'GET'})
        # neither the v20 nor the v1 REST API provides account history
self.initFunctionSetting(FUNCTIONCODE_GETACCOUNTHISTORY,
{'path': '/v3/accounts/%s/alltransactions' % self.accountId,
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETCALENDAR, {'path': '/labs/v1/calendar',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETPOSITIONRATIOS, {'path': '/labs/v3/historical_position_ratios',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETSPREADS, {'path': '/labs/v3/spreads',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETCOMMIMENTS, {'path': '/labs/v3/commitments',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETORDERBOOK, {'path': '/labs/v3/orderbook_data',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETAUTOCHARTIST, {'path': '/labs/v3/autochartist',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_GETAUTOCHARTIST, {'path': '/labs/v3/autochartist',
'method': 'GET'})
self.initFunctionSetting(FUNCTIONCODE_STREAMPRICES, {'path': '/v3/accounts/%s/pricing/stream' % self.accountId,
'method': 'GET'})
# v20 transaction stream
self.initFunctionSetting(FUNCTIONCODE_STREAMEVENTS,
{'path': '/v3/accounts/%s/transactions/stream' % self.accountId,
'method': 'GET'})
self.active = True
self.reqThread.start()
self.streamEventsThread.start()
self.streamPricesThread.start()
# ----------------------------------------------------------------------
    def exit(self):
        """Shut down the API interface."""
if self.active:
self.active = False
self.reqThread.join()
# ----------------------------------------------------------------------
    def initFunctionSetting(self, code, setting):
        """Initialize the API function-setting dictionary."""
self.functionSetting[code] = setting
# ----------------------------------------------------------------------
    def processRequest(self, req):
        """Send a request and return the response and any error."""
url = req['url']
method = req['method']
params = req['params']
# print self.headers
stream = False
if 'stream' in req:
stream = req['stream']
if method in ['GET', 'DELETE']:
myreq = requests.Request(method, url, headers=self.headers, params=params)
elif method in ['POST', 'PUT']:
myreq = requests.Request(method, url, headers=self.headers, json=params)
# print 'params:'
# print params
pre = myreq.prepare()
# # apiLog.info("method:%s, head:%s, will send url:%s, params:%s" % (method, pre.headers, pre.url, params))
r = None
error = None
try:
# print '----------------pre.body--------------\n',pre.body
# print json.dumps(vars(pre))
r = self.session.send(pre, stream=stream)
# print 'rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr:\n',r.content
# print dir(r)
except Exception, e:
error = e
# print 'eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee'
# print error
# if r != None:
# # apiLog.info("response is %s, error is %s" % (r.json(), error))
return r, error
# ----------------------------------------------------------------------
    def processQueue(self):
        """Process requests from the request queue."""
while self.active:
try:
                req = self.reqQueue.get(block=True, timeout=1)  # wait at most one second for a request
callback = req['callback']
reqID = req['reqID']
                # response returned by OANDA
r, error = self.processRequest(req)
# apiLog.info("callback is %s, reqID:%d" % (callback.__name__, reqID))
if r:
try:
                        # extract the body of the response
data = r.json()
# if self.DEBUG:
# print callback.__name__
callback(data, reqID)
except Exception, e:
self.onError(str(e), reqID)
# apiLog.error("callback %s exception %s" % (callback.__name__, str(e)))
else:
self.onError(error, reqID)
except Empty:
pass
# ----------------------------------------------------------------------
    def sendRequest(self, code, params, callback, optional=''):
        """Queue a request for sending and return its request ID."""
setting = self.functionSetting[code]
url = self.restDomain + setting['path']
if optional:
url = url + '/' + optional
self.reqID += 1
req = {'url': url,
'method': setting['method'],
'params': params,
'callback': callback,
'reqID': self.reqID}
self.reqQueue.put(req)
# print '------------callback-----------\n',callback
# print("send url:%s, method:%s, params:%s, reqID:%d" % (url, setting['method'], params, self.reqID))
return self.reqID
# ----------------------------------------------------------------------
    def onError(self, error, reqID):
        """Error callback."""
print error, reqID
# ----------------------------------------------------------------------
    def getInstruments(self, params):
        """Query the list of tradable instruments."""
return self.sendRequest(FUNCTIONCODE_GETINSTRUMENTS, params, self.onGetInstruments)
# ----------------------------------------------------------------------
    def onGetInstruments(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getPrices(self, params):
        """Query prices."""
# return self.sendRequest(FUNCTIONCODE_GETPRICES, params, self.onGetPrices)
# print '+'*30, 'self', '+'*30,'\n',self
return self.sendRequest(FUNCTIONCODE_GETPRICES, params, self.onPrice)
# ----------------------------------------------------------------------
    def onGetPrices(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getPriceHisory(self, params):
        """Query historical price data."""
return self.sendRequest(FUNCTIONCODE_GETPRICEHISTORY, params, self.onGetPriceHistory)
# ----------------------------------------------------------------------
    def onGetPriceHistory(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getAccounts(self):
        """Query all accounts of the user."""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTS, {}, self.onGetAccounts)
# ----------------------------------------------------------------------
    def onGetAccounts(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getAccountInfo(self):
        """Query account information."""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTINFO, {}, self.onGetAccountInfo)
# ----------------------------------------------------------------------
    def onGetAccountInfo(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getOrders(self, params):
        """Query all orders."""
return self.sendRequest(FUNCTIONCODE_GETORDERS, params, self.onGetOrders)
# ----------------------------------------------------------------------
    def onGetOrders(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def sendOrder(self, params):
        """Send an order."""
return self.sendRequest(FUNCTIONCODE_SENDORDER, params, self.onSendOrder)
# ----------------------------------------------------------------------
    def onSendOrder(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getOrderInfo(self, optional):
        """Query order details."""
return self.sendRequest(FUNCTIONCODE_GETORDERINFO, {}, self.onGetOrderInfo, optional)
# ----------------------------------------------------------------------
    def onGetOrderInfo(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def modifyOrder(self, params, optional):
        """Modify an order."""
return self.sendRequest(FUNCTIONCODE_MODIFYORDER, params, self.onModifyOrder, optional)
# ----------------------------------------------------------------------
    def onModifyOrder(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def cancelOrder(self, optional):
        """Cancel an order."""
return self.sendRequest(FUNCTIONCODE_CANCELORDER, {}, self.onCancelOrder, optional)
# ----------------------------------------------------------------------
    def onCancelOrder(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getTrades(self, params):
        """Query all open trades."""
return self.sendRequest(FUNCTIONCODE_GETTRADES, params, self.onGetTrades)
# ----------------------------------------------------------------------
    def onGetTrades(self, data, reqID):
        """Callback function."""
# print int(data['trades'][0]['currentUnits'])
print data,'\nreqID:',reqID
# ----------------------------------------------------------------------
    def getTradeInfo(self, optional):
        """Query trade details."""
return self.sendRequest(FUNCTIONCODE_GETTRADEINFO, {}, self.onGetTradeInfo, optional)
# ----------------------------------------------------------------------
    def onGetTradeInfo(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def modifyTrade(self, params, optional):
        """Modify a trade."""
return self.sendRequest(FUNCTIONCODE_MODIFYTRADE, params, self.onModifyTrade, optional)
# ----------------------------------------------------------------------
    def onModifyTrade(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def closeTrade(self, params, optional):
        """Close a trade."""
return self.sendRequest(FUNCTIONCODE_CLOSETRADE, params, self.onCloseTrade, optional)
# ----------------------------------------------------------------------
    def onCloseTrade(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getPositions(self):
        """Query all aggregated positions."""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONS, {}, self.onGetPositions)
# ----------------------------------------------------------------------
    def onGetPositions(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def getPositionInfo(self, optional):
        """Query aggregated position details."""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONINFO, {}, self.onGetPositionInfo, optional)
# ----------------------------------------------------------------------
    def onGetPositionInfo(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
    def closePosition(self, optional):
        """Close an aggregated position."""
return self.sendRequest(FUNCTIONCODE_CLOSEPOSITION, {}, self.onClosePosition, optional)
# ----------------------------------------------------------------------
    def onClosePosition(self, data, reqID):
        """Callback function."""
print(data, reqID)
# ----------------------------------------------------------------------
def getTransactions(self, params):
"""查询所有资金变动"""
return self.sendRequest(FUNCTIONCODE_GETTRANSACTIONS, params, self.onGetTransactions)
# ----------------------------------------------------------------------
def onGetTransactions(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getTransactionInfo(self, optional):
# print ("enter getTransactionInfo %s" % optional)
"""查询资金变动信息"""
return self.sendRequest(FUNCTIONCODE_GETTRANSACTIONINFO, {}, self.onGetTransactionInfo, optional)
# ----------------------------------------------------------------------
def onGetTransactionInfo(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getAccountHistory(self):
"""查询账户资金变动历史"""
return self.sendRequest(FUNCTIONCODE_GETACCOUNTHISTORY, {}, self.onGetAccountHistory)
# ----------------------------------------------------------------------
def onGetAccountHistory(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getCalendar(self, params):
"""查询日历"""
return self.sendRequest(FUNCTIONCODE_GETCALENDAR, params, self.onGetCalendar)
# ----------------------------------------------------------------------
def onGetCalendar(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getPositionRatios(self, params):
"""查询持仓比例"""
return self.sendRequest(FUNCTIONCODE_GETPOSITIONRATIOS, params, self.onGetPositionRatios)
# ----------------------------------------------------------------------
def onGetPositionRatios(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getSpreads(self, params):
"""查询所有仓位"""
return self.sendRequest(FUNCTIONCODE_GETSPREADS, params, self.onGetSpreads)
# ----------------------------------------------------------------------
def onGetSpreads(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getCommitments(self, params):
"""查询交易商持仓情况"""
return self.sendRequest(FUNCTIONCODE_GETCOMMIMENTS, params, self.onGetCommitments)
# ----------------------------------------------------------------------
def onGetCommitments(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getOrderbook(self, params):
"""查询订单簿"""
return self.sendRequest(FUNCTIONCODE_GETORDERBOOK, params, self.onGetOrderbook)
# ----------------------------------------------------------------------
def onGetOrderbook(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def getAutochartist(self, params):
"""查询Autochartist识别的模式"""
return self.sendRequest(FUNCTIONCODE_GETAUTOCHARTIST, params, self.onGetAutochartist)
# ----------------------------------------------------------------------
def onGetAutochartist(self, data, reqID):
"""回调函数"""
print(data, reqID)
# ----------------------------------------------------------------------
def onPrice(self, data, reqID):
"""行情推送"""
print data, reqID
# ----------------------------------------------------------------------
def onEvent(self, data):
"""事件推送(成交等)"""
print data
# ----------------------------------------------------------------------
def processStreamPrices(self):
"""获取价格推送"""
# 首先获取所有合约的代码
setting = self.functionSetting[FUNCTIONCODE_GETORDERS]
req = {'url': self.restDomain + setting['path'],
'method': setting['method'],
'params': {}}
r, error = self.processRequest(req)
if r:
try:
data = r.json()
                # the v20 API has no instruments field here
# symbols = [d['name'] for d in data['instruments']]
l = data['orders']
for d in l:
                    if 'instrument' in d and d['instrument'] not in self.orderInstrument:
self.orderInstrument.append(d['instrument'])
# apiLog.info("get instrument name %s" % self.orderInstrument)
            except Exception as e:
self.onError(e, -1)
# apiLog.error("get instruments error %s" % e)
return
else:
self.onError(error, -1)
return
        # Then subscribe to prices for all the collected instruments
setting = self.functionSetting[FUNCTIONCODE_STREAMPRICES]
# params = {'accountId': self.accountId,
# 'instruments': ','.join(self.orderInstrument)}
params = {'instruments': ','.join(self.orderInstrument)}
# params = {'instruments': self.orderInstrument[0]}
req = {'url': self.streamDomain + setting['path'],
'method': setting['method'],
'params': params,
'stream': True}
# r, error = self.processRequest(req)
myreq = requests.Request(req['method'], req['url'], headers=self.headers, params=req['params'])
pre = myreq.prepare()
s = requests.session()
r = s.send(pre, stream=True)
if r:
for line in r.iter_lines():
if line:
try:
msg = json.loads(line)
# if self.DEBUG:
# print self.onPrice.__name__
# apiLog.info("get price %s" % msg)
                        self.onPrice(msg, -1)
                    except Exception as e:
self.onError(e, -1)
if not self.active:
break
        else:
            self.onError('price stream request failed', -1)
# ----------------------------------------------------------------------
def processStreamEvents(self):
"""获取事件推送"""
setting = self.functionSetting[FUNCTIONCODE_STREAMEVENTS]
req = {'url': self.streamDomain + setting['path'],
'method': setting['method'],
'params': {},
'stream': True}
r, error = self.processRequest(req)
myreq = requests.Request(req['method'], req['url'], headers=self.headers, params=req['params'])
pre = myreq.prepare()
s = requests.session()
r = s.send(pre, stream=True)
if r:
for line in r.iter_lines():
if line:
try:
msg = json.loads(line)
# if self.DEBUG:
# print self.onEvent.__name__
self.onEvent(msg)
                    except Exception as e:
self.onError(e, -1)
if not self.active:
break
else:
self.onError(error, -1)
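# Illustrative note (not part of the original gateway): both streaming methods
# above use the same requests pattern -- build a GET request, send it with
# stream=True, and decode each pushed line as JSON. In outline (names are
# placeholders):
#
#     req = requests.Request('GET', url, headers=headers, params=params)
#     resp = requests.Session().send(req.prepare(), stream=True)
#     for line in resp.iter_lines():
#         if line:
#             handle(json.loads(line))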
|
keyboard.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# OpenWinchPy : a library for controlling the Raspberry Pi's Winch
# Copyright (c) 2020 Mickael Gaillard <mick.gaillard@gmail.com>
from openwinch.input import InputType
import threading
import click
class Keyboard(object):
__winch = None
__lcd = None
__controlLoop = None
def __init__(self, winch, lcd):
self.__winch = winch
self.__lcd = lcd
self.__controlLoop = threading.Thread(target=self.__runControlLoop, name="Kbd", args=(), daemon=True)
self.__controlLoop.start()
def __runControlLoop(self):
        while True:
            key = self.get()
            self.__lcd.enter(key)
def get(self):
k = click.getchar()
if k == '\x1b[A':
return InputType.UP
elif k == '\x1b[B':
return InputType.DOWN
elif k == '\x1b[C':
return InputType.RIGHT
elif k == '\x1b[D':
return InputType.LEFT
else:
print("not an arrow key!\n")
|
rtsp_webserver.py
|
"""
# TODO: Load ML model with redis and keep it for sometime.
1- detector/yolov3/detector.py |=> yolov3 weightfile -> redis cache
2- deepsort/deep/feature_extractor |=> model_path -> redis cache
3- Use tmpfs (Insert RAM as a virtual disk and store model state): https://pypi.org/project/memory-tempfile/
"""
import argparse
from os import environ, getenv
from os.path import join
from threading import Thread
from flask import Flask, Response, abort, jsonify, request
from config.config import DevelopmentConfig
from dotenv import load_dotenv
from redis import Redis
from rtsp_threaded_tracker import RealTimeTracking
from server_cfg import deep_sort_dict, model
from utils.parser import get_config
redis_cache = Redis('127.0.0.1')
app = Flask(__name__)
environ['in_progress'] = 'off'
def parse_args():
"""
Parses the arguments
Returns:
argparse Namespace
"""
assert 'project_root' in environ.keys()
project_root = getenv('project_root')
parser = argparse.ArgumentParser()
parser.add_argument("--input",
type=str,
default=getenv('camera_stream'))
parser.add_argument("--model",
type=str,
default=join(project_root,
getenv('model_type')))
parser.add_argument("--cpu",
dest="use_cuda",
action="store_false", default=True)
args = parser.parse_args()
return args
def gen():
"""
Returns: video frames from redis cache
"""
while True:
frame = redis_cache.get('frame')
if frame is not None:
yield b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
def pedestrian_tracking(cfg, args):
"""
starts the pedestrian detection on rtsp link
Args:
cfg:
args:
Returns:
"""
tracker = RealTimeTracking(cfg, args)
tracker.run()
def trigger_process(cfg, args):
"""
triggers pedestrian_tracking process on rtsp link using a thread
Args:
cfg:
args:
Returns:
"""
try:
t = Thread(target=pedestrian_tracking, args=(cfg, args))
t.start()
return jsonify({"message": "Pedestrian detection started successfully"})
except Exception:
return jsonify({'message': "Unexpected exception occured in process"})
@app.errorhandler(400)
def bad_argument(error):
return jsonify({'message': error.description['message']})
# Routes
@app.route('/stream', methods=['GET'])
def stream():
"""
Provides video frames on http link
Returns:
"""
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route("/run", methods=['GET'])
def process_manager():
"""
request parameters:
run (bool): 1 -> start the pedestrian tracking
0 -> stop it
camera_stream: str -> rtsp link to security camera
:return:
"""
# data = request.args
data = request.args
status = data['run']
status = int(status) if status.isnumeric() else abort(
400, {'message': f"bad argument for run {data['run']}"})
if status == 1:
# if pedestrian tracking is not running, start it off!
try:
if environ.get('in_progress', 'off') == 'off':
global cfg, args
vdo = data.get('camera_stream')
if vdo is not None:
                    args.input = vdo
environ['in_progress'] = 'on'
return trigger_process(cfg, args)
elif environ.get('in_progress') == 'on':
# if pedestrian tracking is running, don't start another one (we are short of gpu resources)
return jsonify({"message": " Pedestrian detection is already in progress."})
except Exception:
environ['in_progress'] = 'off'
return abort(503)
elif status == 0:
if environ.get('in_progress', 'off') == 'off':
return jsonify({"message": "pedestrian detection is already terminated!"})
else:
environ['in_progress'] = 'off'
return jsonify({"message": "Pedestrian detection terminated!"})
if __name__ == '__main__':
load_dotenv()
app.config.from_object(DevelopmentConfig)
# BackProcess Initialization
args = parse_args()
cfg = get_config()
cfg.merge_from_dict(model)
cfg.merge_from_dict(deep_sort_dict)
# Start the flask app
app.run()
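# Usage sketch (illustrative, not part of the original service; host/port assume
# Flask's defaults): tracking can be started and stopped over HTTP, e.g.
#
#     import requests
#     requests.get("http://127.0.0.1:5000/run", params={"run": 1})  # start tracking
#     requests.get("http://127.0.0.1:5000/run", params={"run": 0})  # stop tracking
#
# and the annotated frames can be viewed by opening /stream in a browser.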
|
app.py
|
import time
import datetime
import sqlite3
from multiprocessing import Process
import numpy as np
from bokeh.embed import server_document
from bokeh.server.server import Server
from bokeh.themes import Theme
import bokeh.io
import bokeh.models
import bokeh.plotting
import bokeh.layouts
import bokeh.driving
import board
import busio
import adafruit_veml7700
import adafruit_veml6070
import adafruit_sgp30
import adafruit_bme280
from flask import Flask, jsonify, render_template
from tornado.ioloop import IOLoop
app = Flask(__name__)
# Database names
db = "environmental_monitor.db"
# Sample frequency in seconds
sample_frequency = 2
# Create busio I2C
i2c = busio.I2C(board.SCL, board.SDA, frequency=100000)
# Create objects
light = adafruit_veml7700.VEML7700(i2c)
light.light_gain = light.ALS_GAIN_1_8
light.light_integration_time = light.ALS_25MS
uv = adafruit_veml6070.VEML6070(i2c)
bme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c)
sgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)
sgp30.iaq_init()
sgp30.set_iaq_baseline(0x8973, 0x8aae)
def read_sensors():
"""Read sensors every `sample_frequency` seconds and write to `db` database."""
previous_time = datetime.datetime.now()
while True:
now = datetime.datetime.now()
delta = now - previous_time
if delta.seconds >= sample_frequency:
previous_time = now
# Read SGP30.
eCO2_data = sgp30.eCO2
tvoc_data = sgp30.TVOC
# Read VEML6070 and VEML7700, sample ten times.
for j in range(10):
light_data = light.lux
uv_raw = uv.uv_raw
uv_data = uv.get_index(uv_raw)
# Read BME280.
temp_data = bme280.temperature
# Convert temperature (C->F)
temp_data = temp_data * 1.8 + 32
humid_data = bme280.humidity
pressure_data = bme280.pressure
# Write to database
conn = sqlite3.connect(db)
curs = conn.cursor()
curs.execute("INSERT INTO data values(?, ?, ?, ?, ?, ?, ?, ?)",
(now, temp_data, humid_data, pressure_data, eCO2_data, tvoc_data,
light_data, uv_data))
conn.commit()
conn.close()
def get_last_data():
conn = sqlite3.connect(db)
curs = conn.cursor()
for row in curs.execute("SELECT * FROM data ORDER BY timestamp DESC LIMIT 1"):
now_string = row[0]
temp_data = row[1]
humid_data = row[2]
pressure_data = row[3]
eCO2_data = row[4]
tvoc_data = row[5]
light_data = row[6]
uv_data = row[7]
conn.close()
return now_string, temp_data, humid_data, pressure_data, eCO2_data, tvoc_data, light_data, uv_data
@app.route('/_get_last_data')
def show_last_data():
now_string, temp_data, humid_data, pressure_data, eCO2_data, tvoc_data, light_data, uv_data = get_last_data()
return jsonify(temp_data=temp_data, humid_data=humid_data, pressure_data=pressure_data, eCO2_data=eCO2_data,
tvoc_data=tvoc_data, light_data=light_data, uv_data=uv_data, now=now_string)
def get_num_datapoints(db):
conn = sqlite3.connect(db)
curs = conn.cursor()
for row in curs.execute("SELECT COUNT(timestamp) FROM data"):
num_datapoints = row[0]
conn.close()
return num_datapoints
def bkapp(doc):
num_datapoints = get_num_datapoints(db)
rollover = 300
conn = sqlite3.connect("environmental_monitor.db")
curs = conn.cursor()
curs.execute("SELECT * FROM data ORDER BY timestamp DESC LIMIT "
+ str(min(rollover, num_datapoints)))
db_data = curs.fetchall()
datatype_names = ("timestamps", "temps", "humidities", "pressures", "eCO2s", "tvocs", "lights", "uvs")
data_lists = ([] for name in datatype_names)
data = dict(zip(datatype_names, data_lists))
for row in reversed(db_data):
for datatype, datapoint in zip(data, row):
data[datatype].append(datapoint)
for i, timestamp in enumerate(data['timestamps']):
data['timestamps'][i] = datetime.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
source = bokeh.models.ColumnDataSource(data=data)
p_temp_hum = bokeh.plotting.figure(
x_axis_label="time",
y_axis_label="temperature (°F)",
x_axis_type="datetime"
)
temp_line = p_temp_hum.line(source=source, x="timestamps", y="temps")
p_temp_hum.y_range.renderers = [temp_line]
hum_range = bokeh.models.DataRange1d()
p_temp_hum.extra_y_ranges = {"humidity": hum_range}
hum_line = p_temp_hum.line(source=source,x="timestamps", y="humidities", color="orange", y_range_name="humidity")
hum_range.renderers = [hum_line]
p_temp_hum.add_layout(bokeh.models.LinearAxis(y_range_name="humidity", axis_label="humidity (%)"), "right")
p_temp_hum.yaxis[0].axis_label_text_color = '#1F77B4'
p_temp_hum.yaxis[1].axis_label_text_color = "orange"
p_light = bokeh.plotting.figure(
x_axis_label="time",
y_axis_label="light (lux)",
x_axis_type="datetime"
)
light_line = p_light.line(source=source, x="timestamps", y="lights")
p_eco2_tvoc = bokeh.plotting.figure(
x_axis_label="time",
y_axis_label="eCO2 (ppm)",
x_axis_type="datetime"
)
eCO2_line = p_eco2_tvoc.line(source=source, x="timestamps", y="eCO2s")
p_eco2_tvoc.y_range.renderers = [eCO2_line]
tvoc_range = bokeh.models.DataRange1d()
p_eco2_tvoc.extra_y_ranges = {"tvoc": tvoc_range}
tvoc_line = p_eco2_tvoc.line(source=source, x="timestamps", y="tvocs", color="orange", y_range_name="tvoc")
tvoc_range.renderers = [tvoc_line]
p_eco2_tvoc.add_layout(bokeh.models.LinearAxis(y_range_name="tvoc", axis_label="TVOC (ppb)"), "right")
p_eco2_tvoc.yaxis[0].axis_label_text_color = '#1F77B4'
p_eco2_tvoc.yaxis[1].axis_label_text_color = "orange"
p_pressure = bokeh.plotting.figure(
x_axis_label="time",
y_axis_label="pressure (hPa)",
x_axis_type="datetime"
)
pressure_line = p_pressure.line(source=source, x="timestamps", y="pressures")
for p in (p_temp_hum, p_light, p_eco2_tvoc, p_pressure):
p.xaxis.formatter=bokeh.models.DatetimeTickFormatter(
seconds = ['%H:%M:%S'],
minutes = ['%H:%M:%S'],
hourmin = ['%H:%M:%S'],
days = ['%Y-%m-%d %H:%M:%S'],
months = ['%Y-%m-%d %H:%M:%S'],
)
p.xaxis.major_label_orientation = np.pi/4
p.x_range.range_padding = 0
if p.legend:
p.legend.background_fill_alpha=0
p.legend.border_line_alpha=0
grid = bokeh.layouts.gridplot(
[p_temp_hum, p_light, p_pressure, p_eco2_tvoc],
ncols=2,
plot_height=175,
plot_width=400,
sizing_mode="stretch_width"
)
@bokeh.driving.linear()
def update(step):
last_data = [[data] for data in get_last_data()]
update_dict = dict(zip(datatype_names, last_data))
timestamp = update_dict['timestamps'][0]
update_dict['timestamps'][0] = datetime.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
source.stream(update_dict, rollover)
doc.add_root(grid)
theme = Theme(filename="theme.yaml")
doc.theme = theme
pc = doc.add_periodic_callback(update, sample_frequency*1000)
@app.route('/')
def index():
script = server_document("http://192.168.0.105:5006/bkapp")
return render_template('index.html', script=script, template="Flask")
def bk_worker():
server = Server(
{'/bkapp': bkapp},
io_loop=IOLoop(),
allow_websocket_origin=[
"localhost:5000",
"0.0.0.0:5000",
"192.168.0.105:5000",
"192.168.0.105:5006"
]
)
server.start()
server.io_loop.start()
process_read = Process(target=read_sensors)
process_plot = Process(target=bk_worker)
process_read.start()
process_plot.start()
if __name__ == '__main__':
app.run(debug=True, use_reloader=False, host='0.0.0.0', port=5000)
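# Schema sketch (inferred from the INSERT/SELECT statements above; the table is
# assumed to be created separately before the app runs). The column order and the
# "timestamp" name come from the queries; the other column names are illustrative:
#
#     CREATE TABLE data (
#         timestamp TEXT,
#         temperature REAL,
#         humidity REAL,
#         pressure REAL,
#         eCO2 REAL,
#         tvoc REAL,
#         light REAL,
#         uv REAL
#     );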
|
app.py
|
# -*- coding: utf-8 -*-
from threading import Thread
from dctcs.user.user import User
from dctcs.db.models import db_handler
from dctcs.constdef.const import DEFAULT_TMP
from dctcs.schedule.scheduler import Scheduler
from flask import Flask, request, jsonify
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
# Initialize the temperature control system
scheduler = Scheduler()
# Run the scheduler dispatcher in a separate thread
t = Thread(target=scheduler.dispatcher)
t.start()
'''
User-side endpoints
* /check_in             check in
* /adjust_temperature   adjust the target temperature
* /adjust_wind          adjust the fan speed
* /status_heartbeat     status heartbeat
* /check_out            check out
'''
@app.route('/check_in', methods=['POST'])
def check_in():
room_id = db_handler.check_in()
    # Initialize the first scheduling request
user = User(scheduler, room_id, DEFAULT_TMP, 'mid')
user.change_target_temp(DEFAULT_TMP)
return jsonify(
status=1,
message='Check in successfully',
room_id=room_id,
)
@app.route('/adjust_temperature', methods=['POST'])
def adjust_temperature():
room_id = int(request.form['room_id'])
cur_temp = float(request.form['cur_temp'])
cur_speed = str(request.form['cur_speed'])
target_temp = float(request.form['target_temp'])
user = User(scheduler, room_id, cur_temp, cur_speed)
return jsonify(
status=1 if user.change_target_temp(target_temp) else -1,
message='Adjust temperature request received'
)
@app.route('/adjust_wind', methods=['POST'])
def adjust_wind():
room_id = int(request.form['room_id'])
cur_temp = float(request.form['cur_temp'])
cur_speed = str(request.form['cur_speed'])
target_speed = str(request.form['target_speed'])
user = User(scheduler, room_id, cur_temp, cur_speed)
return jsonify(
status=1 if user.change_fan_speed(target_speed) else -1,
message='Adjust fan speed request received'
)
@app.route('/status_heartbeat', methods=['GET'])
def status_heartbeat():
room_id = int(request.args.get('room_id', -1))
status = scheduler.query(room_id)
return jsonify(
temperature=status[0],
speed=status[1],
electrical_usage=status[2],
fee=status[3],
)
@app.route('/check_out', methods=['POST'])
def check_out():
room_id = int(request.form['room_id'])
bill, detailed_list = scheduler.check_out(room_id)
return jsonify(
status=1,
message='Check out successfully',
bill=bill,
detailed_list=detailed_list
)
'''
Air-conditioning administrator endpoints
* /get_room_detial_list  get the list of currently checked-in rooms and their detailed billing records
'''
@app.route('/get_room_detial_list', methods=['GET'])
def get_room_detial_list():
return jsonify(
rooms=db_handler.get_checked_in_room_info()
)
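# Usage sketch (illustrative only; host/port assume a default local Flask run):
#
#     import requests
#     base = "http://127.0.0.1:5000"
#     room_id = requests.post(base + "/check_in").json()["room_id"]
#     requests.post(base + "/adjust_temperature",
#                   data={"room_id": room_id, "cur_temp": 28.0,
#                         "cur_speed": "mid", "target_temp": 24.0})
#     requests.get(base + "/status_heartbeat", params={"room_id": room_id})
#     requests.post(base + "/check_out", data={"room_id": room_id})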
|
fcfs_refinement.py
|
import os
import sys
import time
import psutil
import startinpy
import numpy as np
from multiprocessing import cpu_count, Process, Lock, Queue, current_process
from scipy.spatial import KDTree
COARSE_THRESHOLD = 2
FINE_THRESHOLD = 0.2
class MemoryUsage:
def __init__(self, process_name, timestamp, memory_usage):
self.process_name = process_name
self.timestamp = timestamp
self.memory_usage = memory_usage
class Triangulation:
def __init__(self):
self.cell_size = None
self.min_x = None
self.min_y = None
self.max_x = None
self.max_y = None
def set_bbox(self, min_x, min_y, max_x, max_y):
self.min_x = min_x
self.min_y = min_y
self.max_x = max_x
self.max_y = max_y
def finalize(self, input_line, grid_x, grid_y, vertices, lock, memory_usage_queue):
stdout_lines = []
if len(vertices) > 0:
triangulation = startinpy.DT()
x_vals = []
y_vals = []
z_vals = []
for vertex_id, point in vertices.items():
x_vals.append(point[0])
y_vals.append(point[1])
z_vals.append(point[2])
tree = KDTree(np.c_[x_vals, y_vals])
corner_points = [
[self.min_x + (self.cell_size * grid_x), self.min_y + (self.cell_size * grid_y)],
[self.min_x + (self.cell_size * grid_x) + self.cell_size - 1E-5, self.min_y + (self.cell_size * grid_y)],
[self.min_x + (self.cell_size * grid_x), self.min_y + (self.cell_size * grid_y) + self.cell_size - 1E-5],
[self.min_x + (self.cell_size * grid_x) + self.cell_size - 1E-5, self.min_y + (self.cell_size * grid_y) + self.cell_size - 1E-5]
]
near_corner_points = []
for corner_point in corner_points:
# Get nearest point to corner
distances, indexes = tree.query(corner_point, k=10)
queried_z_vals = [z_vals[index] for index in indexes if index < len(z_vals)]
# add a corner point with average z value of 10 nearest
near_corner_points.append([corner_point[0], corner_point[1], sum(queried_z_vals) / len(queried_z_vals)])
triangulation.insert(near_corner_points)
to_delete = []
# First coarse loop
for key, vertex in vertices.items():
x = vertex[0]
y = vertex[1]
z = vertex[2]
try:
interpolated_value = triangulation.interpolate_tin_linear(x, y)
if abs(interpolated_value - z) > COARSE_THRESHOLD:
triangulation.insert_one_pt(x, y, z, 0)
to_delete.append(key)
# In rare cases we get a point outside CH due to ----00.0 being counted as wrong cell
# FIXME: Adjust get_cell function to return correct cell for ----00.0 points
except OSError:
pass
for key in reversed(to_delete):
del vertices[key]
# Fine loop
for key, vertex in vertices.items():
x = vertex[0]
y = vertex[1]
z = vertex[2]
try:
interpolated_value = triangulation.interpolate_tin_linear(x, y)
if abs(interpolated_value - z) > FINE_THRESHOLD:
triangulation.insert_one_pt(x, y, z, 0)
# In rare cases we get a point outside CH due to ----00.0 being counted as wrong cell
# FIXME: Adjust get_cell function to return correct cell for ----00.0 points
except OSError:
pass
if triangulation.number_of_vertices() > 4:
for i in [1, 2, 3, 4]:
triangulation.remove(i)
for vertex in triangulation.all_vertices():
# Don't print infinite vertex
if vertex[0] > 0:
stdout_lines.append("v " + str(vertex[0]) + " " + str(vertex[1]) + " " + str(vertex[2]) + "\n")
memory_usage_queue.put(MemoryUsage(current_process().name, round(time.time()), psutil.Process(os.getpid()).memory_info().rss))
stdout_lines.append(input_line)
with lock:
sys.stdout.write("".join(stdout_lines))
sys.stdout.flush()
sys.stderr.write(current_process().name + " - FINISHED.\n")
class Processor:
def __init__(self, dt):
self.triangulation = dt
self.vertex_id = 1
self.vertices = {}
self.sprinkling = True
self.processes = []
self.last_log_time = round(time.time())
self.stdout_lock = Lock()
self.memory_usage_queue = Queue()
self.memory_usage_queue.put(MemoryUsage("Main", self.last_log_time, psutil.Process(os.getpid()).memory_info().rss))
self.memory_usage_writer = Process(target=self.write_memory_usage, args=(self.memory_usage_queue,), daemon=True)
self.memory_usage_writer.start()
def write_memory_usage(self, memory_usage_queue):
while True:
with open(os.path.join(os.getcwd(), "memlog_direct_refinement.csv"), "a") as memory_log_file:
val = memory_usage_queue.get()
if val:
memory_log_file.write(str(val.process_name) + ", " + str(val.timestamp) + ", " + str(val.memory_usage) + "\n")
else:
time.sleep(0.5)
def process_line(self, input_line):
split_line = input_line.rstrip("\n").split(" ")
identifier = split_line[0]
data = split_line[1:]
current_time = round(time.time())
if current_time != self.last_log_time:
self.memory_usage_queue.put(MemoryUsage("Main", current_time, psutil.Process(os.getpid()).memory_info().rss))
self.last_log_time = current_time
if identifier == "#" or identifier == "":
if data[0] == "endsprinkle":
self.sprinkling = False
sys.stderr.write("Sprinkling done!\n")
sys.stderr.flush()
elif identifier == "n":
# Total number of points
self.triangulation.total_points = int(data[0])
sys.stdout.write(input_line)
elif identifier == "c":
# Grid dimensions (cXc)
sys.stdout.write(input_line)
elif identifier == "s":
# Cell size
self.triangulation.cell_size = int(data[0])
sys.stdout.write(input_line)
elif identifier == "b":
# bbox
self.triangulation.set_bbox(float(data[0]), float(data[1]), float(data[2]), float(data[3]))
sys.stdout.write(input_line)
sys.stderr.write(input_line)
sys.stderr.flush()
elif identifier == "v":
# vertex
# All sprinkle points get passed to output directly
if not self.sprinkling:
self.vertices[self.vertex_id] = [float(data[0]), float(data[1]), float(data[2])]
self.vertex_id += 1
else:
sys.stdout.write(input_line)
elif identifier == "x":
# cell finalizer
# While sprinkling, don't bother processing since all finalized cells now are still empty anyways
if self.sprinkling:
sys.stdout.write(input_line)
return
sys.stderr.write("Starting new process to finalize cell: {}, {}. Processing currently running: {}\n".format(data[0], data[1], len(self.processes)))
sys.stderr.flush()
sleep_time = 1
# Ensure total number of processes never exceeds capacity
while len(self.processes) >= cpu_count() - 2:
for i in reversed(range(len(self.processes))):
if not self.processes[i].is_alive():
del self.processes[i]
time.sleep(sleep_time)
process = Process(target=self.triangulation.finalize, args=(input_line, int(data[0]), int(data[1]), self.vertices, self.stdout_lock, self.memory_usage_queue,), daemon=True)
self.vertices = {}
self.vertex_id = 1
self.processes.append(process)
process.start()
else:
# Unknown identifier in stream
pass
sys.stdout.flush()
if __name__ == "__main__":
triangulation = Triangulation()
processor = Processor(triangulation)
start_time = time.time()
for stdin_line in sys.stdin:
processor.process_line(stdin_line)
for process in processor.processes:
process.join()
sys.stderr.write("duration: " + str(time.time() - start_time) + "\n")
|
RoleManager.py
|
#!/usr/bin/env python
import rospy
import socket
import threading
import roslaunch
import roslib
import select
import time
from std_msgs.msg import String
class RoleManager:
PORT = 65535
RECEIVE_SOCKET_TIMEOUT = 3
CONNECTION_ATTEMPT_TIMEOUT = 5
def __init__(self):
"""
        The rosparam ipList MUST be set as a list of strings (e.g. "['127.0.0.1', '127.0.1.1']").
In ipList there's also my ip address.
"""
rospy.init_node("role_manager", anonymous=True)
self.myIPAddress = rospy.get_param("/myIPAddress")
self.ipList = rospy.get_param("/ipList") # list of ip (me included)
self.nodeListenerSub = rospy.Subscriber("node_to_rolemanager", String, self.ownNodeListener)
self.nodeSpeakerPub = rospy.Publisher("rolemanager_to_node", String, queue_size=10)
self.role = False # role = True for Witch, role = False for Kid
self.witchIPAddress = min(self.ipList) # the first witch
self.host = None
self.port = self.PORT
self.sock = None
self.launch = None
self.myThread = []
self.conn = [] # list of socket objects
self.address = [] # list of other players' ip addresses
self.topicHandlers = []
self.winnersIPAddress = []
self.stopThreads = False
self.noConnected = 0
self.listenSock = None
self.listenSockSetted = False
self.socketKidToWitch = None
# handlers to methods that manages sockets sends
self.WITCH_COLOR_HANDLERS = [self.tellColorBySocket, self.tellEndGameBySocket]
self.KID_COLOR_HANDLERS = [self.tellColorTouchedBySocket]
# handlers to methods that manages the received socket messages
self.handlers = [self.manageColorMsg,
self.manageColorTouchedMsg,
self.manageEndGameMsg]
self.config()
rospy.spin() # in loop until is killed
def tellColorBySocket(self, color):
"""
Only Witch call this method.
Send to Kids' RM the chosen color.
:param color: the chosen color
"""
msg = "0:" + color + "|"
noPartecipants = len(self.ipList)-1
while self.noConnected < noPartecipants:
time.sleep(0.1)
for c in self.conn:
c.send(msg)
def tellEndGameBySocket(self):
"""
Only Witch call this method.
Send to Kid's RM the end of the game and loser's IP address.
"""
last = self.winnersIPAddress[-1]
self.winnersIPAddress.append(self.myIPAddress)
# rescue ip in ipList but not in winnersIPAddress
nextWitchIP = [ip for ip in self.ipList if ip not in self.winnersIPAddress]
# if len(nextWitchIP) < 1:
nextWitchIP.append(last)
msg = "2:" + str(nextWitchIP[0]) + "|"
for c in self.conn:
c.send(msg)
self.manageEndGameMsg([nextWitchIP[0]])
def tellColorTouchedBySocket(self):
"""
Only Kid call this method.
Send to Witch's RM that the color has been touched.
"""
msg = "1:" + self.myIPAddress + "|"
self.socketKidToWitch.sendall(msg)
def createAndStartConnection(self):
"""
Create and start socket connection.
:return False if connection doesn't succeed, True otherwise.
"""
self.host = self.witchIPAddress
self.stopThreads = False
if self.myIPAddress == self.witchIPAddress:
if not self.listenSockSetted:
self.listenSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listenSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listenSock.bind((self.host, self.port))
self.listenSockSetted = True
self.listenSock.listen(len(self.ipList) - 1)
for i in range(1, len(self.ipList)):
conn, address = self.listenSock.accept()
conn.setblocking(0)
self.conn.append(conn)
self.address.append(address)
thread = threading.Thread(target=self.manageSocket, args=[conn])
self.myThread.append(thread)
thread.start()
self.noConnected = self.noConnected+1
print("Connected to client ", address, ".")
print("Connected to ALL clients")
else:
try:
self.socketKidToWitch = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socketKidToWitch.connect((self.host, self.port))
except Exception as e:
print(str(e))
return False
self.socketKidToWitch.setblocking(0)
socketThread = threading.Thread(target=self.manageSocket, args=[self.socketKidToWitch])
self.myThread.append(socketThread)
socketThread.start()
print("Connected to server.")
return True
def manageSocket(self, threadSocket):
"""
Manage socket messages.
:param threadSocket: the socket to listen.
"""
size = 1024
while not self.stopThreads:
try:
ready = select.select([threadSocket], [], [], self.RECEIVE_SOCKET_TIMEOUT)
if ready[0]:
data = threadSocket.recv(size)[:-1]
if data:
for msg in data.split("|"):
params = msg.split(":") # param[0]=type of msg; param[1]=msg
self.handlers[int(params[0])](params[1:]) # call the method that msg refers to
else:
print('Disconnected.')
break
except Exception as e:
print(str(e))
exit(1)
break
threadSocket.shutdown(socket.SHUT_RDWR)
threadSocket.close()
def manageColorMsg(self, args):
"""
Only Kid call this method.
Write on topic the color to touch.
:param args: args[0]=color name (uppercase).
"""
color = args[0]
self.ownNodeSpeaker(0, color)
def manageColorTouchedMsg(self, args):
"""
Only Witch call this method.
:param args: args[0]=Kid's - who touched the color - IP address.
"""
ipWinner = args[0]
self.winnersIPAddress.append(ipWinner) # add this winner to winners list
print("Color touched by ", ipWinner)
self.ownNodeSpeaker(0, "") # write on topic that another Kid wins
def manageEndGameMsg(self, args):
"""
Based on whether I'm the loser or not, kill the actual node and launch the next one.
:param args: args[0]=loser's name (IP address).
"""
ipLoser = args[0]
self.launch.shutdown()
threading.Thread(target=self.resetParameters, args=[ipLoser]).start() # another thread will call resetParams
# self.resetParameters(ipLoser)
def ownNodeSpeaker(self, typeOfMess, color):
# manage messages TO Kid/Witch nodes writing on its publisher
# Witch:
# 0 (a "color touched" socket is arrived)
# 1 (the number of total players)
# Kid:
# 0 (the socket which contains the color arrived)
# 1 (the "go!" socket arrived)
typeOfMess = int(typeOfMess)
if self.role: # I am a Witch
if typeOfMess == 0:
self.nodeSpeakerPub.publish("0") # another robot touched the color
elif typeOfMess == 1:
self.nodeSpeakerPub.publish("1:" + str(len(self.ipList) - 1)) # number of Kids
else: # I am a Kid
if typeOfMess == 0:
self.nodeSpeakerPub.publish("0:" + color) # color received
def ownNodeListener(self, msg):
# manage messages FROM Kid/Witch nodes reading from its subscriber
# Witch
        # 0:color (the color my Witch has chosen, to be sent to all the Kids' RoleManagers)
        # 1 (all messages needed to end the game have been received) no payload
        # Kid
        # 0 (color found) no payload
msg = msg.data
if self.role: # I am a Witch
if msg[0] == "0":
color = msg[2:]
self.topicHandlers[0](color) # call tellColorBySocket(color)
elif msg[0] == "1":
# game over: n-1 messages received
self.topicHandlers[1]() # tellEndGameBySocket()
else: # I am a Kid
if msg[0] == "0":
self.topicHandlers[0]() # tellColorTouchedBySocket
# self.sock.sendall("1:" + self.myIPAddress + "|") # send a "color touched msg" to the Witch
def prepareLaunchNode(self, iAmWitch):
"""
Choose and configure the Witch/Kid node that will be launched.
:param iAmWitch: True if I am the witch of the next game, False otherwise
:return: the launcher object refers to the Witch/Kid node to launch
"""
path = roslib.packages.get_pkg_dir("touch_the_color")
if iAmWitch:
path += "/src/witchLauncher.launch"
else:
path += "/src/kidLauncher.launch"
uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
roslaunch.configure_logging(uuid)
launch = roslaunch.parent.ROSLaunchParent(uuid, [path])
return launch
def startNode(self):
"""
Start the node referred to self.launch.
        ... it seems there's no way to control the launcher status
"""
self.launch.start()
rospy.loginfo("started")
def config(self):
"""
Configure topic handlers, prepare and start the launch node and call createAndStartConnection.
"""
connected = False
if self.myIPAddress == self.witchIPAddress:
self.role = True
self.topicHandlers = self.WITCH_COLOR_HANDLERS
else:
self.role = False
self.topicHandlers = self.KID_COLOR_HANDLERS
# based on its role, launch Witch or Kid node
self.launch = self.prepareLaunchNode(self.role)
self.startNode()
time.sleep(2)
if self.role: # if I am a Witch
self.ownNodeSpeaker(1, "") # send to my Witch the number of players
startConnectionAttempt = time.time()
while not connected and time.time()-startConnectionAttempt<self.CONNECTION_ATTEMPT_TIMEOUT:
connected = self.createAndStartConnection()
if not connected:
self.launch.shutdown()
exit(1)
def resetParameters(self, witchIp):
"""
Reset all parameters for the next game. At the end, call config.
"""
self.stopThreads = True
for t in self.myThread:
if t is not None:
t.join()
self.host = None
self.launch = None
self.role = None
self.socketKidToWitch = None
self.noConnected = 0
self.topicHandlers = []
del self.myThread[:]
del self.conn[:]
del self.address[:]
del self.winnersIPAddress[:]
self.witchIPAddress = witchIp
self.config()
if __name__ == "__main__":
try:
rm = RoleManager()
except rospy.ROSInterruptException:
print("ROSInterruptException")
except KeyboardInterrupt:
print("KeyboardInterrupt")
|
graphql.py
|
import re
import sys
import traceback
from threading import Thread
import sublime
import sublime_plugin
from ..core import RequestCommandMixin
from ..core.parsers import parse_requests
from ..core.responses import prepare_request
from ..deps import requests
from ..deps.graphql.lexer import GraphQLLexer
from ..deps.graphql.parser import GraphQLParser
placeholder = "__introspection_placeholder"
introspection_query = """
query IntrospectionQuery {
__schema {
queryType { name }
mutationType { name }
subscriptionType { name }
types {
...FullType
}
}
}
fragment FullType on __Type {
kind
name
description
fields(includeDeprecated: true) {
name
description
args {
...InputValue
}
type {
...TypeRef
}
isDeprecated
}
}
fragment InputValue on __InputValue {
name
type { ...TypeRef }
defaultValue
}
fragment TypeRef on __Type {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
}
}
}
}
}
}
}
}
"""
def set_graphql_schema_on_view(view, req):
"""If request was to a GraphQL endpoint, send introspection query on a separate
thread, parse response and set it on view.
"""
if not req.skwargs.get("gql"):
return
def _set(view, url):
"""Ensure types and fields within types can be looked up quickly by name.
`types` dict has the following format:
typeName -> typeDict
Within `typeDict`, `fields` dict has similar format:
fieldName -> fieldDict
"""
kwargs = dict(req.kwargs)
kwargs.pop("params", None)
kwargs.pop("json", None)
kwargs["timeout"] = 3
try:
response = requests.get(url, params={"query": introspection_query}, **kwargs)
schema = response.json()["data"]["__schema"] # get root `Query` type
query_type = schema["queryType"]["name"]
except:
response = requests.post(url, json={"query": introspection_query}, **kwargs)
schema = response.json()["data"]["__schema"] # get root `Query` type
query_type = schema["queryType"]["name"]
types = {}
for t in schema["types"]:
types[t["name"]] = t
fields = {f["name"]: f for f in (t["fields"] or [])}
t["fields"] = fields
view.settings().set("requester.gql_schema", (query_type, types))
thread = Thread(target=lambda: _set(view, req.url.split("?")[0]))
thread.start()
class RequesterGqlAutocompleteListener(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
"""Runs on all views, but is NOOP unless view is response view or history
view. Inside gql query string, only completions returned by this method
are shown.
"""
response_view = view.settings().get("requester.response_view", False)
history_view = view.settings().get("requester.history_view", False)
if not response_view and not history_view:
return None
content = view.substr(sublime.Region(0, view.size()))
m = re.search(r'\bgql\s*=\s*("|\')+', content)
if m is None:
return None
offset, idx = m.end(), view.sel()[0].begin()
try:
request = parse_requests(content, n=1)[0]
if getattr(view, "_env", None) is None:
view._env = RequestCommandMixin.get_env_dict_from_string(
view.settings().get("requester.env_string", None)
)
req = prepare_request(request, view._env, 1)
schema = view.settings().get("requester.gql_schema", None)
if not schema: # let user know schema is being retrieved
set_graphql_schema_on_view(view, req)
raise Exception("Loading GraphQL schema info")
gql = req.skwargs["gql"]
completions = get_completions(gql, idx - offset, schema)
return completions
except Exception as e:
print("GraphQL Error:")
traceback.print_exc(file=sys.stdout)
return (
[[str(e), " "], ["...", " "]],
sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS,
)
def get_completions(gql, idx, schema):
"""Creates AST from `gql` query string, finds out exactly where cursor is in
string, and uses `schema` to get appropriate completions. Doesn't protect
against exceptions. They should be handled by calling code.
"""
start, end = slurp_word(gql, idx)
gql_parser = GraphQLParser()
ast = gql_parser.parse(gql[:start] + placeholder + gql[end:], lexer=GraphQLLexer())
for query in ast.definitions: # get path if it exists
path = placeholder_path(query, placeholder)
if path is not None:
break
query_type, types = schema
t = resolve_type(path, types, query_type)
fields = types[t]["fields"]
completions = []
for f in fields.values():
name = f["name"]
args = [a["name"] + ":" for a in f["args"]]
args_string = "({})".format(", ".join(args)) if args else ""
type_name = resolve_field_type(f)
completions.append(
[
"{}{}\t{}".format(name, args_string, type_name),
"{}{}".format(name, args_string),
]
)
return (completions, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
def resolve_type(path, types, query_type):
"""Moves back and forth between field names in `path` and GraphQL types to
find type name of leaf node in path.
"""
t = query_type
for f in path[:-1]: # stop before reaching placeholder
field = types[t]["fields"][f]
t = resolve_field_type(field)
return t
def resolve_field_type(field):
"""Keep digging into field type until finding a non-null `name`."""
type_ = field["type"]
while type_["name"] is None:
try:
type_ = type_["ofType"]
except:
return None
return type_["name"]
def placeholder_path(field, placeholder):
"""Not the most elegant implementation of DFS. It searches the whole tree and
keeps track of the path to each node. If it finds `placeholder`, it saves this
path and returns it after search is finished.
"""
path = None
def get_path(selection, placeholder, seen=tuple()):
for sel in selection.selections:
seen_next = seen + (sel.name,)
if sel.name == placeholder:
nonlocal path
path = seen_next
get_path(sel, placeholder, seen_next)
get_path(field, placeholder)
return path
def slurp_word(s, idx):
"""Returns index boundaries of word adjacent to `idx` in `s`."""
alnum = r"[A-Za-z0-9_]"
start, end = idx, idx
while True:
if re.match(alnum, s[start - 1]):
start -= 1
else:
break
end = idx
while True:
if re.match(alnum, s[end]):
end += 1
else:
break
return start, end
|
JumpscriptFactory.py
|
from JumpScale import j
import time
import imp
import linecache
import inspect
import JumpScale.baselib.redis
import multiprocessing
import tarfile
import StringIO
import collections
import os
import base64
import traceback
import signal
import sys
class Jumpscript(object):
def __init__(self, ddict=None, path=None):
self._loaded = False
self.name=""
self.organization=""
self.period = 0
self.category = ""
self.lastrun = 0
self.source=""
self.debug = False
self.path=path
self.id = None
self.startatboot = False
self.path = path
self.debug=False
self.timeout = None
if ddict:
ddict.pop('path', None)
self.__dict__.update(ddict)
if path:
self.load()
self.loadAttributes()
def write(self):
if not self.path:
jscriptdir = j.system.fs.joinPaths(j.dirs.tmpDir,"jumpscripts")
j.system.fs.createDir(jscriptdir)
self.path=j.system.fs.joinPaths(jscriptdir, "%s_%s.py" % (self.organization, self.name))
content="""
from JumpScale import j
"""
content += self.source
j.system.fs.writeFile(filename=self.path, contents=content)
def load(self):
self._loaded = True
md5sum = j.tools.hash.md5_string(self.path)
modulename = 'JumpScale.jumpscript_%s' % md5sum
linecache.checkcache(self.path)
self.module = imp.load_source(modulename, self.path)
if self.source.find("DEBUG NOW")!=-1:
self.debug=True
def getDict(self):
result = dict()
for attrib in ('name', 'author', 'organization', 'category', 'license', 'version', 'roles', 'source', 'path', 'descr', 'queue', 'async', 'period', 'order', 'log', 'enable', 'startatboot', 'gid', 'id','timeout'):
result[attrib] = getattr(self, attrib)
return result
def loadAttributes(self):
name = getattr(self.module, 'name', "")
if name=="":
name=j.system.fs.getBaseName(self.path)
name=name.replace(".py","").lower()
source = inspect.getsource(self.module)
self.name=name
self.author=getattr(self.module, 'author', "unknown")
self.organization=getattr(self.module, 'organization', "unknown")
self.category=getattr(self.module, 'category', "unknown")
self.license=getattr(self.module, 'license', "unknown")
self.version=getattr(self.module, 'version', "1.0")
self.debug=getattr(self.module, 'debug', False)
self.roles=getattr(self.module, 'roles', [])
self.source=source
self.descr=self.module.descr
self.queue=getattr(self.module, 'queue', "")
self.timeout=getattr(self.module, 'timeout', None)
self.async = getattr(self.module, 'async',False)
self.period=getattr(self.module, 'period',0)
self.order=getattr(self.module, 'order', 1)
self.log=getattr(self.module, 'log', True)
self.enable=getattr(self.module, 'enable', True)
self.startatboot=getattr(self.module, 'startatboot', False)
self.gid=getattr(self.module, 'gid', j.application.whoAmI.gid)
def getKey(self):
return "%s_%s" % (self.organization, self.name)
def __eq__(self, other):
if not isinstance(other, Jumpscript):
return False
return self.name == other.name and self.organization == other.organization
def executeInWorker(self, *args, **kwargs):
if not self.path:
self.write()
if not self._loaded:
self.load()
if self.debug:
result = self.executeInProcess(*args, **kwargs)
return result
else:
def helper(pipe):
def errorhandler(sig, frame):
try:
msg = 'Failed to execute job on time'
eco = j.errorconditionhandler.getErrorConditionObject(msg=msg)
eco.backtrace = eco.getBacktraceDetailed(frame=frame.f_back, startframe=9)
eco.backtraceDetailed = eco.backtrace
eco.tb = None
eco.tags = "jscategory:%s"%self.category
eco.jid = j.application.jid
eco.tags += " jsorganization:%s"%self.organization
eco.tags +=" jsname:%s"%self.name
j.errorconditionhandler.raiseOperationalCritical(eco=eco,die=False)
except Exception as e:
eco = str(e)
pipe.send(("TIMEOUT", eco))
# when handling sigterm we need to exit
sys.exit(2)
signal.signal(signal.SIGTERM, errorhandler)
try:
result = self.executeInProcess(*args, **kwargs)
pipe.send(result)
except Exception as e:
try:
result = self._getECO(e)
                    except Exception as e:
msg = 'Failed parsing original exception: %s' % e
result = j.errorconditionhandler.getErrorConditionObject(msg=msg)
pipe.send((False, result))
ppipe, cpipe = multiprocessing.Pipe()
proc = multiprocessing.Process(target=helper, args=(cpipe,))
proc.start()
cpipe.close()
proc.join(self.timeout)
if proc.is_alive():
proc.terminate()
proc.join(5)
if proc.is_alive():
try:
os.kill(proc.pid, signal.SIGKILL)
                    except OSError:  # covers ProcessLookupError on Python 3
pass
# reap process
proc.join(5)
msg = 'Failed to execute job on time and failed to kill cleanly'
eco = j.errorconditionhandler.getErrorConditionObject(msg=msg)
eco.errormessagePub = 'JumpScript died unexpectedly %s'
eco.tb = False
return "TIMEOUT", eco.dump()
try:
return ppipe.recv()
except Exception as e:
eco = j.errorconditionhandler.parsePythonErrorObject(e)
eco['errormessagePub'] = 'JumpScript died unexpectedly %s'
                return False, eco
def _getECO(self, e):
eco = j.errorconditionhandler.parsePythonErrorObject(e)
eco.tb = None
eco.errormessage='Exec error procmgr jumpscr:%s_%s on node:%s_%s %s'%(self.organization,self.name, \
j.application.whoAmI.gid, j.application.whoAmI.nid,eco.errormessage)
eco.tags="jscategory:%s"%self.category
eco.jid = j.application.jid
eco.tags+=" jsorganization:%s"%self.organization
eco.tags+=" jsname:%s"%self.name
return eco
def executeInProcess(self, *args, **kwargs):
try:
return True, self.module.action(*args, **kwargs)
except Exception as e:
print "error in jumpscript factory: execute in process."
eco = self._getECO(e)
j.errorconditionhandler.raiseOperationalCritical(eco=eco,die=False)
print(eco)
return False, eco
def executeLocal(self, *args, **kwargs):
if not self.path:
self.write()
if not self._loaded:
self.load()
return self.module.action(*args, **kwargs)
def execute(self, *args, **kwargs):
"""
"""
result = None, None
redisw = kwargs.pop('_redisw', j.clients.redisworker)
if not self.enable:
return
if not self.async:
result = list(self.executeInProcess(*args, **kwargs))
if not result[0]:
eco = result[1]
eco.type = str(eco.type)
result[1] = eco.__dict__
else:
#make sure this gets executed by worker
queue = getattr(self, 'queue', 'default') #fall back to default queue if none specified
result=redisw.execJumpscript(jumpscript=self,_timeout=self.timeout,_queue=queue,_log=self.log,_sync=False)
self.lastrun = time.time()
if result!=None:
print("ok:%s"%self.name)
return result
"""
Metadata about a Lua Jumpscript.
"""
LuaJumpscript = collections.namedtuple('LuaJumpscript', field_names=(
'name', 'path', 'organization', 'queue', 'log', 'id', 'enable',
))
class JumpscriptFactory:
"""
"""
def __init__(self):
self.basedir = j.system.fs.joinPaths(j.dirs.baseDir, 'apps', 'processmanager')
def getJSClass(self):
return Jumpscript
def getArchivedJumpscripts(self, bz2_compressed=True, types=('processmanager', 'jumpscripts')):
"""
Returns the available jumpscripts in TAR format that is optionally compressed using bzip2.
Args:
bz2_compressed (boolean): If True then the returned TAR is bzip2-compressed
types (sequence of str): A sequence of the types of jumpscripts to be packed in the returned archive.
possible values in the sequence are 'processmanager', 'jumpscripts', and 'luajumpscripts'.
"""
fp = StringIO.StringIO()
with tarfile.open(fileobj=fp, mode='w:bz2' if bz2_compressed else 'w') as tar:
for jumpscript_type in types:
parent_path = '%s/apps/agentcontroller/%s' % (j.dirs.baseDir, jumpscript_type)
for allowed_filename_extension in ('py', 'lua'):
for file_path in j.system.fs.walkExtended(parent_path, recurse=1, dirs=False,
filePattern='*.' + allowed_filename_extension):
path_in_archive = jumpscript_type + '/' + file_path.split(parent_path)[1]
tar.add(file_path, path_in_archive)
return fp.getvalue()
def loadFromAC(self, acl=None):
if acl is None:
acl = j.clients.agentcontroller.getByInstance()
tar = base64.decodestring(acl.getJumpscripts())
self.loadFromTar(tar, 'bz2')
def loadFromTar(self, tarcontent, type):
j.system.fs.removeDirTree(self.basedir)
import tarfile
fp = StringIO.StringIO()
fp.write(tarcontent)
fp.seek(0)
mode = "r:%s" % type
tar = tarfile.open(fileobj=fp, mode=mode)
for tarinfo in tar:
if tarinfo.isfile():
print(tarinfo.name)
if tarinfo.name.find("processmanager/")==0:
tar.extract(tarinfo.name, j.system.fs.getParent(self.basedir))
if tarinfo.name.find("jumpscripts/")==0:
tar.extract(tarinfo.name, self.basedir)
@staticmethod
def introspectLuaJumpscript(path):
"""
Introspects for a Lua Jumpscript at the given path and returns a LuaJumpscript object with the results.
Args:
path (str): the absolute path to the jumpscript file.
Raises:
IOError if the file at the path could not be opened.
"""
assert os.path.isabs(path), 'An absolute file path is needed'
if not os.path.exists(path):
raise IOError(path + ' does not exist')
relative_path = path.split('agentcontroller/')[1] # Remove the string "^.*agentcontroller/"
# The Lua Jumpscript metadata is inferred conventionally using the jumpscript file's relative path as follows:
# luajumpscripts/ORGANIZATION[/IRRELEVANT_SUBPATH]/JUMPSCRIPT_NAME.lua
#
# Note: The IRRELEVANT_SUBPATH is optional and is not used.
path_components = relative_path.split('/')
jumpscript_name = os.path.splitext(path_components[-1])[0]
jumpscript_organization = path_components[1]
return LuaJumpscript(
name=jumpscript_name,
path=path,
organization=jumpscript_organization,
queue=None,
log=True,
id=None,
enable=True
)
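# Example of the path convention used by introspectLuaJumpscript (illustrative
# path; only the components after "agentcontroller/" matter for the parsing):
#
#     introspectLuaJumpscript('/opt/jumpscale/apps/agentcontroller/luajumpscripts/myorg/sub/ping.lua')
#     # -> LuaJumpscript(name='ping', path='...', organization='myorg',
#     #                  queue=None, log=True, id=None, enable=True)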
|
draft.py
|
'''
Program name: ChatRoom/client.py
GUI Client: a GUI client that can communicate with the server.
a client can send text messages to all parties in the chat room,
as well as receive notifications when other clients connect or disconnect.
Clients do not communicate directly with other clients.
Instead, all communication is routed through the central server.
Usage: Run python client.py -u <user-name> (default ip = 'localhost', default port = '8765')
'''
import socket
import threading
import tkinter as tk
from tkinter.filedialog import askopenfilename
import argparse
import os
# Receive message from the server
def recvMessage(socket):
while True:
try:
msg = socket.recv(4096).decode('utf8')
msg_list.insert(tk.END, msg)
except OSError:
break
# Send message to the server
def sendMessage(event=None):
msg = my_msg.get()
my_msg.set("")
s.send(msg.encode('utf-8'))
if msg == "Gotta go, TTYL!":
s.close()
window.quit()
# Send file to the server
def sendFile(event=None):
file = askopenfilename()
if(len(file) > 0 and os.path.isfile(file)):
print("UI: Selected file: %s" % file)
with open(file, 'rb') as f:
filename = b'sending file name - ' + file.split('/')[-1].encode('utf-8') + b': '
s.send(filename + f.read())
else:
print("UI: File operation canceled")
# Close the window
def hover_close(event=None):
my_msg.set("Gotta go, TTYL!")
sendMessage()
# Main function
if __name__ == '__main__':
# Use argparse method
parser = argparse.ArgumentParser()
parser.add_argument('--version', '-v', action='version', version='%(prog)s 1.0')
parser.add_argument('--server_ip', '-ip', nargs='?', default='localhost')
    parser.add_argument('--server_port', '-p', nargs='?', type=int, default=8765)
parser.add_argument('--username', '-u')
args = parser.parse_args()
# Create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((args.server_ip , args.server_port))
# Use tkinter
window = tk.Tk()
window.title("ChatRoom/1.0 Connected to: "+ args.server_ip + ": "+str(args.server_port))
messages_frame = tk.Frame(window)
label = tk.Label(window, text = "TYPE <Gotta go, TTYL!> to QUIT", width = 53, font=("Helvetica", 12), fg="Blue", anchor="w")
label.pack()
my_msg = tk.StringVar()
s.send(args.username.encode('utf-8'))
scrollbar = tk.Scrollbar(messages_frame)
msg_list = tk.Listbox(messages_frame, height=25, width=40, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
msg_list.pack(side=tk.LEFT, fill=tk.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tk.Entry(window, textvariable=my_msg, width = 30)
entry_field.bind("<Return>", sendMessage)
entry_field.pack(side=tk.LEFT)
send_button = tk.Button(window, text="Send", command=sendMessage, width=5)
send_button.pack(side=tk.LEFT)
file_button = tk.Button(window, text="File", command=sendFile, width = 5)
file_button.pack(side=tk.LEFT)
window.protocol("WM_DELETE_WINDOW", hover_close)
threading.Thread(target=recvMessage, args = (s,)).start()
tk.mainloop()
|
manage.py
|
#!/usr/bin/env python
import os
import sys
import time
import datetime as dt
import requests
import threading
from source.util import unpack_url_tar
from Vizard.settings import TOOL_PATH, CONF_JMETER, CONF_GATLING
def keep_alive():
while True:
if 8 < dt.datetime.utcnow().hour < 22:
print('Send keep alive thread...', end='')
print(requests.get('https://vizardous.herokuapp.com/'))
time.sleep(60 * 5) # sleep 5 minutes
if __name__ == "__main__":
keep_alive_thread = threading.Thread(target=keep_alive)
keep_alive_thread.daemon = True
keep_alive_thread.start()
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Vizard.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
'Couldn\'t import Django. Are you sure it\'s installed and '
'available on your PYTHONPATH environment variable? Did you '
'forget to activate a virtual environment?'
) from exc
if not os.path.exists(CONF_JMETER['executable_path']):
print('Download JMeter...')
unpack_url_tar(CONF_JMETER['download_url'], TOOL_PATH)
# if not os.path.exists(CONF_GATLING['executable_path']):
# print('Download Gatling...')
# unpack_url_zip(CONF_GATLING['download_url'], TOOL_PATH)
execute_from_command_line(sys.argv)
|
cico_transaction_receipt_origin_contract_address.py
|
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
import threading
def waitforlogs(node, contract_address):
logs = node.cli.waitforlogs(node.cli.getblockcount()-1, 1000, '{"addresses": ["'+contract_address+'"]}')
node.result = logs
class CicoTransactionReceiptOriginContractAddressTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-logevents', '-txindex']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.node = self.nodes[0]
self.nodes[0].generate(10 + COINBASE_MATURITY)
"""
pragma solidity ^0.5.2;
contract Test {
event TestEvent();
address private child;
function setChildContract(address childContractAddress) external {
child = childContractAddress;
}
function doEvent() external {
if(child == address(0x0)) {
emit TestEvent();
} else {
Test(child).doEvent();
}
}
function getChildAddress() public view returns(address) {
return child;
}
}
"""
"""
Function signatures:
afd67ce7: doEvent()
bcb1c3a9: getChildAddress()
f8d86e18: setChildContract(address)
"""
# Set up a chain of 10 contracts that reference their child contract. I.e. the tenth contract is the leaf
contracts = []
contract_bytecode = "608060405234801561001057600080fd5b506102b8806100206000396000f3fe608060405234801561001057600080fd5b506004361061005e576000357c010000000000000000000000000000000000000000000000000000000090048063afd67ce714610063578063bcb1c3a91461006d578063f8d86e18146100b7575b600080fd5b61006b6100fb565b005b610075610220565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6100f9600480360360208110156100cd57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610249565b005b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161415610182577f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405160405180910390a161021e565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663afd67ce76040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401600060405180830381600087803b15801561020757600080fd5b5060325a03f115801561021957600080fd5b505050505b565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505056fea165627a7a723058203cf61a18e40f6e2bd01b2f7bd607c6e6aff032f12bd5e3eca68212d2e2c80dbf0029"
for i in range(10):
contracts.append(self.nodes[0].createcontract(contract_bytecode)['address'])
self.node.generate(1)
if len(contracts) > 1:
self.node.sendtocontract(contracts[-2], "f8d86e18" + (contracts[-1].zfill(64)), 0, 1000000)
self.node.generate(1)
        # Run the doEvent function recursively, starting at the root contract, and make sure
        # that no event entries show up in the waitforlogs results for the first 9 contracts
for contract_address in contracts[:-1]:
thread = threading.Thread(target=waitforlogs, args=(self.node, contract_address))
thread.start()
txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
self.node.generate(7)
thread.join()
receipt = self.node.gettransactionreceipt(txid)
assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
assert_equal(len(self.node.result['entries']), 0)
# Do the same thing again but make sure that the event triggers for the "leaf" (10th) contract
thread = threading.Thread(target=waitforlogs, args=(self.node, contracts[-1]))
thread.start()
txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
self.node.generate(7)
thread.join()
receipt = self.node.gettransactionreceipt(txid)
assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
assert_equal(len(self.node.result['entries']), 1)
if __name__ == '__main__':
CicoTransactionReceiptOriginContractAddressTest().main()
|
helper_funcs.py
|
import contextlib
import io
import threading
from typing import Any, Callable, Iterable, Union
def null_fn() -> None:
return None
def check_null_fn(fn: Union[Callable[[], None], None]) -> Callable[[], None]:
if fn is None:
return null_fn
return fn
def check_iterable(s: Union[str, Iterable[str]]) -> Iterable[str]:
if isinstance(s, str):
return [s]
return s
def start_daemon_thread(target, args: Union[Iterable[Any], None] = None) -> threading.Thread:
if not args:
args = []
thread = threading.Thread(target=target, args=args)
    thread.daemon = True
thread.start()
return thread
class NullIO(io.StringIO):
def write(self, txt: str) -> None:
pass
def silent_stderr(fn):
"""Decorator to silence stderr output of functions."""
def silent_fn(*args, **kwargs):
with contextlib.redirect_stderr(NullIO()):
return fn(*args, **kwargs)
return silent_fn
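# --- Example usage (editor's sketch; not part of the original module) ---
# A minimal demonstration of the helpers above, guarded so it only runs when
# the file is executed directly.
if __name__ == "__main__":
    import sys
    import time
    @silent_stderr
    def noisy() -> None:
        # Anything written to stderr inside this call is discarded by NullIO.
        print("this warning is swallowed", file=sys.stderr)
        print("stdout is unaffected")
    noisy()
    # Fire-and-forget worker thread; join() here only so the demo exits cleanly.
    worker = start_daemon_thread(target=lambda: time.sleep(0.1))
    worker.join()
    print("coerced:", check_iterable("single-value"))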
|
_exposition.py
|
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based off `prometheus_client/exposition.py` from version 0.7.1.
Due to the renaming of metrics in prometheus_client 0.4.0, this customised
vendoring of the code will emit both the old versions that Synapse dashboards
expect, and the newer "best practice" version of the up-to-date official client.
"""
import math
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from typing import Dict, List
from urllib.parse import parse_qs, urlparse
from prometheus_client import REGISTRY
from twisted.web.resource import Resource
from synapse.util import caches
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
INF = float("inf")
MINUS_INF = float("-inf")
def floatToGoString(d):
d = float(d)
if d == INF:
return "+Inf"
elif d == MINUS_INF:
return "-Inf"
elif math.isnan(d):
return "NaN"
else:
s = repr(d)
dot = s.find(".")
# Go switches to exponents sooner than Python.
# We only need to care about positive values for le/quantile.
if d > 0 and dot > 6:
mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
return f"{mantissa}e+0{dot - 1}"
return s
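# Illustrative values for the formatter above (derived from its logic, not from
# upstream docs):
#   floatToGoString(0.5)            -> "0.5"
#   floatToGoString(float("inf"))   -> "+Inf"
#   floatToGoString(1234567.0)      -> "1.234567e+06"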
def sample_line(line, name):
if line.labels:
labelstr = "{{{0}}}".format(
",".join(
[
'{}="{}"'.format(
k,
v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
)
for k, v in sorted(line.labels.items())
]
)
)
else:
labelstr = ""
timestamp = ""
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = f" {int(float(line.timestamp) * 1000):d}"
return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
def generate_latest(registry, emit_help=False):
    # Trigger the cache collectors to be rescraped; they update the common
    # metrics but do not produce samples of their own.
for collector in caches.collectors_by_name.values():
collector.collect()
output = []
for metric in registry.collect():
if not metric.samples:
# No samples, don't bother.
continue
mname = metric.name
mnewname = metric.name
mtype = metric.type
# OpenMetrics -> Prometheus
if mtype == "counter":
mnewname = mnewname + "_total"
elif mtype == "info":
mtype = "gauge"
mnewname = mnewname + "_info"
elif mtype == "stateset":
mtype = "gauge"
elif mtype == "gaugehistogram":
mtype = "histogram"
elif mtype == "unknown":
mtype = "untyped"
# Output in the old format for compatibility.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mname} {mtype}\n")
om_samples: Dict[str, List[str]] = {}
for s in metric.samples:
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == metric.name + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
# (these come from gaugehistograms which don't get renamed,
# so no need to faff with mnewname)
om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
break
else:
newname = s.name.replace(mnewname, mname)
if ":" in newname and newname.endswith("_total"):
newname = newname[: -len("_total")]
output.append(sample_line(s, newname))
for suffix, lines in sorted(om_samples.items()):
if emit_help:
output.append(
"# HELP {}{} {}\n".format(
metric.name,
suffix,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {metric.name}{suffix} gauge\n")
output.extend(lines)
# Get rid of the weird colon things while we're at it
if mtype == "counter":
mnewname = mnewname.replace(":total", "")
mnewname = mnewname.replace(":", "_")
if mname == mnewname:
continue
# Also output in the new format, if it's different.
if emit_help:
output.append(
"# HELP {} {}\n".format(
mnewname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {mnewname} {mtype}\n")
for s in metric.samples:
# Get rid of the OpenMetrics specific samples (we should already have
# dealt with them above anyway.)
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == metric.name + suffix:
break
else:
output.append(
sample_line(s, s.name.replace(":total", "").replace(":", "_"))
)
return "".join(output).encode("utf-8")
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""
registry = REGISTRY
def do_GET(self):
registry = self.registry
params = parse_qs(urlparse(self.path).query)
if "help" in params:
emit_help = True
else:
emit_help = False
try:
output = generate_latest(registry, emit_help=emit_help)
except Exception:
self.send_error(500, "error generating metric output")
raise
self.send_response(200)
self.send_header("Content-Type", CONTENT_TYPE_LATEST)
self.send_header("Content-Length", str(len(output)))
self.end_headers()
self.wfile.write(output)
def log_message(self, format, *args):
"""Log nothing."""
@classmethod
def factory(cls, registry):
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).
        # str() is used so that type() receives a plain str for the class name.
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True
def start_http_server(port, addr="", registry=REGISTRY):
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
class MetricsResource(Resource):
"""
Twisted ``Resource`` that serves prometheus metrics.
"""
isLeaf = True
def __init__(self, registry=REGISTRY):
self.registry = registry
def render_GET(self, request):
request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
response = generate_latest(self.registry)
request.setHeader(b"Content-Length", str(len(response)))
return response
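# --- Example wiring (editor's sketch; not part of the original module) ---
# Shows the two exposure paths above: the stdlib daemon-thread server and the
# Twisted resource. Port numbers and the "/metrics" path are arbitrary choices.
if __name__ == "__main__":
    from twisted.internet import reactor
    from twisted.web.server import Site
    # Simplest option: serve the default REGISTRY in a background thread.
    start_http_server(8000, addr="127.0.0.1")
    # Twisted option: mount MetricsResource under /metrics on another port.
    root = Resource()
    root.putChild(b"metrics", MetricsResource())
    reactor.listenTCP(8001, Site(root))
    reactor.run()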
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(curdir)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
    return not os.path.isdir('docs')
def is_ci():
    return 'CI' in os.environ
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
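# Illustrative only: the skip decorators above are meant to be stacked on test
# methods, e.g. (hypothetical test name and arguments)
#
#   @skipIfNoExecutable('valgrind')
#   @skipIfNoPkgconfigDep('glib-2.0')
#   @skip_if_not_base_option('b_sanitize')
#   def test_something(self):
#       ...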
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
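# Example (illustrative): temp_filename() yields a path rather than a file
# object, so callers open and close it themselves:
#
#   with temp_filename() as fname:
#       with open(fname, 'w') as f:
#           f.write('scratch data')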
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
    def new_which(cmd, *args, **kwargs):
        if cmd == 'pkg-config':
            return None
        return old_which(cmd, *args, **kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
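# Example (illustrative): force the "pkg-config not found" code path in a test.
#
#   with no_pkgconfig():
#       env = get_fake_env()
#       dep = PkgConfigDependency('glib-2.0', env, {'required': False})
#       assert not dep.found()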
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(cc, ['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(cc, ['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
        # -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
        # Cannot be used as a context manager because we need to
        # open it a second time and this is not possible on
        # Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
dummystate = Mock()
dummystate.subproject = 'dummy'
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
            # line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE)) + [None]
for s1, s2 in zip(sections[:], sections[1:]):
if s1.group(1) == "Universal options":
# Extract the content for this section
end = s2.start() if s2 is not None else len(md)
content = md[s1.end():end]
subsections = list(re.finditer(r"^### (.+)$", content, re.MULTILINE)) + [None]
for sub1, sub2 in zip(subsections[:], subsections[1:]):
if sub1.group(1) == "Directories" or sub1.group(1) == "Core options":
# Extract the content for this subsection
sub_end = sub2.start() if sub2 is not None else len(content)
subcontent = content[sub1.end():sub_end]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) .* \|", subcontent, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(len(found_entries & arches), 0)
found_entries |= arches
break
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
for s1, s2 in zip(sections[::2], sections[1::2]):
if s1.group(1) == "CPU families":
# Extract the content for this section
content = md[s1.end():s2.start()]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
        Ensure that the syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
        Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
        # In case the directory is inside a symlinked directory, find the real
        # path, otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
        # If this call hangs, CI will just abort. It is very hard to distinguish
        # between a CI issue and a test bug in that case. Set a timeout and fail
        # loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
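    # Hedged sketch (editor-added; paths are hypothetical): each entry of the
    # compile_commands.json consumed above is a dict roughly of the form
    #   {'directory': '/path/to/build', 'command': 'cc -Isub -c file.c', 'file': 'file.c'}
    # and only 'command' is rewritten when Ninja emitted .rsp response files.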
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
        Fetch a list of command-lines run by Meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
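    # Hedged example (editor-added; values are illustrative): a meson-log line
    # such as
    #   "Command line: cc /tmp/check.c -o /tmp/check.exe -O0"
    # is returned by the helper above as
    #   ['cc', '/tmp/check.c', '-o', '/tmp/check.exe', '-O0']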
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
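    # Editor's illustration of the quirk described above (standard library
    # behaviour, easy to verify in a REPL):
    #   os.path.basename('/foo/bar/')   == ''
    #   PurePath('/foo/bar/').parts[-1] == 'bar'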
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
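    # Hedged note (editor-added): in the plain-dict form used above, each
    # confdata value appears to be a (value, description) pair, e.g.
    #   {'VAR': ('foo', 'bar')}  ->  '@VAR@' is replaced by 'foo'
    # with 'bar' being a human-readable description that the test ignores.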
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
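    # Hedged sketch (editor-added; the suite name is illustrative): this helper
    # relies on 'meson test' using the number of failed tests as its exit code,
    # so e.g.
    #   self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'foo'])
    # passes only when the run exits with code 2, i.e. exactly two failures.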
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
            env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
        Test that built targets use $ORIGIN in their rpath, which makes them
        relocatable and keeps builds reproducible, since the build directory
        is not embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
# Build the archive through the detected static linker object; its
# get_exelist()/get_output_args() already cover both ar and MSVC's lib.exe,
# so no per-compiler command line is needed here.
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
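# One way to inspect the soname that the -Wl,-soname flag above embeds is to
# parse `readelf -d` output. This is a hedged sketch assuming a GNU toolchain
# with readelf on PATH; the tests in this file do not perform this check.
def _sketch_read_soname(shlibfile):
    import re
    import subprocess
    out = subprocess.check_output(['readelf', '-d', shlibfile],
                                  universal_newlines=True)
    m = re.search(r'Library soname: \[([^\]]+)\]', out)
    return m.group(1) if m else None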
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. This can't be an ordinary project
test because we need to build the libraries first and then find them from
meson.build. Also test that it's not a hard error to have unsatisfiable
library deps, since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
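# A rough sketch of the preference being tested above: with `static: true`,
# a -lfoo requirement coming from pkg-config should resolve to libfoo.a before
# libfoo.so. This only illustrates the ordering; it is not Meson's actual
# dependency-resolution code, and the directory layout is assumed.
def _sketch_prefer_static(libname, search_dirs):
    import os
    for suffix in ('.a', '.so'):  # try the static archive first
        for d in search_dirs:
            candidate = os.path.join(d, 'lib' + libname + suffix)
            if os.path.exists(candidate):
                return candidate
    return None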
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc))
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests, this test will fail, so mock the environment, pop
# it, then test.
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
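# An approximate sketch of the XDG-style lookup exercised above: a bare
# --cross-file name is searched under $XDG_DATA_HOME (defaulting to
# ~/.local/share) and then under each entry of $XDG_DATA_DIRS, inside a
# meson/cross subdirectory. This mirrors the convention the test relies on;
# it is not a copy of Meson's own resolver.
def _sketch_cross_file_candidates(name, environ):
    import os
    data_home = environ.get('XDG_DATA_HOME',
                            os.path.expanduser('~/.local/share'))
    data_dirs = [d for d in environ.get('XDG_DATA_DIRS', '').split(':') if d]
    return [os.path.join(d, 'meson', 'cross', name)
            for d in [data_home] + data_dirs]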
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
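# A minimal POSIX-only sketch of the exclusive locking that BuildDirLock is
# expected to provide: a second non-blocking flock on the same file raises
# while the first holder keeps it. Illustrative only; this is not Meson's
# BuildDirLock implementation.
def _sketch_try_exclusive_lock(lockpath):
    import fcntl
    f = open(lockpath, 'w')
    try:
        fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        f.close()
        return None  # someone else already holds the lock
    return f  # keep the file object alive for as long as the lock is needed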
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that linking against a shared module with link_with: issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# Static libraries are not linkable with -l under msvc because Meson installs
# them as .a files, which unix_args_to_native does not recognize since it
# expects libraries to use the .lib extension. For a DLL the import library
# is installed as .lib. Thus for msvc this test needs to use shared libraries
# to exercise the path-resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
self.installdir = initial_installdir
self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set a wrong option for unknown subprojects or
# languages because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
# Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
# Ignore KeyError; it happens on CI for compilers that do not
# support b_sanitize. We have to test with a base option because
# base options used to fail this test with Meson 0.46 and earlier versions.
pass
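# The -Dc_args assertions above depend on shell-style word splitting. Python's
# shlex module splits the same way for these inputs and is a quick way to see
# the expected result (Meson uses its own split_args helper; shlex is shown
# here purely as an illustration).
def _sketch_split_c_args():
    import shlex
    assert shlex.split('-Dfoo -Dbar "-Dthird=one two"') == \
        ['-Dfoo', '-Dbar', '-Dthird=one two']
    assert shlex.split('"foo bar" one two') == ['foo bar', 'one', 'two']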
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
with open(mfile, 'w') as of:
    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
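# The buildtype assertions above follow from the mapping between the aggregate
# buildtype option and the underlying debug/optimization options. A small
# reference table, as documented for Meson (the authoritative mapping lives in
# mesonbuild/coredata.py, so treat this as a sketch):
_SKETCH_BUILDTYPE_TABLE = {
    # buildtype:        (debug, optimization)
    'plain':            (False, '0'),
    'debug':            (True,  '0'),
    'debugoptimized':   (True,  '2'),
    'release':          (False, '3'),
    'minsize':          (True,  's'),
}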
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = [r'{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
# Run --wipe with the build directory as the working directory; a bare
# "with Path(...)" does not change the working directory, so chdir
# explicitly and restore it afterwards.
oldcwd = os.getcwd()
os.chdir(self.builddir)
try:
    self.init(testdir, extra_args=['--wipe'])
finally:
    os.chdir(oldcwd)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
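# A hedged sketch of the idea behind "stable but not guessable" target IDs:
# hash the subdir path, keep a short prefix, and join it with the target name
# and type suffix, giving the '<hash>@@<name><suffix>' shape checked above.
# The exact digest and formatting are Meson internals; this helper is only an
# illustration and will not reproduce the IDs asserted in the test.
def _sketch_target_id(subdir, name, type_suffix):
    import hashlib
    subdir_hash = hashlib.sha256(subdir.encode()).hexdigest()[:7]
    return subdir_hash + '@@' + name + type_suffix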
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '72 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
Subprojects
sub: YES
sub2: NO
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
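# assertMesonRaises builds on unittest's assertRaisesRegex: the body under the
# context manager must raise an exception whose string form matches the given
# regex. A tiny self-contained illustration of that mechanism (hypothetical
# example values, unrelated to Meson):
def _sketch_assert_raises_regex():
    import unittest
    class _Demo(unittest.TestCase):
        def runTest(self):
            with self.assertRaisesRegex(ValueError, 'bad .* value'):
                raise ValueError('bad config value')
    _Demo().runTest()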
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is outputted when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# Check the full list here to force people to update this test when the set of ignored libs changes
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertIn('prog.pdb', files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvar = mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
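# Illustrative sketch, not part of the test suite: the key trick in _check_ld
# above is mock.patch.dict, which overrides an environment variable only for
# the duration of the with-block and restores the old environment afterwards.
# 'CC_LD' is just an example name here; the real variable comes from
# mesonbuild.envconfig.BinaryTable.evarMap.
def _example_env_override():
    import os
    from unittest import mock
    saved = os.environ.get('CC_LD')
    with mock.patch.dict(os.environ, {'CC_LD': 'lld-link'}):
        assert os.environ['CC_LD'] == 'lld-link'
    # After the with-block the previous value (or its absence) is restored.
    assert os.environ.get('CC_LD') == saved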
def test_link_environment_variable_lld_link(self):
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
self._check_ld('optlink', 'c', 'optlink')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# cause the installation to fail
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
# generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
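# Minimal sketch (illustrative only): the manual parsing above boils down to
# splitting each "Key: value" line of the generated .pc file once on the first
# colon, which is why matched_lines is compared against len(expected).
def _example_parse_pc(path):
    fields = {}
    with open(path) as f:
        for line in f:
            if ':' not in line or line.startswith('#'):
                continue  # skip variable definitions and comments
            key, _, val = line.partition(':')
            fields[key.strip()] = val.strip()
    return fields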
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
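# Illustrative sketch, assuming readelf is available: a get_soname()-style
# helper can read the SONAME entry from the ELF dynamic section like this.
# The real helper used by these tests may be implemented differently.
def _example_read_soname(libpath):
    import re
    import subprocess
    out = subprocess.check_output(['readelf', '-d', libpath], universal_newlines=True)
    m = re.search(r'SONAME.*\[(.*)\]', out)
    return m.group(1) if m else None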
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
lang_std = p + '_std'
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()[lang_std].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# turn unknown -std=... options into errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown during install fails non-fatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown during install fails non-fatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown during install fails non-fatally if we're not root
self.assertEqual(0, statf.st_uid)
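# Illustrative sketch: stat.filemode() renders an st_mode as the familiar
# 'drwxr-xr-x' style string that the checks above compare against want_mode;
# found_mode[1:] drops the leading file-type character so only the permission
# bits remain.
def _example_filemode(path):
    import os
    import stat
    mode = stat.filemode(os.stat(path).st_mode)  # e.g. '-rw-r--r--'
    return mode, mode[1:]                        # ('-rw-r--r--', 'rw-r--r--')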
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using the default
install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
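# Illustrative sketch: an install umask of 022 clears the group and other
# write bits, which is why the expected modes above are '-rw-r--r--' for data
# files and 'drwxr-xr-x' for directories regardless of the checkout umask.
def _example_apply_umask(requested_mode=0o666, umask=0o022):
    return oct(requested_mode & ~umask)  # '0o644'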
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: the .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags to -Lfoo -Lbar -lfoo -lbar before returning them, while
# pkgconf seems not to do that. Sigh. Support both orderings.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(t['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(t['target_sources'][0]['sources'][0]))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Check that a static library installed into a temporary prefix can be
consumed by another project through its generated pkg-config file.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On Windows, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS this workflow works out of the
box; on Linux, BSDs, Windows, etc. you need to set extra arguments such
as LD_LIBRARY_PATH, so the post-install part of this test only runs on
macOS. The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library flag appears exactly once on this line, i.e. it was deduplicated.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# The c_std defined in the project options must also be in effect when compiling natively.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
crossfile = tempfile.NamedTemporaryFile(mode='w')
env = {'CC': '"' + os.path.join(testdir, 'build_wrapper.py') + '"'}
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that installed libraries works
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
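# Helper for the test_ld_environment_variable_* tests below: sets the
# per-language linker environment variable (e.g. CC_LD for C, looked up via
# BinaryTable.evarMap) to `name`, re-detects the compiler for `lang`, and
# asserts that the linker meson picked has id `expected`. For example,
# _check_ld('ld.gold', 'gold', 'c', 'GNU ld.gold') verifies that CC_LD=gold
# selects the gold linker, provided an ld.gold binary is on PATH.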
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvar = mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('foo') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'GNU ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'GNU ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'lld')
@skipIfNoExecutable('rustc')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'GNU ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'GNU ld.gold')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'GNU ld.gold')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'GNU ld.gold')
@skipIfNoExecutable('gfortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'GNU ld.gold')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '73 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
def test_std_remains(self):
# The c_std defined in the project options must also be in effect when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches.
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine. If it is
# not, or the python headers can't be found, the test will raise
# MESON_SKIP_TEST. We could check beforehand what version of python is
# available, but that is the job of the module itself, so it's a bit of
# a chicken-and-egg situation; we just ask for forgiveness rather than
# permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
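# Runs self.rewrite_command with '--verbose --skip --sourcedir <directory>' plus
# the given args, echoes stdout/stderr for debugging, raises SkipTest if the
# project printed MESON_SKIP_TEST, and returns the rewriter's JSON report
# parsed from stderr (an empty dict when stderr is empty).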
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
if os.path.exists('/etc/debian_version'):
rc = subprocess.call(['pkg-config', '--cflags', 'python2'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc != 0:
# Python 2 will be removed in Debian Bullseye, thus we must
# remove the build dependency on python2-dev. Keep the tests
# but only run them if dev packages are available.
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', 'python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM, so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
This is mainly aimed at testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
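# The parser is fed raw TAP text; the cases below exercise the common TAP
# constructs: plans ('1..N', optionally with a '# skip'/'# todo' directive),
# test lines ('ok'/'not ok' with optional number, name and '# SKIP'/'# TODO'
# directive), 'Bail out!' lines, 'TAP version 13' headers, YAML blocks and
# '# ...' diagnostics.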
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
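# For example, convert_args(['-v', 'AllPlatformTests.test_prefix', 'FailureTests'])
# returns ['-v', '-k', 'AllPlatformTests and test_prefix or FailureTests'],
# i.e. unittest-style ClassName.test_name selectors become pytest -k expressions
# (test_prefix here is just an illustrative test name).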
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest or pytest-xdist not found, using unittest instead')
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
raise SystemExit(main())
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
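# TIM_NEW: let the device generate a fresh seed; TIM_RECOVER: recover from a
# previously written-down seed; TIM_MNEMONIC: upload a BIP39 mnemonic;
# TIM_PRIVKEY: upload a master private key (see the choices offered in
# KeepKeyPlugin.initialize_device below).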
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
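# `sequence` is a (change, address_index) pair; it is appended to the
# keystore's base derivation to form the full path of the signing key,
# e.g. (0, 5) -> "<derivation>/0/5".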
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# Returns the client for a given keystore. Can use xpub.
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
ib_api.py
|
"""
Module to facilitate trading through Interactive Brokers's API
see: https://interactivebrokers.github.io/tws-api/index.html
Brent Maranzano
Dec. 14, 2018
Classes
IBClient (EClient): Creates a socket to TWS or IBGateway, and handles
sending commands to IB through the socket.
    IBWrapper (EWrapper): Handles the incoming data from IB. Many of these
        methods are callbacks from the request commands.
    IBApp (IBWrapper, IBClient): This provides the main functionality. Many
        of the methods are over-rides of the IBWrapper methods to customize
        the functionality.
"""
import os.path
import time
import logging
import threading
import json
import numpy as np
import pandas as pd
from datetime import datetime
from ibapi import wrapper
from ibapi.client import EClient
from ibapi.contract import Contract
from ibapi.common import OrderId, ListOfContractDescription, BarData,\
HistogramDataList, TickerId
from ibapi.order import Order
from ibapi.order_state import OrderState
API_THREAD = None
def setup_logger():
"""Setup the logger.
"""
if not os.path.exists("log"):
os.makedirs("log")
time.strftime("pyibapi.%Y%m%d_%H%M%S.log")
recfmt = "(%(threadName)s) %(asctime)s.%(msecs)03d %(levelname)s" \
"%(filename)s:%(lineno)d %(message)s"
timefmt = '%y%m%d_%H:%M:%S'
logging.basicConfig(
filename=time.strftime("log/pyibapi.%y%m%d_%H%M%S.log"),
filemode="w",
level=logging.INFO,
format=recfmt, datefmt=timefmt
)
logger = logging.getLogger()
console = logging.StreamHandler()
console.setLevel(logging.ERROR)
logger.addHandler(console)
logging.debug("now is %s", datetime.now())
class IBClient(EClient):
"""Subclass EClient, which delivers message to the TWS API socket.
"""
def __init__(self, app_wrapper):
EClient.__init__(self, app_wrapper)
class IBWrapper(wrapper.EWrapper):
"""Subclass EWrapper, which translates messages from the TWS API socket
to the program.
"""
def __init__(self):
wrapper.EWrapper.__init__(self)
class HistoricalRequestError(Exception):
"""Exceptions generated during requesting historical stock price data.
"""
def __init__(self, message, errors):
super().__init__(message)
self.errors = errors
self.message = message
class IBApp(IBWrapper, IBClient):
"""Main program class. The TWS calls nextValidId after connection, so
the method is over-ridden to provide an entry point into the program.
class variables:
saved_contracts (dict): keys are symbols, values are dictionaries of
information to uniquely define a contract used for stock trading.
{symbol: {'contract_info_dictionary'}}
saved_orders (dict): keys are order ids, values are Order, Contract
{id: {order: Order, contract: Contract}}
TODO
positions
"""
def __init__(self):
IBWrapper.__init__(self)
IBClient.__init__(self, app_wrapper=self)
self.order_id = None
self.saved_contract_details = {}
self.positions = []
self._contract_details = {}
self._saved_orders = {}
self._open_orders = []
self._historical_data = []
self._historical_data_req_end = False
self._histogram = None
self._load_contracts('contract_file.json')
def error(self, reqId: TickerId, errorCode: int, errorString: str):
"""Overide EWrapper error method.
"""
super().error(reqId, errorCode, errorString)
print(reqId)
def _load_contracts(self, filename):
"""Load saved contracts.
"""
try:
with open(filename, mode='r') as file_obj:
self.saved_contracts = json.load(file_obj)
except FileNotFoundError:
pass
def _save_contracts(self):
"""Save contracts.
"""
with open("contracts.json", mode='a') as file_obj:
json.dump(self._contract_details, file_obj)
def nextValidId(self, orderId: int):
"""Method of EWrapper.
Sets the order_id class variable.
        This method is called after connection completion, so it
        provides an entry point into the class.
"""
super().nextValidId(orderId)
self.order_id = orderId
return self
def _get_next_order_id(self):
"""Retrieve the current class variable order_id and increment
it by one.
Returns (int) current order_id
"""
# reqIds can be used to update the order_id, if tracking is lost.
# self.reqIds(-1)
current_order_id = self.order_id
self.order_id += 1
return current_order_id
def get_contract_details(self, symbol=None):
"""Find the contract (STK, USD, NYSE|NASDAY.NMS|ARGA) for the symbol.
Upon execution of IB backend, the EWrapper.symbolSamples is called,
which is over-ridden to save the contracts to a class dictionary.
This function then monitors the class dictionary until
the symbol is found and then returns the contract.
Arguments:
symbol (string): Ticker.
Returns: (Contract) Contract for the symbol.
"""
# If the symbol has not already been saved, look it up.
if symbol not in self.saved_contract_details:
self._contract_details = None
# The IB server will call symbolSamples upon completion.
self.reqMatchingSymbols(1001, symbol)
# Loop until the server has completed the request.
while self._contract_details is None:
time.sleep(0.2)
# Select the proper contract
for contract in self._contract_details:
if contract.symbol == symbol and contract.currency == "USD"\
and contract.secType == "STK":
# NYSE stock
if contract.primaryExchange == "NYSE":
break
# Common ETFs
elif contract.primaryExchange == "ARCA":
break
# Nasdaq stock
elif contract.primaryExchange == "NASDAQ.NMS":
contract.primaryExchange = "ISLAND"
break
# Save the contract information needed for defining a Contract.
self.saved_contract_details[symbol] = {
'currency': contract.currency,
'secType': contract.secType,
'exchange': "SMART",
'primaryExchange': contract.primaryExchange,
'secIdType': contract.secIdType,
'secId': contract.secId
}
return self.saved_contract_details[symbol]
def symbolSamples(self, reqId: int,
contractDescriptions: ListOfContractDescription):
"""Callback from reqMatchingSymbols. Add contracts that are of
secType=STK, currency=USD, and primaryExchange=(NYSE | ISLAND) to the
class variable contract_search_results.
"""
super().symbolSamples(reqId, contractDescriptions)
        # Add all contracts to a list that the calling function can
        # access.
contracts = []
for desc in contractDescriptions:
contracts.append(desc.contract)
        # Assigning the list signals to the caller that the request is complete.
self._contract_details = contracts
def make_contract(self, symbol):
"""Create a contract for the given symbol.
Arguments:
symbol (str): Ticker symbol
"""
contract_info = self.get_contract_details(symbol)
contract = Contract()
contract.symbol = symbol
contract.currency = contract_info['currency']
contract.exchange = contract_info['exchange']
contract.primaryExchange = contract_info['primaryExchange']
contract.secType = contract_info['secType']
contract.secId = contract_info['secId']
return contract
def get_positions(self):
"""Get the account positions. If the class variable, positions, exists,
return that value, else call the EClient method reqPositions, wait for
a short time and then return the class variable positions.
Returns (dict): Dictionary of the positions information.
"""
self.positions = []
self.reqPositions()
time.sleep(1)
return pd.DataFrame.from_dict(self.positions).set_index('account')
def position(self, account: str, contract: Contract, position: float,
avgCost: float):
super().position(account, contract, position, avgCost)
self.positions.append({
'account': account,
'symbol': contract.symbol,
'secType': contract.secType,
'position': position,
'cost': avgCost
})
def positionEnd(self):
"""Cancel the position subscription after a return.
"""
super().positionEnd()
self.cancelPositions()
def create_bracket_orders(self, req_orders=None):
"""Create orders, but do not place.
Arguments:
req_orders (list): list of dictionaries - keys are:
symbol (str): Equity ticker symbol.
instruction (str): "BUY" | "SELL"
price (float): Order set price.
quantity (float): Order quantity.
outside_rth (bool): outside regular trading hours
tif (str): Time in force "DAY" | "GTC"
profit_price (float): Price for profit taking
stop_price (float): Price for stop loss
parent_id (int): Id of parent trade.
"""
# If only a single contract (dict) is passed convert it
# to a list with a single item.
if not isinstance(req_orders, list):
req_orders = [req_orders]
for req_order in req_orders:
contract = self.make_contract(symbol=req_order['symbol'])
# Create the parent order
order_id = self._get_next_order_id()
parent = Order()
parent.orderId = order_id
parent.action = req_order['instruction']
parent.orderType = "LMT"
parent.totalQuantity = req_order['quantity']
parent.lmtPrice = req_order['price']
parent.outsideRth = req_order['outside_rth']
parent.tif = req_order['tif']
parent.transmit = False
self._saved_orders[order_id] = {
"order": parent, "contract": contract
}
# Create the profit taker order
if req_order['profit_price'] is not None:
order_id = self._get_next_order_id()
profit_taker = Order()
profit_taker.orderId = order_id
profit_taker.action = "SELL"\
if req_order['instruction'] == "BUY" else "BUY"
profit_taker.orderType = "LMT"
profit_taker.totalQuantity = req_order['quantity']
profit_taker.lmtPrice = req_order['profit_price']
profit_taker.parentId = parent.orderId
profit_taker.transmit = False
self._saved_orders[order_id] = {
"order": profit_taker, "contract": contract
}
# Create stop loss order
if req_order['stop_price'] is not None:
order_id = self._get_next_order_id()
stop_loss = Order()
stop_loss.orderId = order_id
stop_loss.action = "SELL"\
if req_order['instruction'] == "BUY" else "BUY"
stop_loss.orderType = "STP"
stop_loss.auxPrice = req_order['stop_price']
stop_loss.totalQuantity = req_order['quantity']
stop_loss.parentId = parent.orderId
stop_loss.transmit = False
self._saved_orders[order_id] = {
"order": stop_loss, "contract": contract
}
def create_trailing_stop_orders(self, req_orders=None):
"""Create a trailing stop order.
Arguments:
req_orders (list): list of dictionaries - keys are:
symbol (str): Equity ticker symbol.
instruction (str): "BUY" | "SELL"
quantity (float): Order quantity.
trail_stop_price (float): Trailing stop price
trail_amount (float): Trailing amount in dollars.
limit_offset (float): Offset of limit price
for sell - limit offset is greater than trailing amount
for buy - limit offset is less than trailing amount
outside_rth (bool): outside regular trading hours
tif (str): Time in force "DAY" | "GTC"
parent_id (int): Id of parent trade.
"""
# If only a single contract (dict) is passed convert it
# to a list with a single item.
if not isinstance(req_orders, list):
req_orders = [req_orders]
for req_order in req_orders:
contract = self.make_contract(symbol=req_order['symbol'])
# Create the order
order_id = self._get_next_order_id()
order = Order()
order.orderId = order_id
order.action = req_order['instruction']
order.orderType = "TRAIL LIMIT"
order.totalQuantity = req_order['quantity']
order.trailStopPrice = req_order['trail_stop_price']
order.auxPrice = req_order['trail_amount']
order.lmtPriceOffset = req_order['limit_offset']
order.outsideRth = req_order['outside_rth']
order.tif = req_order['tif']
order.transmit = False
# TODO parent_id
self._saved_orders[order_id] = {
"order": order, "contract": contract
}
def create_stop_limit_orders(self, req_orders=None):
"""Create a trailing stop order.
Arguments:
req_orders (list): list of dictionaries - keys are:
symbol (str): Equity ticker symbol.
instruction (str): "BUY" | "SELL"
quantity (float): Order quantity.
stop_price (float): stop price
limit_price (float): limit price.
outside_rth (bool): outside regular trading hours
tif (str): Time in force "DAY" | "GTC"
profit_price (float): Profit taking price.
"""
# If only a single contract (dict) is passed convert it
# to a list with a single item.
if not isinstance(req_orders, list):
req_orders = [req_orders]
for req_order in req_orders:
contract = self.make_contract(symbol=req_order['symbol'])
# Create the order
order_id = self._get_next_order_id()
order = Order()
order.orderId = order_id
order.action = req_order['instruction']
order.orderType = "STP LMT"
order.totalQuantity = req_order['quantity']
order.lmtPrice = req_order['limit_price']
order.auxPrice = req_order['stop_price']
order.outsideRth = req_order['outside_rth']
order.tif = req_order['tif']
order.transmit = False
self._saved_orders[order_id] = {
"order": order, "contract": contract
}
# Create the profit taker order
if req_order['profit_price'] is not None:
profit_taker_order_id = self._get_next_order_id()
profit_taker = Order()
profit_taker.orderId = profit_taker_order_id
profit_taker.action = "SELL"\
if req_order['instruction'] == "BUY" else "BUY"
profit_taker.orderType = "LMT"
profit_taker.totalQuantity = req_order['quantity']
profit_taker.lmtPrice = req_order['profit_price']
profit_taker.parentId = order.orderId
profit_taker.transmit = False
self._saved_orders[profit_taker_order_id] = {
"order": profit_taker, "contract": contract
}
def create_pegged_orders(self, req_orders=None):
"""Create a pegged to bench mark order.
Arguments:
req_orders (list): list of dictionaries - keys are:
symbol (str): Equity ticker symbol.
instruction (str): "BUY" | "SELL"
quantity (float): Order quantity.
starting_price (float): Order starting price.
outside_rth (bool): outside regular trading hours
tif (str): Time in force "DAY" | "GTC"
peg_change_amount (float): Change of price for the target
ref_change_amount (float): Change of price of the reference
ref_contract_id (int): Contract ID of the reference
SPY: ConID: 756733, exchange: ARCA
QQQ: ConID: 320227571, exchange: NASDAQ
ref_exchange (str): Exchange of the reference
ref_price (float): Start price of the reference
ref_lower_price (float): Lower ref price allowed
ref_upper_price (float): Upper ref price allowed
"""
# If only a single contract (dict) is passed convert it
# to a list with a single item.
if not isinstance(req_orders, list):
req_orders = [req_orders]
for req_order in req_orders:
contract = self.make_contract(symbol=req_order['symbol'])
# Create the parent order
order_id = self._get_next_order_id()
order = Order()
order.orderId = order_id
order.orderType = "PEG BENCH"
order.action = req_order['instruction']
order.totalQuantity = req_order['quantity']
order.startingPrice = req_order['starting_price']
order.isPeggedChangeAmountDecrease = False
order.peggedChangeAmount = req_order['peg_change_amount']
order.referenceChangeAmount = req_order['ref_change_amount']
order.referenceContractId = req_order['ref_contract_id']
order.referenceExchange = req_order['ref_exchange']
order.stockRefPrice = req_order['ref_price']
order.stockRangeLower = req_order['ref_lower_price']
order.stockRangeUpper = req_order['ref_upper_price']
order.transmit = False
self._saved_orders[order_id] = {
"order": order, "contract": contract
}
def get_saved_orders(self, symbol=None):
"""Return saved orders for symbol. If symbol is None
return all saved orders.
Returns (dict) {order_id: {order: order, contract: contract}}
"""
if symbol is None:
return self._saved_orders
orders = dict()
for oid, order in self._saved_orders.items():
if order['contract'].symbol == symbol:
orders[oid] = order
return orders
def place_order(self, order_id=None):
"""Place a saved order. from a previously created saved order with
order_id.
Arguments:
order_id (int): The order_id of a previously created order.
"""
if order_id in self._saved_orders:
self.placeOrder(order_id, self._saved_orders[order_id]['contract'],
self._saved_orders[order_id]['order'])
del self._saved_orders[order_id]
def place_all_orders(self):
"""Place all the saved orders.
"""
order_ids = list(self._saved_orders.keys())
for order_id in order_ids:
self.place_order(order_id=order_id)
def get_open_orders(self):
"""Call the IBApi.EClient reqOpenOrders. Open orders are returned via
the callback openOrder.
"""
self.reqOpenOrders()
def openOrder(self, orderId: OrderId, contract: Contract, order: Order,
orderState: OrderState):
"""Callback from reqOpenOrders(). Method is over-ridden from the
EWrapper class.
"""
super().openOrder(orderId, contract, order, orderState)
self._open_orders.append({
'order_id': orderId,
'contract': contract,
'order': order
})
def get_quotes(self, symbols=None):
"""Get a quote for the symbol. Callsback to
Warning: This may incur fees!
Arguments:
symbols (str|list): Equity ticker symbol or list of ticker symbols.
        Returns (pandas.Series): Last trade price for the symbols.
"""
# If only a single symbol is passed convert it
# to a list with a single item.
if isinstance(symbols, str):
symbols = [symbols]
# Get the bar data for each symbol
        quotes = pd.Series(index=symbols, dtype=float)
for symbol in symbols:
quote = self._req_historical_data(
symbol,
end_date="",
duration="2 D",
size="1 min",
info="TRADES",
rth=False
)
quotes[symbol] = float(quote.iloc[-1]['close_price'])
return quotes
def get_price_history(self, symbols=None, start_date=None, end_date=None,
bar_size="1 day", rth=False):
"""Get the price history for symbols.
Arguments:
symbols (str|list): Equity ticker symbol or list of ticker symbols.
start_date (datetime): First date for data retrieval.
end_date (datetime): Last data for data retrieval.
bar_size (str): Bar size (e.g. "1 min", "1 day", "1 month")
for valid strings see:
http://interactivebrokers.github.io/tws-api/historical_bars.html
rth (bool): True to only return data within regular trading hours.
return (pandas.DataFrame): Price history data.
"""
if end_date is None:
end_date = datetime.today()
# If only a single symbol is passed convert it
# to a list with a single item.
if isinstance(symbols, str):
symbols = [symbols]
# Estimate a duration string for the given date span.
# TODO fix duration of seconds
duration = end_date - start_date
if duration.days >= 365:
duration = "{} Y".format(int(duration.days/365))
elif duration.days < 365 and duration.days > 1:
duration = "{} D".format(np.busday_count(start_date, end_date))
else:
duration = "{} S".format(duration.seconds)
# Get the bar data for each symbol
bars = {}
for symbol in symbols:
try:
bars[symbol] = self._req_historical_data(
symbol,
end_date=end_date.strftime("%Y%m%d %H:%M:%S"),
duration=duration,
size=bar_size,
info="TRADES",
rth=rth
)
except HistoricalRequestError as err:
print(err.message)
# Format the bars dictionary for conversion into DataFrame
bars = {(outerKey, innerKey): values for outerKey, innerDict
in bars.items() for innerKey, values in innerDict.items()}
bars = pd.DataFrame(bars)
# Reindex the bars using real time stamps.
if (bar_size.find("secs") != -1 or bar_size.find("min") != -1 or
bar_size.find("hour") != -1):
index = [datetime.strptime(d, "%Y-%m-%d %H:%M:%S")
for d in bars.index]
else:
index = [datetime.strptime(d, "%Y-%m-%d") for d in bars.index]
bars.index = index
# Try to get rid of any missing data.
bars.fillna(method="ffill", inplace=True)
return bars
def _req_historical_data(self, symbol, end_date="", duration="20 D",
size="1 day", info="TRADES", rth=False):
"""Get historical data using reqHistoricalData. Upon completion the
server will callback historicalData, which is overridden.
http://interactivebrokers.github.io/tws-api/historical_bars.html#hd_duration
Arguments:
symbol (str): Ticker symbol
end_date (datetime): Last date requested
duration (str): How far to go back - valid options: (S, D, W, M, Y)
size (str): Bar size (see link)
info (str): Type of data to return (see link)
rth (bool): Return data only in regular trading hours
"""
contract = self.make_contract(symbol)
self._historical_data = []
self._historical_data_req_end = False
self.reqHistoricalData(2001, contract, end_date, duration, size,
info, rth, 1, False, [])
# Wait until the request has returned (make it blocking).
start_time = datetime.now()
while self._historical_data_req_end is not True:
            # Give up if the request takes too long (10-second limit).
            if (datetime.now() - start_time).total_seconds() > 10:
raise HistoricalRequestError(
"Timeout occurred while retrieving price data for {}"
.format(symbol),
"_req_historical_data({})".format(symbol)
)
time.sleep(0.2)
        # Convert the returned bars into a DataFrame.
bars_index = [b.date[:4]+"-"+b.date[4:6]+"-"+b.date[6:]
for b in self._historical_data]
bars_data = [[float(b.open), float(b.high), float(b.low),
float(b.close), float(b.volume)]
for b in self._historical_data]
bars = pd.DataFrame(
index=bars_index,
columns=['open_price', 'high', 'low', 'close_price', 'volume'],
data=bars_data
)
return bars
def historicalData(self, reqId: int, bar: BarData):
"""Overridden method from EWrapper. Checks to make sure reqId matches
the self.historical_data[req_id] to confirm correct symbol.
"""
self._historical_data.append(bar)
def historicalDataEnd(self, reqId: int, start: str, end: str):
"""Overrides the EWrapper method.
"""
self._historical_data_req_end = True
def get_histogram(self, symbol=None, period="20 days"):
"""Get histograms of the symbols.
Arguments:
symbol (str): Equity ticker symbol or list of ticker symbols.
period (str): Number of days to collect data.
Returns (?): Histograms of the symbols
"""
# If only a single symbol is passed convert it
# to a list with a single item.
contract = self.make_contract(symbol)
self._histogram = None
self.reqHistogramData(3001, contract, False, period)
while self._histogram is None:
time.sleep(0.2)
histogram = pd.DataFrame(
columns=["price", "count"],
data=[[float(p.price), int(p.count)] for p in self._histogram]
)
return histogram
def histogramData(self, reqId: int, items: HistogramDataList):
"""EWrapper method called from reqHistogramData.
http://interactivebrokers.github.io/tws-api/histograms.html
"""
self._histogram = items
def keyboardInterrupt(self):
"""Stop exectution.
"""
pass
def quick_bracket(self, symbol=None, instruction=None, quantity=None,
amount=1000, limit_percent=None, profit_percent=None):
"""Calculate bracket order for symbol using a limit provided by
limit_percent.
Arguments
symbol (str): Ticker symbol
instruction (str): "BUY" | "SELL"
quantity (int): Number of shares
amount (float): Amount in dollars to trade
limit_percent (float): Percent change from current quote to set limit.
profit_percent (float): Percent change from limit price to take profit.
Returns (dict) Parameters necessary to place a bracket order.
"""
# Calculate a reasonable change if limit_percent is not given.
if limit_percent is None:
if instruction == "BUY":
limit_percent = -0.3
if instruction == "SELL":
limit_percent = 0.3
        # Calculate a reasonable change if profit_percent is not given.
if profit_percent is None:
if instruction == "BUY":
profit_percent = 0.3
if instruction == "SELL":
profit_percent = -0.3
# Get the quote
quote = self.get_quotes(symbol).loc[symbol]
# Calculate the limit price from the limit_percent.
limit_price = round(quote * (1 + limit_percent/100.), 2)
# Calculate the profit price from the limit_price.
profit_price = round(limit_price * (1 + profit_percent/100.), 2)
# Calculate quantity if amount was provided.
if quantity is None:
quantity = int(amount / quote)
req_order = {
'symbol': symbol,
'instruction': instruction,
'quantity': quantity,
'price': limit_price,
'tif': "DAY",
'outside_rth': True,
'profit_price': profit_price,
'stop_price': None
}
self.create_bracket_orders(req_orders=[req_order])
for order_id in list(self.get_saved_orders(symbol).keys()):
self.place_order(order_id=order_id)
def main(port=7497):
"""Entry point into the program.
Arguments:
        port (int): Port number that IBGateway or TWS is listening on.
"""
global API_THREAD
try:
app = IBApp()
app.connect("127.0.0.1", port, clientId=0)
print("serverVersion:%s connectionTime:%s" % (app.serverVersion(),
app.twsConnectionTime()))
API_THREAD = threading.Thread(target=app.run)
API_THREAD.start()
return app
except KeyboardInterrupt:
pass
if __name__ == "__main__":
import sys
    # port number the socket server is using (paper: 7497, live: 7496)
    PORT_NUMBER = int(sys.argv[1])
main(port=PORT_NUMBER)
|